src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_single/nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_avx_256_single.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "gmxpre.h"
39
40 #include "config.h"
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
46
47 #include "kernelutil_x86_avx_256_single.h"
48
49 /*
50  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single
51  * Electrostatics interaction: Ewald
52  * VdW interaction:            LennardJones
53  * Geometry:                   Water4-Particle
54  * Calculate force/pot:        PotentialAndForce
55  */
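/*
 * Reading the kernel name: ElecEwSw is Ewald real-space electrostatics with a potential
 * switch, VdwLJSw is Lennard-Jones with a potential switch, GeomW4P1 pairs the four sites
 * of a water i molecule with single j particles, and VF means both potential and force are
 * accumulated. Eight j particles are processed per inner iteration in one 256-bit register.
 */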
56 void
57 nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single
58                     (t_nblist                    * gmx_restrict       nlist,
59                      rvec                        * gmx_restrict          xx,
60                      rvec                        * gmx_restrict          ff,
61                      struct t_forcerec           * gmx_restrict          fr,
62                      t_mdatoms                   * gmx_restrict     mdatoms,
63                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64                      t_nrnb                      * gmx_restrict        nrnb)
65 {
66     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
67      * just 0 for non-waters.
68      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
69  * jnr indices corresponding to data put in the eight positions in the SIMD register.
70      */
71     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
72     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73     int              jnrA,jnrB,jnrC,jnrD;
74     int              jnrE,jnrF,jnrG,jnrH;
75     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
79     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
80     real             rcutoff_scalar;
81     real             *shiftvec,*fshift,*x,*f;
82     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
83     real             scratch[4*DIM];
84     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85     real *           vdwioffsetptr0;
86     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
87     real *           vdwioffsetptr1;
88     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
89     real *           vdwioffsetptr2;
90     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
91     real *           vdwioffsetptr3;
92     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
93     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
94     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
95     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
96     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
97     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
98     __m256           dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
99     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
100     real             *charge;
101     int              nvdwtype;
102     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
103     int              *vdwtype;
104     real             *vdwparam;
105     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
106     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
107     __m256i          ewitab;
108     __m128i          ewitab_lo,ewitab_hi;
109     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
110     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
111     real             *ewtab;
112     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
113     real             rswitch_scalar,d_scalar;
114     __m256           dummy_mask,cutoff_mask;
115     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
116     __m256           one     = _mm256_set1_ps(1.0);
117     __m256           two     = _mm256_set1_ps(2.0);
118     x                = xx[0];
119     f                = ff[0];
120
121     nri              = nlist->nri;
122     iinr             = nlist->iinr;
123     jindex           = nlist->jindex;
124     jjnr             = nlist->jjnr;
125     shiftidx         = nlist->shift;
126     gid              = nlist->gid;
127     shiftvec         = fr->shift_vec[0];
128     fshift           = fr->fshift[0];
129     facel            = _mm256_set1_ps(fr->ic->epsfac);
130     charge           = mdatoms->chargeA;
131     nvdwtype         = fr->ntype;
132     vdwparam         = fr->nbfp;
133     vdwtype          = mdatoms->typeA;
134
135     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
136     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
137     beta2            = _mm256_mul_ps(beta,beta);
138     beta3            = _mm256_mul_ps(beta,beta2);
139
140     ewtab            = fr->ic->tabq_coul_FDV0;
141     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
142     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
143
144     /* Setup water-specific parameters */
145     inr              = nlist->iinr[0];
146     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
147     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
148     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
149     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
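    /* For this Water4-Particle geometry only site 0 of the water carries Lennard-Jones
     * parameters (vdwioffsetptr0); sites 1-3 carry the charges, which are pre-scaled by
     * epsfac so that qq = iq*jq can be used directly in the Coulomb terms below.
     */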
150
151     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
152     rcutoff_scalar   = fr->ic->rcoulomb;
153     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
154     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
155
156     rswitch_scalar   = fr->ic->rcoulomb_switch;
157     rswitch          = _mm256_set1_ps(rswitch_scalar);
158     /* Setup switch parameters */
159     d_scalar         = rcutoff_scalar-rswitch_scalar;
160     d                = _mm256_set1_ps(d_scalar);
161     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
162     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
163     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
164     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
165     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
166     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
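    /* These coefficients implement the quintic potential-switch polynomial
     *   sw(d)  = 1 - 10*(d/D)^3 + 15*(d/D)^4 - 6*(d/D)^5,  d = r - rswitch, D = rcutoff - rswitch
     *   dsw(d) = d(sw)/dr = -30*d^2/D^3 + 60*d^3/D^4 - 30*d^4/D^5
     * which take both potentials smoothly from full strength at rswitch to zero, with zero
     * slope, at rcutoff.
     */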
167
168     /* Avoid stupid compiler warnings */
169     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
170     j_coord_offsetA = 0;
171     j_coord_offsetB = 0;
172     j_coord_offsetC = 0;
173     j_coord_offsetD = 0;
174     j_coord_offsetE = 0;
175     j_coord_offsetF = 0;
176     j_coord_offsetG = 0;
177     j_coord_offsetH = 0;
178
179     outeriter        = 0;
180     inneriter        = 0;
181
182     for(iidx=0;iidx<4*DIM;iidx++)
183     {
184         scratch[iidx] = 0.0;
185     }
186
187     /* Start outer loop over neighborlists */
188     for(iidx=0; iidx<nri; iidx++)
189     {
190         /* Load shift vector for this list */
191         i_shift_offset   = DIM*shiftidx[iidx];
192
193         /* Load limits for loop over neighbors */
194         j_index_start    = jindex[iidx];
195         j_index_end      = jindex[iidx+1];
196
197         /* Get outer coordinate index */
198         inr              = iinr[iidx];
199         i_coord_offset   = DIM*inr;
200
201         /* Load i particle coords and add shift vector */
202         gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
203                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
204
205         fix0             = _mm256_setzero_ps();
206         fiy0             = _mm256_setzero_ps();
207         fiz0             = _mm256_setzero_ps();
208         fix1             = _mm256_setzero_ps();
209         fiy1             = _mm256_setzero_ps();
210         fiz1             = _mm256_setzero_ps();
211         fix2             = _mm256_setzero_ps();
212         fiy2             = _mm256_setzero_ps();
213         fiz2             = _mm256_setzero_ps();
214         fix3             = _mm256_setzero_ps();
215         fiy3             = _mm256_setzero_ps();
216         fiz3             = _mm256_setzero_ps();
217
218         /* Reset potential sums */
219         velecsum         = _mm256_setzero_ps();
220         vvdwsum          = _mm256_setzero_ps();
221
222         /* Start inner kernel loop */
223         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
224         {
225
226             /* Get j neighbor index, and coordinate index */
227             jnrA             = jjnr[jidx];
228             jnrB             = jjnr[jidx+1];
229             jnrC             = jjnr[jidx+2];
230             jnrD             = jjnr[jidx+3];
231             jnrE             = jjnr[jidx+4];
232             jnrF             = jjnr[jidx+5];
233             jnrG             = jjnr[jidx+6];
234             jnrH             = jjnr[jidx+7];
235             j_coord_offsetA  = DIM*jnrA;
236             j_coord_offsetB  = DIM*jnrB;
237             j_coord_offsetC  = DIM*jnrC;
238             j_coord_offsetD  = DIM*jnrD;
239             j_coord_offsetE  = DIM*jnrE;
240             j_coord_offsetF  = DIM*jnrF;
241             j_coord_offsetG  = DIM*jnrG;
242             j_coord_offsetH  = DIM*jnrH;
243
244             /* load j atom coordinates */
245             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
246                                                  x+j_coord_offsetC,x+j_coord_offsetD,
247                                                  x+j_coord_offsetE,x+j_coord_offsetF,
248                                                  x+j_coord_offsetG,x+j_coord_offsetH,
249                                                  &jx0,&jy0,&jz0);
250
251             /* Calculate displacement vector */
252             dx00             = _mm256_sub_ps(ix0,jx0);
253             dy00             = _mm256_sub_ps(iy0,jy0);
254             dz00             = _mm256_sub_ps(iz0,jz0);
255             dx10             = _mm256_sub_ps(ix1,jx0);
256             dy10             = _mm256_sub_ps(iy1,jy0);
257             dz10             = _mm256_sub_ps(iz1,jz0);
258             dx20             = _mm256_sub_ps(ix2,jx0);
259             dy20             = _mm256_sub_ps(iy2,jy0);
260             dz20             = _mm256_sub_ps(iz2,jz0);
261             dx30             = _mm256_sub_ps(ix3,jx0);
262             dy30             = _mm256_sub_ps(iy3,jy0);
263             dz30             = _mm256_sub_ps(iz3,jz0);
264
265             /* Calculate squared distance and things based on it */
266             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
267             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
268             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
269             rsq30            = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
270
271             rinv00           = avx256_invsqrt_f(rsq00);
272             rinv10           = avx256_invsqrt_f(rsq10);
273             rinv20           = avx256_invsqrt_f(rsq20);
274             rinv30           = avx256_invsqrt_f(rsq30);
275
276             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
277             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
278             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
279             rinvsq30         = _mm256_mul_ps(rinv30,rinv30);
280
281             /* Load parameters for j particles */
282             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
283                                                                  charge+jnrC+0,charge+jnrD+0,
284                                                                  charge+jnrE+0,charge+jnrF+0,
285                                                                  charge+jnrG+0,charge+jnrH+0);
286             vdwjidx0A        = 2*vdwtype[jnrA+0];
287             vdwjidx0B        = 2*vdwtype[jnrB+0];
288             vdwjidx0C        = 2*vdwtype[jnrC+0];
289             vdwjidx0D        = 2*vdwtype[jnrD+0];
290             vdwjidx0E        = 2*vdwtype[jnrE+0];
291             vdwjidx0F        = 2*vdwtype[jnrF+0];
292             vdwjidx0G        = 2*vdwtype[jnrG+0];
293             vdwjidx0H        = 2*vdwtype[jnrH+0];
294
295             fjx0             = _mm256_setzero_ps();
296             fjy0             = _mm256_setzero_ps();
297             fjz0             = _mm256_setzero_ps();
298
299             /**************************
300              * CALCULATE INTERACTIONS *
301              **************************/
302
303             if (gmx_mm256_any_lt(rsq00,rcutoff2))
304             {
305
306             r00              = _mm256_mul_ps(rsq00,rinv00);
307
308             /* Compute parameters for interactions between i and j atoms */
309             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
310                                             vdwioffsetptr0+vdwjidx0B,
311                                             vdwioffsetptr0+vdwjidx0C,
312                                             vdwioffsetptr0+vdwjidx0D,
313                                             vdwioffsetptr0+vdwjidx0E,
314                                             vdwioffsetptr0+vdwjidx0F,
315                                             vdwioffsetptr0+vdwjidx0G,
316                                             vdwioffsetptr0+vdwjidx0H,
317                                             &c6_00,&c12_00);
318
319             /* LENNARD-JONES DISPERSION/REPULSION */
320
321             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
322             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
323             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
324             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
325             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
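            /* Literally, vvdw = c12*rinv^12/12 - c6*rinv^6/6 and fvdw = (c12*rinv^12 - c6*rinv^6)/r^2;
             * the stored c6/c12 already carry the 6/12 derivative prefactors, which is why the energy
             * needs the one_sixth/one_twelfth factors while the scalar force does not.
             */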
326
327             d                = _mm256_sub_ps(r00,rswitch);
328             d                = _mm256_max_ps(d,_mm256_setzero_ps());
329             d2               = _mm256_mul_ps(d,d);
330             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
331
332             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
333
334             /* Evaluate switch function */
335             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
336             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
337             vvdw             = _mm256_mul_ps(vvdw,sw);
338             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
339
340             /* Update potential sum for this i atom from the interaction with this j atom. */
341             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
342             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
343
344             fscal            = fvdw;
345
346             fscal            = _mm256_and_ps(fscal,cutoff_mask);
347
348             /* Calculate temporary vectorial force */
349             tx               = _mm256_mul_ps(fscal,dx00);
350             ty               = _mm256_mul_ps(fscal,dy00);
351             tz               = _mm256_mul_ps(fscal,dz00);
352
353             /* Update vectorial force */
354             fix0             = _mm256_add_ps(fix0,tx);
355             fiy0             = _mm256_add_ps(fiy0,ty);
356             fiz0             = _mm256_add_ps(fiz0,tz);
357
358             fjx0             = _mm256_add_ps(fjx0,tx);
359             fjy0             = _mm256_add_ps(fjy0,ty);
360             fjz0             = _mm256_add_ps(fjz0,tz);
361
362             }
363
364             /**************************
365              * CALCULATE INTERACTIONS *
366              **************************/
367
368             if (gmx_mm256_any_lt(rsq10,rcutoff2))
369             {
370
371             r10              = _mm256_mul_ps(rsq10,rinv10);
372
373             /* Compute parameters for interactions between i and j atoms */
374             qq10             = _mm256_mul_ps(iq1,jq0);
375
376             /* EWALD ELECTROSTATICS */
377             
378             /* Analytical PME correction */
379             zeta2            = _mm256_mul_ps(beta2,rsq10);
380             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
381             pmecorrF         = avx256_pmecorrF_f(zeta2);
382             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
383             felec            = _mm256_mul_ps(qq10,felec);
384             pmecorrV         = avx256_pmecorrV_f(zeta2);
385             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
386             velec            = _mm256_sub_ps(rinv10,pmecorrV);
387             velec            = _mm256_mul_ps(qq10,velec);
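            /* The block above assembles the real-space Ewald pair terms analytically, without
             * calling erfc(): pmecorrV*beta approximates erf(beta*r)/r and pmecorrF*beta^3
             * approximates (d/dr)(erf(beta*r)/r)/r, so that
             *   velec = qq*erfc(beta*r)/r
             *   felec = qq*(erfc(beta*r)/r + 2*beta/sqrt(pi)*exp(-beta^2*r^2))/r^2
             * The same correction is reused for the 2-0 and 3-0 pairs below.
             */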
388             
389             d                = _mm256_sub_ps(r10,rswitch);
390             d                = _mm256_max_ps(d,_mm256_setzero_ps());
391             d2               = _mm256_mul_ps(d,d);
392             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
393
394             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
395
396             /* Evaluate switch function */
397             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
398             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
399             velec            = _mm256_mul_ps(velec,sw);
400             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
401
402             /* Update potential sum for this i atom from the interaction with this j atom. */
403             velec            = _mm256_and_ps(velec,cutoff_mask);
404             velecsum         = _mm256_add_ps(velecsum,velec);
405
406             fscal            = felec;
407
408             fscal            = _mm256_and_ps(fscal,cutoff_mask);
409
410             /* Calculate temporary vectorial force */
411             tx               = _mm256_mul_ps(fscal,dx10);
412             ty               = _mm256_mul_ps(fscal,dy10);
413             tz               = _mm256_mul_ps(fscal,dz10);
414
415             /* Update vectorial force */
416             fix1             = _mm256_add_ps(fix1,tx);
417             fiy1             = _mm256_add_ps(fiy1,ty);
418             fiz1             = _mm256_add_ps(fiz1,tz);
419
420             fjx0             = _mm256_add_ps(fjx0,tx);
421             fjy0             = _mm256_add_ps(fjy0,ty);
422             fjz0             = _mm256_add_ps(fjz0,tz);
423
424             }
425
426             /**************************
427              * CALCULATE INTERACTIONS *
428              **************************/
429
430             if (gmx_mm256_any_lt(rsq20,rcutoff2))
431             {
432
433             r20              = _mm256_mul_ps(rsq20,rinv20);
434
435             /* Compute parameters for interactions between i and j atoms */
436             qq20             = _mm256_mul_ps(iq2,jq0);
437
438             /* EWALD ELECTROSTATICS */
439             
440             /* Analytical PME correction */
441             zeta2            = _mm256_mul_ps(beta2,rsq20);
442             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
443             pmecorrF         = avx256_pmecorrF_f(zeta2);
444             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
445             felec            = _mm256_mul_ps(qq20,felec);
446             pmecorrV         = avx256_pmecorrV_f(zeta2);
447             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
448             velec            = _mm256_sub_ps(rinv20,pmecorrV);
449             velec            = _mm256_mul_ps(qq20,velec);
450             
451             d                = _mm256_sub_ps(r20,rswitch);
452             d                = _mm256_max_ps(d,_mm256_setzero_ps());
453             d2               = _mm256_mul_ps(d,d);
454             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
455
456             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
457
458             /* Evaluate switch function */
459             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
460             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
461             velec            = _mm256_mul_ps(velec,sw);
462             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
463
464             /* Update potential sum for this i atom from the interaction with this j atom. */
465             velec            = _mm256_and_ps(velec,cutoff_mask);
466             velecsum         = _mm256_add_ps(velecsum,velec);
467
468             fscal            = felec;
469
470             fscal            = _mm256_and_ps(fscal,cutoff_mask);
471
472             /* Calculate temporary vectorial force */
473             tx               = _mm256_mul_ps(fscal,dx20);
474             ty               = _mm256_mul_ps(fscal,dy20);
475             tz               = _mm256_mul_ps(fscal,dz20);
476
477             /* Update vectorial force */
478             fix2             = _mm256_add_ps(fix2,tx);
479             fiy2             = _mm256_add_ps(fiy2,ty);
480             fiz2             = _mm256_add_ps(fiz2,tz);
481
482             fjx0             = _mm256_add_ps(fjx0,tx);
483             fjy0             = _mm256_add_ps(fjy0,ty);
484             fjz0             = _mm256_add_ps(fjz0,tz);
485
486             }
487
488             /**************************
489              * CALCULATE INTERACTIONS *
490              **************************/
491
492             if (gmx_mm256_any_lt(rsq30,rcutoff2))
493             {
494
495             r30              = _mm256_mul_ps(rsq30,rinv30);
496
497             /* Compute parameters for interactions between i and j atoms */
498             qq30             = _mm256_mul_ps(iq3,jq0);
499
500             /* EWALD ELECTROSTATICS */
501             
502             /* Analytical PME correction */
503             zeta2            = _mm256_mul_ps(beta2,rsq30);
504             rinv3            = _mm256_mul_ps(rinvsq30,rinv30);
505             pmecorrF         = avx256_pmecorrF_f(zeta2);
506             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
507             felec            = _mm256_mul_ps(qq30,felec);
508             pmecorrV         = avx256_pmecorrV_f(zeta2);
509             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
510             velec            = _mm256_sub_ps(rinv30,pmecorrV);
511             velec            = _mm256_mul_ps(qq30,velec);
512             
513             d                = _mm256_sub_ps(r30,rswitch);
514             d                = _mm256_max_ps(d,_mm256_setzero_ps());
515             d2               = _mm256_mul_ps(d,d);
516             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
517
518             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
519
520             /* Evaluate switch function */
521             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
522             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
523             velec            = _mm256_mul_ps(velec,sw);
524             cutoff_mask      = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
525
526             /* Update potential sum for this i atom from the interaction with this j atom. */
527             velec            = _mm256_and_ps(velec,cutoff_mask);
528             velecsum         = _mm256_add_ps(velecsum,velec);
529
530             fscal            = felec;
531
532             fscal            = _mm256_and_ps(fscal,cutoff_mask);
533
534             /* Calculate temporary vectorial force */
535             tx               = _mm256_mul_ps(fscal,dx30);
536             ty               = _mm256_mul_ps(fscal,dy30);
537             tz               = _mm256_mul_ps(fscal,dz30);
538
539             /* Update vectorial force */
540             fix3             = _mm256_add_ps(fix3,tx);
541             fiy3             = _mm256_add_ps(fiy3,ty);
542             fiz3             = _mm256_add_ps(fiz3,tz);
543
544             fjx0             = _mm256_add_ps(fjx0,tx);
545             fjy0             = _mm256_add_ps(fjy0,ty);
546             fjz0             = _mm256_add_ps(fjz0,tz);
547
548             }
549
550             fjptrA             = f+j_coord_offsetA;
551             fjptrB             = f+j_coord_offsetB;
552             fjptrC             = f+j_coord_offsetC;
553             fjptrD             = f+j_coord_offsetD;
554             fjptrE             = f+j_coord_offsetE;
555             fjptrF             = f+j_coord_offsetF;
556             fjptrG             = f+j_coord_offsetG;
557             fjptrH             = f+j_coord_offsetH;
558
559             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
560
561             /* Inner loop uses 386 flops */
562         }
563
564         if(jidx<j_index_end)
565         {
566
567             /* Get j neighbor index, and coordinate index */
568             jnrlistA         = jjnr[jidx];
569             jnrlistB         = jjnr[jidx+1];
570             jnrlistC         = jjnr[jidx+2];
571             jnrlistD         = jjnr[jidx+3];
572             jnrlistE         = jjnr[jidx+4];
573             jnrlistF         = jjnr[jidx+5];
574             jnrlistG         = jjnr[jidx+6];
575             jnrlistH         = jjnr[jidx+7];
576             /* Sign of each element will be negative for non-real atoms.
577              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
578              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
579              */
580             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
581                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
582                                             
583             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
584             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
585             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
586             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
587             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
588             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
589             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
590             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
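            /* Padding entries in the list have negative jnr. They are clamped to index 0 here so the
             * coordinate loads stay in bounds, their energy and force contributions are cleared with
             * dummy_mask below, and their force output is redirected to the local scratch buffer.
             */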
591             j_coord_offsetA  = DIM*jnrA;
592             j_coord_offsetB  = DIM*jnrB;
593             j_coord_offsetC  = DIM*jnrC;
594             j_coord_offsetD  = DIM*jnrD;
595             j_coord_offsetE  = DIM*jnrE;
596             j_coord_offsetF  = DIM*jnrF;
597             j_coord_offsetG  = DIM*jnrG;
598             j_coord_offsetH  = DIM*jnrH;
599
600             /* load j atom coordinates */
601             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
602                                                  x+j_coord_offsetC,x+j_coord_offsetD,
603                                                  x+j_coord_offsetE,x+j_coord_offsetF,
604                                                  x+j_coord_offsetG,x+j_coord_offsetH,
605                                                  &jx0,&jy0,&jz0);
606
607             /* Calculate displacement vector */
608             dx00             = _mm256_sub_ps(ix0,jx0);
609             dy00             = _mm256_sub_ps(iy0,jy0);
610             dz00             = _mm256_sub_ps(iz0,jz0);
611             dx10             = _mm256_sub_ps(ix1,jx0);
612             dy10             = _mm256_sub_ps(iy1,jy0);
613             dz10             = _mm256_sub_ps(iz1,jz0);
614             dx20             = _mm256_sub_ps(ix2,jx0);
615             dy20             = _mm256_sub_ps(iy2,jy0);
616             dz20             = _mm256_sub_ps(iz2,jz0);
617             dx30             = _mm256_sub_ps(ix3,jx0);
618             dy30             = _mm256_sub_ps(iy3,jy0);
619             dz30             = _mm256_sub_ps(iz3,jz0);
620
621             /* Calculate squared distance and things based on it */
622             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
623             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
624             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
625             rsq30            = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
626
627             rinv00           = avx256_invsqrt_f(rsq00);
628             rinv10           = avx256_invsqrt_f(rsq10);
629             rinv20           = avx256_invsqrt_f(rsq20);
630             rinv30           = avx256_invsqrt_f(rsq30);
631
632             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
633             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
634             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
635             rinvsq30         = _mm256_mul_ps(rinv30,rinv30);
636
637             /* Load parameters for j particles */
638             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
639                                                                  charge+jnrC+0,charge+jnrD+0,
640                                                                  charge+jnrE+0,charge+jnrF+0,
641                                                                  charge+jnrG+0,charge+jnrH+0);
642             vdwjidx0A        = 2*vdwtype[jnrA+0];
643             vdwjidx0B        = 2*vdwtype[jnrB+0];
644             vdwjidx0C        = 2*vdwtype[jnrC+0];
645             vdwjidx0D        = 2*vdwtype[jnrD+0];
646             vdwjidx0E        = 2*vdwtype[jnrE+0];
647             vdwjidx0F        = 2*vdwtype[jnrF+0];
648             vdwjidx0G        = 2*vdwtype[jnrG+0];
649             vdwjidx0H        = 2*vdwtype[jnrH+0];
650
651             fjx0             = _mm256_setzero_ps();
652             fjy0             = _mm256_setzero_ps();
653             fjz0             = _mm256_setzero_ps();
654
655             /**************************
656              * CALCULATE INTERACTIONS *
657              **************************/
658
659             if (gmx_mm256_any_lt(rsq00,rcutoff2))
660             {
661
662             r00              = _mm256_mul_ps(rsq00,rinv00);
663             r00              = _mm256_andnot_ps(dummy_mask,r00);
664
665             /* Compute parameters for interactions between i and j atoms */
666             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
667                                             vdwioffsetptr0+vdwjidx0B,
668                                             vdwioffsetptr0+vdwjidx0C,
669                                             vdwioffsetptr0+vdwjidx0D,
670                                             vdwioffsetptr0+vdwjidx0E,
671                                             vdwioffsetptr0+vdwjidx0F,
672                                             vdwioffsetptr0+vdwjidx0G,
673                                             vdwioffsetptr0+vdwjidx0H,
674                                             &c6_00,&c12_00);
675
676             /* LENNARD-JONES DISPERSION/REPULSION */
677
678             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
679             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
680             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
681             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
682             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
683
684             d                = _mm256_sub_ps(r00,rswitch);
685             d                = _mm256_max_ps(d,_mm256_setzero_ps());
686             d2               = _mm256_mul_ps(d,d);
687             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
688
689             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
690
691             /* Evaluate switch function */
692             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
693             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
694             vvdw             = _mm256_mul_ps(vvdw,sw);
695             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
696
697             /* Update potential sum for this i atom from the interaction with this j atom. */
698             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
699             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
700             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
701
702             fscal            = fvdw;
703
704             fscal            = _mm256_and_ps(fscal,cutoff_mask);
705
706             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
707
708             /* Calculate temporary vectorial force */
709             tx               = _mm256_mul_ps(fscal,dx00);
710             ty               = _mm256_mul_ps(fscal,dy00);
711             tz               = _mm256_mul_ps(fscal,dz00);
712
713             /* Update vectorial force */
714             fix0             = _mm256_add_ps(fix0,tx);
715             fiy0             = _mm256_add_ps(fiy0,ty);
716             fiz0             = _mm256_add_ps(fiz0,tz);
717
718             fjx0             = _mm256_add_ps(fjx0,tx);
719             fjy0             = _mm256_add_ps(fjy0,ty);
720             fjz0             = _mm256_add_ps(fjz0,tz);
721
722             }
723
724             /**************************
725              * CALCULATE INTERACTIONS *
726              **************************/
727
728             if (gmx_mm256_any_lt(rsq10,rcutoff2))
729             {
730
731             r10              = _mm256_mul_ps(rsq10,rinv10);
732             r10              = _mm256_andnot_ps(dummy_mask,r10);
733
734             /* Compute parameters for interactions between i and j atoms */
735             qq10             = _mm256_mul_ps(iq1,jq0);
736
737             /* EWALD ELECTROSTATICS */
738             
739             /* Analytical PME correction */
740             zeta2            = _mm256_mul_ps(beta2,rsq10);
741             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
742             pmecorrF         = avx256_pmecorrF_f(zeta2);
743             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
744             felec            = _mm256_mul_ps(qq10,felec);
745             pmecorrV         = avx256_pmecorrV_f(zeta2);
746             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
747             velec            = _mm256_sub_ps(rinv10,pmecorrV);
748             velec            = _mm256_mul_ps(qq10,velec);
749             
750             d                = _mm256_sub_ps(r10,rswitch);
751             d                = _mm256_max_ps(d,_mm256_setzero_ps());
752             d2               = _mm256_mul_ps(d,d);
753             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
754
755             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
756
757             /* Evaluate switch function */
758             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
759             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
760             velec            = _mm256_mul_ps(velec,sw);
761             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
762
763             /* Update potential sum for this i atom from the interaction with this j atom. */
764             velec            = _mm256_and_ps(velec,cutoff_mask);
765             velec            = _mm256_andnot_ps(dummy_mask,velec);
766             velecsum         = _mm256_add_ps(velecsum,velec);
767
768             fscal            = felec;
769
770             fscal            = _mm256_and_ps(fscal,cutoff_mask);
771
772             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
773
774             /* Calculate temporary vectorial force */
775             tx               = _mm256_mul_ps(fscal,dx10);
776             ty               = _mm256_mul_ps(fscal,dy10);
777             tz               = _mm256_mul_ps(fscal,dz10);
778
779             /* Update vectorial force */
780             fix1             = _mm256_add_ps(fix1,tx);
781             fiy1             = _mm256_add_ps(fiy1,ty);
782             fiz1             = _mm256_add_ps(fiz1,tz);
783
784             fjx0             = _mm256_add_ps(fjx0,tx);
785             fjy0             = _mm256_add_ps(fjy0,ty);
786             fjz0             = _mm256_add_ps(fjz0,tz);
787
788             }
789
790             /**************************
791              * CALCULATE INTERACTIONS *
792              **************************/
793
794             if (gmx_mm256_any_lt(rsq20,rcutoff2))
795             {
796
797             r20              = _mm256_mul_ps(rsq20,rinv20);
798             r20              = _mm256_andnot_ps(dummy_mask,r20);
799
800             /* Compute parameters for interactions between i and j atoms */
801             qq20             = _mm256_mul_ps(iq2,jq0);
802
803             /* EWALD ELECTROSTATICS */
804             
805             /* Analytical PME correction */
806             zeta2            = _mm256_mul_ps(beta2,rsq20);
807             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
808             pmecorrF         = avx256_pmecorrF_f(zeta2);
809             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
810             felec            = _mm256_mul_ps(qq20,felec);
811             pmecorrV         = avx256_pmecorrV_f(zeta2);
812             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
813             velec            = _mm256_sub_ps(rinv20,pmecorrV);
814             velec            = _mm256_mul_ps(qq20,velec);
815             
816             d                = _mm256_sub_ps(r20,rswitch);
817             d                = _mm256_max_ps(d,_mm256_setzero_ps());
818             d2               = _mm256_mul_ps(d,d);
819             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
820
821             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
822
823             /* Evaluate switch function */
824             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
825             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
826             velec            = _mm256_mul_ps(velec,sw);
827             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
828
829             /* Update potential sum for this i atom from the interaction with this j atom. */
830             velec            = _mm256_and_ps(velec,cutoff_mask);
831             velec            = _mm256_andnot_ps(dummy_mask,velec);
832             velecsum         = _mm256_add_ps(velecsum,velec);
833
834             fscal            = felec;
835
836             fscal            = _mm256_and_ps(fscal,cutoff_mask);
837
838             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
839
840             /* Calculate temporary vectorial force */
841             tx               = _mm256_mul_ps(fscal,dx20);
842             ty               = _mm256_mul_ps(fscal,dy20);
843             tz               = _mm256_mul_ps(fscal,dz20);
844
845             /* Update vectorial force */
846             fix2             = _mm256_add_ps(fix2,tx);
847             fiy2             = _mm256_add_ps(fiy2,ty);
848             fiz2             = _mm256_add_ps(fiz2,tz);
849
850             fjx0             = _mm256_add_ps(fjx0,tx);
851             fjy0             = _mm256_add_ps(fjy0,ty);
852             fjz0             = _mm256_add_ps(fjz0,tz);
853
854             }
855
856             /**************************
857              * CALCULATE INTERACTIONS *
858              **************************/
859
860             if (gmx_mm256_any_lt(rsq30,rcutoff2))
861             {
862
863             r30              = _mm256_mul_ps(rsq30,rinv30);
864             r30              = _mm256_andnot_ps(dummy_mask,r30);
865
866             /* Compute parameters for interactions between i and j atoms */
867             qq30             = _mm256_mul_ps(iq3,jq0);
868
869             /* EWALD ELECTROSTATICS */
870             
871             /* Analytical PME correction */
872             zeta2            = _mm256_mul_ps(beta2,rsq30);
873             rinv3            = _mm256_mul_ps(rinvsq30,rinv30);
874             pmecorrF         = avx256_pmecorrF_f(zeta2);
875             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
876             felec            = _mm256_mul_ps(qq30,felec);
877             pmecorrV         = avx256_pmecorrV_f(zeta2);
878             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
879             velec            = _mm256_sub_ps(rinv30,pmecorrV);
880             velec            = _mm256_mul_ps(qq30,velec);
881             
882             d                = _mm256_sub_ps(r30,rswitch);
883             d                = _mm256_max_ps(d,_mm256_setzero_ps());
884             d2               = _mm256_mul_ps(d,d);
885             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
886
887             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
888
889             /* Evaluate switch function */
890             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
891             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
892             velec            = _mm256_mul_ps(velec,sw);
893             cutoff_mask      = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
894
895             /* Update potential sum for this i atom from the interaction with this j atom. */
896             velec            = _mm256_and_ps(velec,cutoff_mask);
897             velec            = _mm256_andnot_ps(dummy_mask,velec);
898             velecsum         = _mm256_add_ps(velecsum,velec);
899
900             fscal            = felec;
901
902             fscal            = _mm256_and_ps(fscal,cutoff_mask);
903
904             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
905
906             /* Calculate temporary vectorial force */
907             tx               = _mm256_mul_ps(fscal,dx30);
908             ty               = _mm256_mul_ps(fscal,dy30);
909             tz               = _mm256_mul_ps(fscal,dz30);
910
911             /* Update vectorial force */
912             fix3             = _mm256_add_ps(fix3,tx);
913             fiy3             = _mm256_add_ps(fiy3,ty);
914             fiz3             = _mm256_add_ps(fiz3,tz);
915
916             fjx0             = _mm256_add_ps(fjx0,tx);
917             fjy0             = _mm256_add_ps(fjy0,ty);
918             fjz0             = _mm256_add_ps(fjz0,tz);
919
920             }
921
922             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
923             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
924             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
925             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
926             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
927             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
928             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
929             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
930
931             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
932
933             /* Inner loop uses 390 flops */
934         }
935
936         /* End of innermost loop */
937
938         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
939                                                  f+i_coord_offset,fshift+i_shift_offset);
940
941         ggid                        = gid[iidx];
942         /* Update potential energies */
943         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
944         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
945
946         /* Increment number of inner iterations */
947         inneriter                  += j_index_end - j_index_start;
948
949         /* Outer loop uses 26 flops */
950     }
951
952     /* Increment number of outer iterations */
953     outeriter        += nri;
954
955     /* Update outer/inner flops */
956
957     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*390);
958 }
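/*
 * Illustrative scalar reference (not part of the generated file): a minimal sketch of the
 * per-pair arithmetic that the SIMD loops above vectorize, shown to make the switched
 * Ewald + Lennard-Jones formulas explicit. The struct and function names are hypothetical,
 * erfc()/exp() from the <math.h> included above replace the avx256_pmecorrF_f/avx256_pmecorrV_f
 * polynomial approximations, plain c6/c12 are used (the kernels' stored parameters fold in the
 * 6/12 derivative prefactors), and both terms are evaluated for one pair, whereas the kernels
 * compute only LJ for site 0 and only Coulomb for sites 1-3. Numbers will therefore differ
 * slightly from the kernels themselves.
 */
struct PairEnergyForce
{
    double v;     /* switched pair potential                               */
    double fscal; /* scalar force divided by r: F_vec = fscal*(x_i - x_j)  */
};

static PairEnergyForce
referenceEwaldLJSwitchPair(double r, double qq, double c6, double c12,
                           double beta, double rswitch, double rcutoff)
{
    PairEnergyForce out = {0.0, 0.0};
    if (r >= rcutoff)
    {
        return out;
    }
    const double rinv          = 1.0/r;
    const double rinvsq        = rinv*rinv;
    const double rinvsix       = rinvsq*rinvsq*rinvsq;
    const double twoOverSqrtPi = 1.1283791670955126;

    /* Plain Lennard-Jones and real-space Ewald pair terms (erfc(x) = 1 - erf(x)) */
    const double vvdw  = c12*rinvsix*rinvsix - c6*rinvsix;
    const double fvdw  = (12.0*c12*rinvsix*rinvsix - 6.0*c6*rinvsix)*rinvsq;
    const double velec = qq*erfc(beta*r)*rinv;
    const double felec = qq*(erfc(beta*r)*rinv + twoOverSqrtPi*beta*exp(-beta*beta*r*r))*rinvsq;

    /* Quintic potential switch: V -> V*sw, and F/r -> (F/r)*sw - V*(dsw/dr)/r,
     * exactly the combination applied in the kernels above. */
    const double D   = rcutoff - rswitch;
    const double d   = (r > rswitch) ? (r - rswitch) : 0.0;
    const double t   = d/D;
    const double sw  = 1.0 + t*t*t*(-10.0 + t*(15.0 - 6.0*t));
    const double dsw = t*t*(-30.0 + t*(60.0 - 30.0*t))/D;

    out.v     = (vvdw + velec)*sw;
    out.fscal = (fvdw + felec)*sw - (vvdw + velec)*dsw*rinv;
    return out;
}
/* Accumulating fscal*(x_i - x_j) onto the i atom (and subtracting it from the j atom) for the
 * eight packed j particles and the four water sites is what one pass of the inner loops does. */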
959 /*
960  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single
961  * Electrostatics interaction: Ewald
962  * VdW interaction:            LennardJones
963  * Geometry:                   Water4-Particle
964  * Calculate force/pot:        Force
965  */
966 void
967 nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single
968                     (t_nblist                    * gmx_restrict       nlist,
969                      rvec                        * gmx_restrict          xx,
970                      rvec                        * gmx_restrict          ff,
971                      struct t_forcerec           * gmx_restrict          fr,
972                      t_mdatoms                   * gmx_restrict     mdatoms,
973                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
974                      t_nrnb                      * gmx_restrict        nrnb)
975 {
976     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
977      * just 0 for non-waters.
978      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
979  * jnr indices corresponding to data put in the eight positions in the SIMD register.
980      */
981     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
982     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
983     int              jnrA,jnrB,jnrC,jnrD;
984     int              jnrE,jnrF,jnrG,jnrH;
985     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
986     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
987     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
988     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
989     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
990     real             rcutoff_scalar;
991     real             *shiftvec,*fshift,*x,*f;
992     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
993     real             scratch[4*DIM];
994     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
995     real *           vdwioffsetptr0;
996     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
997     real *           vdwioffsetptr1;
998     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
999     real *           vdwioffsetptr2;
1000     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1001     real *           vdwioffsetptr3;
1002     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1003     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1004     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1005     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1006     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1007     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1008     __m256           dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
1009     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1010     real             *charge;
1011     int              nvdwtype;
1012     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1013     int              *vdwtype;
1014     real             *vdwparam;
1015     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
1016     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
1017     __m256i          ewitab;
1018     __m128i          ewitab_lo,ewitab_hi;
1019     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1020     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1021     real             *ewtab;
1022     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1023     real             rswitch_scalar,d_scalar;
1024     __m256           dummy_mask,cutoff_mask;
1025     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1026     __m256           one     = _mm256_set1_ps(1.0);
1027     __m256           two     = _mm256_set1_ps(2.0);
1028     x                = xx[0];
1029     f                = ff[0];
1030
1031     nri              = nlist->nri;
1032     iinr             = nlist->iinr;
1033     jindex           = nlist->jindex;
1034     jjnr             = nlist->jjnr;
1035     shiftidx         = nlist->shift;
1036     gid              = nlist->gid;
1037     shiftvec         = fr->shift_vec[0];
1038     fshift           = fr->fshift[0];
1039     facel            = _mm256_set1_ps(fr->ic->epsfac);
1040     charge           = mdatoms->chargeA;
1041     nvdwtype         = fr->ntype;
1042     vdwparam         = fr->nbfp;
1043     vdwtype          = mdatoms->typeA;
1044
1045     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1046     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1047     beta2            = _mm256_mul_ps(beta,beta);
1048     beta3            = _mm256_mul_ps(beta,beta2);
1049
1050     ewtab            = fr->ic->tabq_coul_FDV0;
1051     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1052     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1053
1054     /* Setup water-specific parameters */
1055     inr              = nlist->iinr[0];
1056     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1057     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1058     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1059     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1060
1061     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1062     rcutoff_scalar   = fr->ic->rcoulomb;
1063     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1064     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1065
1066     rswitch_scalar   = fr->ic->rcoulomb_switch;
1067     rswitch          = _mm256_set1_ps(rswitch_scalar);
1068     /* Setup switch parameters */
1069     d_scalar         = rcutoff_scalar-rswitch_scalar;
1070     d                = _mm256_set1_ps(d_scalar);
1071     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
1072     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1073     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1074     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
1075     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1076     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1077
1078     /* Avoid stupid compiler warnings */
1079     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1080     j_coord_offsetA = 0;
1081     j_coord_offsetB = 0;
1082     j_coord_offsetC = 0;
1083     j_coord_offsetD = 0;
1084     j_coord_offsetE = 0;
1085     j_coord_offsetF = 0;
1086     j_coord_offsetG = 0;
1087     j_coord_offsetH = 0;
1088
1089     outeriter        = 0;
1090     inneriter        = 0;
1091
1092     for(iidx=0;iidx<4*DIM;iidx++)
1093     {
1094         scratch[iidx] = 0.0;
1095     }
1096
1097     /* Start outer loop over neighborlists */
1098     for(iidx=0; iidx<nri; iidx++)
1099     {
1100         /* Load shift vector for this list */
1101         i_shift_offset   = DIM*shiftidx[iidx];
1102
1103         /* Load limits for loop over neighbors */
1104         j_index_start    = jindex[iidx];
1105         j_index_end      = jindex[iidx+1];
1106
1107         /* Get outer coordinate index */
1108         inr              = iinr[iidx];
1109         i_coord_offset   = DIM*inr;
1110
1111         /* Load i particle coords and add shift vector */
1112         gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1113                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1114
1115         fix0             = _mm256_setzero_ps();
1116         fiy0             = _mm256_setzero_ps();
1117         fiz0             = _mm256_setzero_ps();
1118         fix1             = _mm256_setzero_ps();
1119         fiy1             = _mm256_setzero_ps();
1120         fiz1             = _mm256_setzero_ps();
1121         fix2             = _mm256_setzero_ps();
1122         fiy2             = _mm256_setzero_ps();
1123         fiz2             = _mm256_setzero_ps();
1124         fix3             = _mm256_setzero_ps();
1125         fiy3             = _mm256_setzero_ps();
1126         fiz3             = _mm256_setzero_ps();
1127
1128         /* Start inner kernel loop */
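             /* jjnr is padded with negative indices at the end of each list, so this loop only
              * processes blocks in which all eight j entries are real; the first block that
              * contains padding is handled by the masked epilogue further down.
              */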
1129         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1130         {
1131
1132             /* Get j neighbor index, and coordinate index */
1133             jnrA             = jjnr[jidx];
1134             jnrB             = jjnr[jidx+1];
1135             jnrC             = jjnr[jidx+2];
1136             jnrD             = jjnr[jidx+3];
1137             jnrE             = jjnr[jidx+4];
1138             jnrF             = jjnr[jidx+5];
1139             jnrG             = jjnr[jidx+6];
1140             jnrH             = jjnr[jidx+7];
1141             j_coord_offsetA  = DIM*jnrA;
1142             j_coord_offsetB  = DIM*jnrB;
1143             j_coord_offsetC  = DIM*jnrC;
1144             j_coord_offsetD  = DIM*jnrD;
1145             j_coord_offsetE  = DIM*jnrE;
1146             j_coord_offsetF  = DIM*jnrF;
1147             j_coord_offsetG  = DIM*jnrG;
1148             j_coord_offsetH  = DIM*jnrH;
1149
1150             /* load j atom coordinates */
1151             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1152                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1153                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1154                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1155                                                  &jx0,&jy0,&jz0);
1156
1157             /* Calculate displacement vector */
1158             dx00             = _mm256_sub_ps(ix0,jx0);
1159             dy00             = _mm256_sub_ps(iy0,jy0);
1160             dz00             = _mm256_sub_ps(iz0,jz0);
1161             dx10             = _mm256_sub_ps(ix1,jx0);
1162             dy10             = _mm256_sub_ps(iy1,jy0);
1163             dz10             = _mm256_sub_ps(iz1,jz0);
1164             dx20             = _mm256_sub_ps(ix2,jx0);
1165             dy20             = _mm256_sub_ps(iy2,jy0);
1166             dz20             = _mm256_sub_ps(iz2,jz0);
1167             dx30             = _mm256_sub_ps(ix3,jx0);
1168             dy30             = _mm256_sub_ps(iy3,jy0);
1169             dz30             = _mm256_sub_ps(iz3,jz0);
1170
1171             /* Calculate squared distance and things based on it */
1172             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1173             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1174             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1175             rsq30            = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1176
1177             rinv00           = avx256_invsqrt_f(rsq00);
1178             rinv10           = avx256_invsqrt_f(rsq10);
1179             rinv20           = avx256_invsqrt_f(rsq20);
1180             rinv30           = avx256_invsqrt_f(rsq30);
1181
1182             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1183             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1184             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1185             rinvsq30         = _mm256_mul_ps(rinv30,rinv30);
1186
1187             /* Load parameters for j particles */
1188             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1189                                                                  charge+jnrC+0,charge+jnrD+0,
1190                                                                  charge+jnrE+0,charge+jnrF+0,
1191                                                                  charge+jnrG+0,charge+jnrH+0);
1192             vdwjidx0A        = 2*vdwtype[jnrA+0];
1193             vdwjidx0B        = 2*vdwtype[jnrB+0];
1194             vdwjidx0C        = 2*vdwtype[jnrC+0];
1195             vdwjidx0D        = 2*vdwtype[jnrD+0];
1196             vdwjidx0E        = 2*vdwtype[jnrE+0];
1197             vdwjidx0F        = 2*vdwtype[jnrF+0];
1198             vdwjidx0G        = 2*vdwtype[jnrG+0];
1199             vdwjidx0H        = 2*vdwtype[jnrH+0];
1200
1201             fjx0             = _mm256_setzero_ps();
1202             fjy0             = _mm256_setzero_ps();
1203             fjz0             = _mm256_setzero_ps();
1204
1205             /**************************
1206              * CALCULATE INTERACTIONS *
1207              **************************/
1208
1209             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1210             {
1211
1212             r00              = _mm256_mul_ps(rsq00,rinv00);
1213
1214             /* Compute parameters for interactions between i and j atoms */
1215             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1216                                             vdwioffsetptr0+vdwjidx0B,
1217                                             vdwioffsetptr0+vdwjidx0C,
1218                                             vdwioffsetptr0+vdwjidx0D,
1219                                             vdwioffsetptr0+vdwjidx0E,
1220                                             vdwioffsetptr0+vdwjidx0F,
1221                                             vdwioffsetptr0+vdwjidx0G,
1222                                             vdwioffsetptr0+vdwjidx0H,
1223                                             &c6_00,&c12_00);
1224
1225             /* LENNARD-JONES DISPERSION/REPULSION */
1226
1227             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1228             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
1229             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1230             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1231             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
1232
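                 /* Here vvdw6 = c6*r^-6, vvdw12 = c12*r^-12, vvdw = vvdw12/12 - vvdw6/6 and
                  * fvdw = (vvdw12 - vvdw6)*r^-2; since the c6/c12 values stored in fr->nbfp
                  * include the usual 6/12 prefactors, this gives the standard Lennard-Jones
                  * potential and force/r, which the switch below then modulates.
                  */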
1233             d                = _mm256_sub_ps(r00,rswitch);
1234             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1235             d2               = _mm256_mul_ps(d,d);
1236             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1237
1238             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1239
1240             /* Evaluate switch function */
1241             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1242             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1243             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1244
1245             fscal            = fvdw;
1246
1247             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1248
1249             /* Calculate temporary vectorial force */
1250             tx               = _mm256_mul_ps(fscal,dx00);
1251             ty               = _mm256_mul_ps(fscal,dy00);
1252             tz               = _mm256_mul_ps(fscal,dz00);
1253
1254             /* Update vectorial force */
1255             fix0             = _mm256_add_ps(fix0,tx);
1256             fiy0             = _mm256_add_ps(fiy0,ty);
1257             fiz0             = _mm256_add_ps(fiz0,tz);
1258
1259             fjx0             = _mm256_add_ps(fjx0,tx);
1260             fjy0             = _mm256_add_ps(fjy0,ty);
1261             fjz0             = _mm256_add_ps(fjz0,tz);
1262
1263             }
1264
1265             /**************************
1266              * CALCULATE INTERACTIONS *
1267              **************************/
1268
1269             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1270             {
1271
1272             r10              = _mm256_mul_ps(rsq10,rinv10);
1273
1274             /* Compute parameters for interactions between i and j atoms */
1275             qq10             = _mm256_mul_ps(iq1,jq0);
1276
1277             /* EWALD ELECTROSTATICS */
1278             
1279             /* Analytical PME correction */
1280             zeta2            = _mm256_mul_ps(beta2,rsq10);
1281             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1282             pmecorrF         = avx256_pmecorrF_f(zeta2);
1283             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1284             felec            = _mm256_mul_ps(qq10,felec);
1285             pmecorrV         = avx256_pmecorrV_f(zeta2);
1286             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1287             velec            = _mm256_sub_ps(rinv10,pmecorrV);
1288             velec            = _mm256_mul_ps(qq10,velec);
1289             
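                 /* The avx256_pmecorrF_f/avx256_pmecorrV_f polynomials approximate the erf-based
                  * correction to the plain 1/r^3 and 1/r Coulomb terms, so that
                  *     velec = qq*erfc(beta*r)/r
                  *     felec = qq*(erfc(beta*r) + (2*beta*r/sqrt(pi))*exp(-beta^2*r^2))/r^3
                  * i.e. the real-space Ewald pair potential and force/r.
                  */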
1290             d                = _mm256_sub_ps(r10,rswitch);
1291             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1292             d2               = _mm256_mul_ps(d,d);
1293             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1294
1295             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1296
1297             /* Evaluate switch function */
1298             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1299             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1300             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1301
1302             fscal            = felec;
1303
1304             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1305
1306             /* Calculate temporary vectorial force */
1307             tx               = _mm256_mul_ps(fscal,dx10);
1308             ty               = _mm256_mul_ps(fscal,dy10);
1309             tz               = _mm256_mul_ps(fscal,dz10);
1310
1311             /* Update vectorial force */
1312             fix1             = _mm256_add_ps(fix1,tx);
1313             fiy1             = _mm256_add_ps(fiy1,ty);
1314             fiz1             = _mm256_add_ps(fiz1,tz);
1315
1316             fjx0             = _mm256_add_ps(fjx0,tx);
1317             fjy0             = _mm256_add_ps(fjy0,ty);
1318             fjz0             = _mm256_add_ps(fjz0,tz);
1319
1320             }
1321
1322             /**************************
1323              * CALCULATE INTERACTIONS *
1324              **************************/
1325
1326             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1327             {
1328
1329             r20              = _mm256_mul_ps(rsq20,rinv20);
1330
1331             /* Compute parameters for interactions between i and j atoms */
1332             qq20             = _mm256_mul_ps(iq2,jq0);
1333
1334             /* EWALD ELECTROSTATICS */
1335             
1336             /* Analytical PME correction */
1337             zeta2            = _mm256_mul_ps(beta2,rsq20);
1338             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1339             pmecorrF         = avx256_pmecorrF_f(zeta2);
1340             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1341             felec            = _mm256_mul_ps(qq20,felec);
1342             pmecorrV         = avx256_pmecorrV_f(zeta2);
1343             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1344             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1345             velec            = _mm256_mul_ps(qq20,velec);
1346             
1347             d                = _mm256_sub_ps(r20,rswitch);
1348             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1349             d2               = _mm256_mul_ps(d,d);
1350             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1351
1352             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1353
1354             /* Evaluate switch function */
1355             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1356             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1357             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1358
1359             fscal            = felec;
1360
1361             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1362
1363             /* Calculate temporary vectorial force */
1364             tx               = _mm256_mul_ps(fscal,dx20);
1365             ty               = _mm256_mul_ps(fscal,dy20);
1366             tz               = _mm256_mul_ps(fscal,dz20);
1367
1368             /* Update vectorial force */
1369             fix2             = _mm256_add_ps(fix2,tx);
1370             fiy2             = _mm256_add_ps(fiy2,ty);
1371             fiz2             = _mm256_add_ps(fiz2,tz);
1372
1373             fjx0             = _mm256_add_ps(fjx0,tx);
1374             fjy0             = _mm256_add_ps(fjy0,ty);
1375             fjz0             = _mm256_add_ps(fjz0,tz);
1376
1377             }
1378
1379             /**************************
1380              * CALCULATE INTERACTIONS *
1381              **************************/
1382
1383             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1384             {
1385
1386             r30              = _mm256_mul_ps(rsq30,rinv30);
1387
1388             /* Compute parameters for interactions between i and j atoms */
1389             qq30             = _mm256_mul_ps(iq3,jq0);
1390
1391             /* EWALD ELECTROSTATICS */
1392             
1393             /* Analytical PME correction */
1394             zeta2            = _mm256_mul_ps(beta2,rsq30);
1395             rinv3            = _mm256_mul_ps(rinvsq30,rinv30);
1396             pmecorrF         = avx256_pmecorrF_f(zeta2);
1397             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1398             felec            = _mm256_mul_ps(qq30,felec);
1399             pmecorrV         = avx256_pmecorrV_f(zeta2);
1400             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1401             velec            = _mm256_sub_ps(rinv30,pmecorrV);
1402             velec            = _mm256_mul_ps(qq30,velec);
1403             
1404             d                = _mm256_sub_ps(r30,rswitch);
1405             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1406             d2               = _mm256_mul_ps(d,d);
1407             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1408
1409             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1410
1411             /* Evaluate switch function */
1412             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1413             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
1414             cutoff_mask      = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1415
1416             fscal            = felec;
1417
1418             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1419
1420             /* Calculate temporary vectorial force */
1421             tx               = _mm256_mul_ps(fscal,dx30);
1422             ty               = _mm256_mul_ps(fscal,dy30);
1423             tz               = _mm256_mul_ps(fscal,dz30);
1424
1425             /* Update vectorial force */
1426             fix3             = _mm256_add_ps(fix3,tx);
1427             fiy3             = _mm256_add_ps(fiy3,ty);
1428             fiz3             = _mm256_add_ps(fiz3,tz);
1429
1430             fjx0             = _mm256_add_ps(fjx0,tx);
1431             fjy0             = _mm256_add_ps(fjy0,ty);
1432             fjz0             = _mm256_add_ps(fjz0,tz);
1433
1434             }
1435
1436             fjptrA             = f+j_coord_offsetA;
1437             fjptrB             = f+j_coord_offsetB;
1438             fjptrC             = f+j_coord_offsetC;
1439             fjptrD             = f+j_coord_offsetD;
1440             fjptrE             = f+j_coord_offsetE;
1441             fjptrF             = f+j_coord_offsetF;
1442             fjptrG             = f+j_coord_offsetG;
1443             fjptrH             = f+j_coord_offsetH;
1444
1445             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1446
1447             /* Inner loop uses 374 flops */
1448         }
1449
1450         if(jidx<j_index_end)
1451         {
1452
1453             /* Get j neighbor index, and coordinate index */
1454             jnrlistA         = jjnr[jidx];
1455             jnrlistB         = jjnr[jidx+1];
1456             jnrlistC         = jjnr[jidx+2];
1457             jnrlistD         = jjnr[jidx+3];
1458             jnrlistE         = jjnr[jidx+4];
1459             jnrlistF         = jjnr[jidx+5];
1460             jnrlistG         = jjnr[jidx+6];
1461             jnrlistH         = jjnr[jidx+7];
1462             /* Padding (dummy) entries in the neighbor list have negative atom indices.
1463              * The mask below is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1464              * so it is used as val = _mm256_andnot_ps(mask,val) to clear dummy contributions.
1465              */
1466             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1467                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1468                                             
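                 /* The mask is built from two 128-bit integer compares combined with
                  * gmx_mm256_set_m128, since AVX without AVX2 provides no 256-bit integer compare.
                  */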
1469             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1470             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1471             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1472             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1473             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1474             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1475             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1476             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
1477             j_coord_offsetA  = DIM*jnrA;
1478             j_coord_offsetB  = DIM*jnrB;
1479             j_coord_offsetC  = DIM*jnrC;
1480             j_coord_offsetD  = DIM*jnrD;
1481             j_coord_offsetE  = DIM*jnrE;
1482             j_coord_offsetF  = DIM*jnrF;
1483             j_coord_offsetG  = DIM*jnrG;
1484             j_coord_offsetH  = DIM*jnrH;
1485
1486             /* load j atom coordinates */
1487             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1488                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1489                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1490                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1491                                                  &jx0,&jy0,&jz0);
1492
1493             /* Calculate displacement vector */
1494             dx00             = _mm256_sub_ps(ix0,jx0);
1495             dy00             = _mm256_sub_ps(iy0,jy0);
1496             dz00             = _mm256_sub_ps(iz0,jz0);
1497             dx10             = _mm256_sub_ps(ix1,jx0);
1498             dy10             = _mm256_sub_ps(iy1,jy0);
1499             dz10             = _mm256_sub_ps(iz1,jz0);
1500             dx20             = _mm256_sub_ps(ix2,jx0);
1501             dy20             = _mm256_sub_ps(iy2,jy0);
1502             dz20             = _mm256_sub_ps(iz2,jz0);
1503             dx30             = _mm256_sub_ps(ix3,jx0);
1504             dy30             = _mm256_sub_ps(iy3,jy0);
1505             dz30             = _mm256_sub_ps(iz3,jz0);
1506
1507             /* Calculate squared distance and things based on it */
1508             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1509             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1510             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1511             rsq30            = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1512
1513             rinv00           = avx256_invsqrt_f(rsq00);
1514             rinv10           = avx256_invsqrt_f(rsq10);
1515             rinv20           = avx256_invsqrt_f(rsq20);
1516             rinv30           = avx256_invsqrt_f(rsq30);
1517
1518             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1519             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1520             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1521             rinvsq30         = _mm256_mul_ps(rinv30,rinv30);
1522
1523             /* Load parameters for j particles */
1524             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1525                                                                  charge+jnrC+0,charge+jnrD+0,
1526                                                                  charge+jnrE+0,charge+jnrF+0,
1527                                                                  charge+jnrG+0,charge+jnrH+0);
1528             vdwjidx0A        = 2*vdwtype[jnrA+0];
1529             vdwjidx0B        = 2*vdwtype[jnrB+0];
1530             vdwjidx0C        = 2*vdwtype[jnrC+0];
1531             vdwjidx0D        = 2*vdwtype[jnrD+0];
1532             vdwjidx0E        = 2*vdwtype[jnrE+0];
1533             vdwjidx0F        = 2*vdwtype[jnrF+0];
1534             vdwjidx0G        = 2*vdwtype[jnrG+0];
1535             vdwjidx0H        = 2*vdwtype[jnrH+0];
1536
1537             fjx0             = _mm256_setzero_ps();
1538             fjy0             = _mm256_setzero_ps();
1539             fjz0             = _mm256_setzero_ps();
1540
1541             /**************************
1542              * CALCULATE INTERACTIONS *
1543              **************************/
1544
1545             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1546             {
1547
1548             r00              = _mm256_mul_ps(rsq00,rinv00);
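                 /* Clear r for padding lanes so the switch evaluation below operates on harmless
                  * values; fscal is masked with dummy_mask again before the force is accumulated.
                  */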
1549             r00              = _mm256_andnot_ps(dummy_mask,r00);
1550
1551             /* Compute parameters for interactions between i and j atoms */
1552             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1553                                             vdwioffsetptr0+vdwjidx0B,
1554                                             vdwioffsetptr0+vdwjidx0C,
1555                                             vdwioffsetptr0+vdwjidx0D,
1556                                             vdwioffsetptr0+vdwjidx0E,
1557                                             vdwioffsetptr0+vdwjidx0F,
1558                                             vdwioffsetptr0+vdwjidx0G,
1559                                             vdwioffsetptr0+vdwjidx0H,
1560                                             &c6_00,&c12_00);
1561
1562             /* LENNARD-JONES DISPERSION/REPULSION */
1563
1564             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1565             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
1566             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1567             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1568             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
1569
1570             d                = _mm256_sub_ps(r00,rswitch);
1571             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1572             d2               = _mm256_mul_ps(d,d);
1573             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1574
1575             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1576
1577             /* Evaluate switch function */
1578             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1579             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1580             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1581
1582             fscal            = fvdw;
1583
1584             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1585
1586             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1587
1588             /* Calculate temporary vectorial force */
1589             tx               = _mm256_mul_ps(fscal,dx00);
1590             ty               = _mm256_mul_ps(fscal,dy00);
1591             tz               = _mm256_mul_ps(fscal,dz00);
1592
1593             /* Update vectorial force */
1594             fix0             = _mm256_add_ps(fix0,tx);
1595             fiy0             = _mm256_add_ps(fiy0,ty);
1596             fiz0             = _mm256_add_ps(fiz0,tz);
1597
1598             fjx0             = _mm256_add_ps(fjx0,tx);
1599             fjy0             = _mm256_add_ps(fjy0,ty);
1600             fjz0             = _mm256_add_ps(fjz0,tz);
1601
1602             }
1603
1604             /**************************
1605              * CALCULATE INTERACTIONS *
1606              **************************/
1607
1608             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1609             {
1610
1611             r10              = _mm256_mul_ps(rsq10,rinv10);
1612             r10              = _mm256_andnot_ps(dummy_mask,r10);
1613
1614             /* Compute parameters for interactions between i and j atoms */
1615             qq10             = _mm256_mul_ps(iq1,jq0);
1616
1617             /* EWALD ELECTROSTATICS */
1618             
1619             /* Analytical PME correction */
1620             zeta2            = _mm256_mul_ps(beta2,rsq10);
1621             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1622             pmecorrF         = avx256_pmecorrF_f(zeta2);
1623             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1624             felec            = _mm256_mul_ps(qq10,felec);
1625             pmecorrV         = avx256_pmecorrV_f(zeta2);
1626             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1627             velec            = _mm256_sub_ps(rinv10,pmecorrV);
1628             velec            = _mm256_mul_ps(qq10,velec);
1629             
1630             d                = _mm256_sub_ps(r10,rswitch);
1631             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1632             d2               = _mm256_mul_ps(d,d);
1633             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1634
1635             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1636
1637             /* Evaluate switch function */
1638             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1639             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1640             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1641
1642             fscal            = felec;
1643
1644             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1645
1646             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1647
1648             /* Calculate temporary vectorial force */
1649             tx               = _mm256_mul_ps(fscal,dx10);
1650             ty               = _mm256_mul_ps(fscal,dy10);
1651             tz               = _mm256_mul_ps(fscal,dz10);
1652
1653             /* Update vectorial force */
1654             fix1             = _mm256_add_ps(fix1,tx);
1655             fiy1             = _mm256_add_ps(fiy1,ty);
1656             fiz1             = _mm256_add_ps(fiz1,tz);
1657
1658             fjx0             = _mm256_add_ps(fjx0,tx);
1659             fjy0             = _mm256_add_ps(fjy0,ty);
1660             fjz0             = _mm256_add_ps(fjz0,tz);
1661
1662             }
1663
1664             /**************************
1665              * CALCULATE INTERACTIONS *
1666              **************************/
1667
1668             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1669             {
1670
1671             r20              = _mm256_mul_ps(rsq20,rinv20);
1672             r20              = _mm256_andnot_ps(dummy_mask,r20);
1673
1674             /* Compute parameters for interactions between i and j atoms */
1675             qq20             = _mm256_mul_ps(iq2,jq0);
1676
1677             /* EWALD ELECTROSTATICS */
1678             
1679             /* Analytical PME correction */
1680             zeta2            = _mm256_mul_ps(beta2,rsq20);
1681             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1682             pmecorrF         = avx256_pmecorrF_f(zeta2);
1683             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1684             felec            = _mm256_mul_ps(qq20,felec);
1685             pmecorrV         = avx256_pmecorrV_f(zeta2);
1686             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1687             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1688             velec            = _mm256_mul_ps(qq20,velec);
1689             
1690             d                = _mm256_sub_ps(r20,rswitch);
1691             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1692             d2               = _mm256_mul_ps(d,d);
1693             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1694
1695             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1696
1697             /* Evaluate switch function */
1698             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1699             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1700             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1701
1702             fscal            = felec;
1703
1704             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1705
1706             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1707
1708             /* Calculate temporary vectorial force */
1709             tx               = _mm256_mul_ps(fscal,dx20);
1710             ty               = _mm256_mul_ps(fscal,dy20);
1711             tz               = _mm256_mul_ps(fscal,dz20);
1712
1713             /* Update vectorial force */
1714             fix2             = _mm256_add_ps(fix2,tx);
1715             fiy2             = _mm256_add_ps(fiy2,ty);
1716             fiz2             = _mm256_add_ps(fiz2,tz);
1717
1718             fjx0             = _mm256_add_ps(fjx0,tx);
1719             fjy0             = _mm256_add_ps(fjy0,ty);
1720             fjz0             = _mm256_add_ps(fjz0,tz);
1721
1722             }
1723
1724             /**************************
1725              * CALCULATE INTERACTIONS *
1726              **************************/
1727
1728             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1729             {
1730
1731             r30              = _mm256_mul_ps(rsq30,rinv30);
1732             r30              = _mm256_andnot_ps(dummy_mask,r30);
1733
1734             /* Compute parameters for interactions between i and j atoms */
1735             qq30             = _mm256_mul_ps(iq3,jq0);
1736
1737             /* EWALD ELECTROSTATICS */
1738             
1739             /* Analytical PME correction */
1740             zeta2            = _mm256_mul_ps(beta2,rsq30);
1741             rinv3            = _mm256_mul_ps(rinvsq30,rinv30);
1742             pmecorrF         = avx256_pmecorrF_f(zeta2);
1743             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1744             felec            = _mm256_mul_ps(qq30,felec);
1745             pmecorrV         = avx256_pmecorrV_f(zeta2);
1746             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1747             velec            = _mm256_sub_ps(rinv30,pmecorrV);
1748             velec            = _mm256_mul_ps(qq30,velec);
1749             
1750             d                = _mm256_sub_ps(r30,rswitch);
1751             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1752             d2               = _mm256_mul_ps(d,d);
1753             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1754
1755             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1756
1757             /* Evaluate switch function */
1758             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1759             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
1760             cutoff_mask      = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1761
1762             fscal            = felec;
1763
1764             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1765
1766             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1767
1768             /* Calculate temporary vectorial force */
1769             tx               = _mm256_mul_ps(fscal,dx30);
1770             ty               = _mm256_mul_ps(fscal,dy30);
1771             tz               = _mm256_mul_ps(fscal,dz30);
1772
1773             /* Update vectorial force */
1774             fix3             = _mm256_add_ps(fix3,tx);
1775             fiy3             = _mm256_add_ps(fiy3,ty);
1776             fiz3             = _mm256_add_ps(fiz3,tz);
1777
1778             fjx0             = _mm256_add_ps(fjx0,tx);
1779             fjy0             = _mm256_add_ps(fjy0,ty);
1780             fjz0             = _mm256_add_ps(fjz0,tz);
1781
1782             }
1783
1784             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1785             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1786             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1787             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1788             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1789             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1790             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1791             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1792
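                 /* Subtract the accumulated j forces; pointers for padding entries point at
                  * scratch, and their lane contributions were already zeroed via dummy_mask.
                  */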
1793             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1794
1795             /* Inner loop uses 378 flops */
1796         }
1797
1798         /* End of innermost loop */
1799
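             /* Reduce the twelve i-force accumulators over the eight SIMD lanes, add them to the
              * forces of the four water atoms, and add the same sums to fshift for the
              * shift-force (virial) bookkeeping.
              */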
1800         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1801                                                  f+i_coord_offset,fshift+i_shift_offset);
1802
1803         /* Increment number of inner iterations */
1804         inneriter                  += j_index_end - j_index_start;
1805
1806         /* Outer loop uses 24 flops */
1807     }
1808
1809     /* Increment number of outer iterations */
1810     outeriter        += nri;
1811
1812     /* Update outer/inner flops */
1813
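         /* The counts (24 flops per outer iteration and the epilogue figure of 378 flops per
          * eight-wide inner iteration quoted above) feed the flop accounting reported in the
          * run log.
          */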
1814     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*378);
1815 }