1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "gmxpre.h"
39
40 #include "config.h"
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
46
47 #include "kernelutil_x86_avx_256_single.h"
48
49 /*
50  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_single
51  * Electrostatics interaction: Ewald
52  * VdW interaction:            None
53  * Geometry:                   Water3-Particle
54  * Calculate force/pot:        PotentialAndForce
55  */
56 void
57 nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_single
58                     (t_nblist                    * gmx_restrict       nlist,
59                      rvec                        * gmx_restrict          xx,
60                      rvec                        * gmx_restrict          ff,
61                      struct t_forcerec           * gmx_restrict          fr,
62                      t_mdatoms                   * gmx_restrict     mdatoms,
63                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64                      t_nrnb                      * gmx_restrict        nrnb)
65 {
66     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
67      * just 0 for non-waters.
68      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
69      * jnr indices corresponding to data put in the eight positions in the SIMD register.
70      */
71     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
72     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73     int              jnrA,jnrB,jnrC,jnrD;
74     int              jnrE,jnrF,jnrG,jnrH;
75     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
79     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
80     real             rcutoff_scalar;
81     real             *shiftvec,*fshift,*x,*f;
82     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
83     real             scratch[4*DIM];
84     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85     real *           vdwioffsetptr0;
86     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
87     real *           vdwioffsetptr1;
88     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
89     real *           vdwioffsetptr2;
90     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
91     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
92     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
93     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
94     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
95     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
96     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
97     real             *charge;
98     __m256i          ewitab;
99     __m128i          ewitab_lo,ewitab_hi;
100     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
101     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
102     real             *ewtab;
103     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
104     real             rswitch_scalar,d_scalar;
105     __m256           dummy_mask,cutoff_mask;
106     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
107     __m256           one     = _mm256_set1_ps(1.0);
108     __m256           two     = _mm256_set1_ps(2.0);
109     x                = xx[0];
110     f                = ff[0];
111
112     nri              = nlist->nri;
113     iinr             = nlist->iinr;
114     jindex           = nlist->jindex;
115     jjnr             = nlist->jjnr;
116     shiftidx         = nlist->shift;
117     gid              = nlist->gid;
118     shiftvec         = fr->shift_vec[0];
119     fshift           = fr->fshift[0];
120     facel            = _mm256_set1_ps(fr->ic->epsfac);
121     charge           = mdatoms->chargeA;
122
123     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
124     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
125     beta2            = _mm256_mul_ps(beta,beta);
126     beta3            = _mm256_mul_ps(beta,beta2);
127
128     ewtab            = fr->ic->tabq_coul_FDV0;
129     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
130     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
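    /* Note: sh_ewald and the tabulated-Ewald setup above (ewtab, ewtabscale, ewtabhalfspace)
     * appear unused in this kernel, which relies on the analytical PME correction in the
     * inner loops rather than table lookups.
     */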
131
132     /* Setup water-specific parameters */
133     inr              = nlist->iinr[0];
134     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
135     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
136     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
137
138     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
139     rcutoff_scalar   = fr->ic->rcoulomb;
140     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
141     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
142
143     rswitch_scalar   = fr->ic->rcoulomb_switch;
144     rswitch          = _mm256_set1_ps(rswitch_scalar);
145     /* Setup switch parameters */
146     d_scalar         = rcutoff_scalar-rswitch_scalar;
147     d                = _mm256_set1_ps(d_scalar);
148     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
149     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
150     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
151     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
152     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
153     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
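    /* With D = rcutoff - rswitch (d_scalar above) and d = max(r - rswitch, 0) in the loops
     * below, these coefficients give the usual fifth-order switching polynomial
     *     sw(d)  = 1 - 10*(d/D)^3 + 15*(d/D)^4 - 6*(d/D)^5
     *     dsw(d) = d^2*(swF2 + d*(swF3 + d*swF4)),  the derivative of sw with respect to r,
     * so sw falls smoothly from 1 at rswitch to 0 at rcutoff with vanishing first and second
     * derivatives at both ends.
     */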
154
155     /* Avoid stupid compiler warnings */
156     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
157     j_coord_offsetA = 0;
158     j_coord_offsetB = 0;
159     j_coord_offsetC = 0;
160     j_coord_offsetD = 0;
161     j_coord_offsetE = 0;
162     j_coord_offsetF = 0;
163     j_coord_offsetG = 0;
164     j_coord_offsetH = 0;
165
166     outeriter        = 0;
167     inneriter        = 0;
168
169     for(iidx=0;iidx<4*DIM;iidx++)
170     {
171         scratch[iidx] = 0.0;
172     }
173
174     /* Start outer loop over neighborlists */
175     for(iidx=0; iidx<nri; iidx++)
176     {
177         /* Load shift vector for this list */
178         i_shift_offset   = DIM*shiftidx[iidx];
179
180         /* Load limits for loop over neighbors */
181         j_index_start    = jindex[iidx];
182         j_index_end      = jindex[iidx+1];
183
184         /* Get outer coordinate index */
185         inr              = iinr[iidx];
186         i_coord_offset   = DIM*inr;
187
188         /* Load i particle coords and add shift vector */
189         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
190                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
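        /* The helper above loads the three atoms of the water i molecule, adds the periodic
         * shift vector for this list, and broadcasts the result to all eight AVX lanes, so
         * every lane pairs the same i atoms with a different j atom.
         */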
191
192         fix0             = _mm256_setzero_ps();
193         fiy0             = _mm256_setzero_ps();
194         fiz0             = _mm256_setzero_ps();
195         fix1             = _mm256_setzero_ps();
196         fiy1             = _mm256_setzero_ps();
197         fiz1             = _mm256_setzero_ps();
198         fix2             = _mm256_setzero_ps();
199         fiy2             = _mm256_setzero_ps();
200         fiz2             = _mm256_setzero_ps();
201
202         /* Reset potential sums */
203         velecsum         = _mm256_setzero_ps();
204
205         /* Start inner kernel loop */
206         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
207         {
208
209             /* Get j neighbor index, and coordinate index */
210             jnrA             = jjnr[jidx];
211             jnrB             = jjnr[jidx+1];
212             jnrC             = jjnr[jidx+2];
213             jnrD             = jjnr[jidx+3];
214             jnrE             = jjnr[jidx+4];
215             jnrF             = jjnr[jidx+5];
216             jnrG             = jjnr[jidx+6];
217             jnrH             = jjnr[jidx+7];
218             j_coord_offsetA  = DIM*jnrA;
219             j_coord_offsetB  = DIM*jnrB;
220             j_coord_offsetC  = DIM*jnrC;
221             j_coord_offsetD  = DIM*jnrD;
222             j_coord_offsetE  = DIM*jnrE;
223             j_coord_offsetF  = DIM*jnrF;
224             j_coord_offsetG  = DIM*jnrG;
225             j_coord_offsetH  = DIM*jnrH;
226
227             /* load j atom coordinates */
228             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
229                                                  x+j_coord_offsetC,x+j_coord_offsetD,
230                                                  x+j_coord_offsetE,x+j_coord_offsetF,
231                                                  x+j_coord_offsetG,x+j_coord_offsetH,
232                                                  &jx0,&jy0,&jz0);
233
234             /* Calculate displacement vector */
235             dx00             = _mm256_sub_ps(ix0,jx0);
236             dy00             = _mm256_sub_ps(iy0,jy0);
237             dz00             = _mm256_sub_ps(iz0,jz0);
238             dx10             = _mm256_sub_ps(ix1,jx0);
239             dy10             = _mm256_sub_ps(iy1,jy0);
240             dz10             = _mm256_sub_ps(iz1,jz0);
241             dx20             = _mm256_sub_ps(ix2,jx0);
242             dy20             = _mm256_sub_ps(iy2,jy0);
243             dz20             = _mm256_sub_ps(iz2,jz0);
244
245             /* Calculate squared distance and things based on it */
246             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
247             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
248             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
249
250             rinv00           = avx256_invsqrt_f(rsq00);
251             rinv10           = avx256_invsqrt_f(rsq10);
252             rinv20           = avx256_invsqrt_f(rsq20);
253
254             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
255             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
256             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
257
258             /* Load parameters for j particles */
259             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
260                                                                  charge+jnrC+0,charge+jnrD+0,
261                                                                  charge+jnrE+0,charge+jnrF+0,
262                                                                  charge+jnrG+0,charge+jnrH+0);
263
264             fjx0             = _mm256_setzero_ps();
265             fjy0             = _mm256_setzero_ps();
266             fjz0             = _mm256_setzero_ps();
267
268             /**************************
269              * CALCULATE INTERACTIONS *
270              **************************/
271
272             if (gmx_mm256_any_lt(rsq00,rcutoff2))
273             {
274
275             r00              = _mm256_mul_ps(rsq00,rinv00);
276
277             /* Compute parameters for interactions between i and j atoms */
278             qq00             = _mm256_mul_ps(iq0,jq0);
279
280             /* EWALD ELECTROSTATICS */
281             
282             /* Analytical PME correction */
283             zeta2            = _mm256_mul_ps(beta2,rsq00);
284             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
285             pmecorrF         = avx256_pmecorrF_f(zeta2);
286             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
287             felec            = _mm256_mul_ps(qq00,felec);
288             pmecorrV         = avx256_pmecorrV_f(zeta2);
289             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
290             velec            = _mm256_sub_ps(rinv00,pmecorrV);
291             velec            = _mm256_mul_ps(qq00,velec);
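            /* Here and in the analogous blocks below, the pmecorr helpers presumably
             * approximate the real-space Ewald terms: with beta*pmecorrV(beta^2*r^2) ~ erf(beta*r)/r,
             * velec becomes ~ qq*erfc(beta*r)/r, and felec (the pmecorrF term plus rinv3)
             * is the corresponding scalar force divided by r.
             */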
292             
293             d                = _mm256_sub_ps(r00,rswitch);
294             d                = _mm256_max_ps(d,_mm256_setzero_ps());
295             d2               = _mm256_mul_ps(d,d);
296             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
297
298             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
299
300             /* Evaluate switch function */
301             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
302             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
303             velec            = _mm256_mul_ps(velec,sw);
304             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
305
306             /* Update potential sum for this i atom from the interaction with this j atom. */
307             velec            = _mm256_and_ps(velec,cutoff_mask);
308             velecsum         = _mm256_add_ps(velecsum,velec);
309
310             fscal            = felec;
311
312             fscal            = _mm256_and_ps(fscal,cutoff_mask);
313
314             /* Calculate temporary vectorial force */
315             tx               = _mm256_mul_ps(fscal,dx00);
316             ty               = _mm256_mul_ps(fscal,dy00);
317             tz               = _mm256_mul_ps(fscal,dz00);
318
319             /* Update vectorial force */
320             fix0             = _mm256_add_ps(fix0,tx);
321             fiy0             = _mm256_add_ps(fiy0,ty);
322             fiz0             = _mm256_add_ps(fiz0,tz);
323
324             fjx0             = _mm256_add_ps(fjx0,tx);
325             fjy0             = _mm256_add_ps(fjy0,ty);
326             fjz0             = _mm256_add_ps(fjz0,tz);
327
328             }
329
330             /**************************
331              * CALCULATE INTERACTIONS *
332              **************************/
333
334             if (gmx_mm256_any_lt(rsq10,rcutoff2))
335             {
336
337             r10              = _mm256_mul_ps(rsq10,rinv10);
338
339             /* Compute parameters for interactions between i and j atoms */
340             qq10             = _mm256_mul_ps(iq1,jq0);
341
342             /* EWALD ELECTROSTATICS */
343             
344             /* Analytical PME correction */
345             zeta2            = _mm256_mul_ps(beta2,rsq10);
346             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
347             pmecorrF         = avx256_pmecorrF_f(zeta2);
348             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
349             felec            = _mm256_mul_ps(qq10,felec);
350             pmecorrV         = avx256_pmecorrV_f(zeta2);
351             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
352             velec            = _mm256_sub_ps(rinv10,pmecorrV);
353             velec            = _mm256_mul_ps(qq10,velec);
354             
355             d                = _mm256_sub_ps(r10,rswitch);
356             d                = _mm256_max_ps(d,_mm256_setzero_ps());
357             d2               = _mm256_mul_ps(d,d);
358             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
359
360             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
361
362             /* Evaluate switch function */
363             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
364             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
365             velec            = _mm256_mul_ps(velec,sw);
366             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
367
368             /* Update potential sum for this i atom from the interaction with this j atom. */
369             velec            = _mm256_and_ps(velec,cutoff_mask);
370             velecsum         = _mm256_add_ps(velecsum,velec);
371
372             fscal            = felec;
373
374             fscal            = _mm256_and_ps(fscal,cutoff_mask);
375
376             /* Calculate temporary vectorial force */
377             tx               = _mm256_mul_ps(fscal,dx10);
378             ty               = _mm256_mul_ps(fscal,dy10);
379             tz               = _mm256_mul_ps(fscal,dz10);
380
381             /* Update vectorial force */
382             fix1             = _mm256_add_ps(fix1,tx);
383             fiy1             = _mm256_add_ps(fiy1,ty);
384             fiz1             = _mm256_add_ps(fiz1,tz);
385
386             fjx0             = _mm256_add_ps(fjx0,tx);
387             fjy0             = _mm256_add_ps(fjy0,ty);
388             fjz0             = _mm256_add_ps(fjz0,tz);
389
390             }
391
392             /**************************
393              * CALCULATE INTERACTIONS *
394              **************************/
395
396             if (gmx_mm256_any_lt(rsq20,rcutoff2))
397             {
398
399             r20              = _mm256_mul_ps(rsq20,rinv20);
400
401             /* Compute parameters for interactions between i and j atoms */
402             qq20             = _mm256_mul_ps(iq2,jq0);
403
404             /* EWALD ELECTROSTATICS */
405             
406             /* Analytical PME correction */
407             zeta2            = _mm256_mul_ps(beta2,rsq20);
408             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
409             pmecorrF         = avx256_pmecorrF_f(zeta2);
410             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
411             felec            = _mm256_mul_ps(qq20,felec);
412             pmecorrV         = avx256_pmecorrV_f(zeta2);
413             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
414             velec            = _mm256_sub_ps(rinv20,pmecorrV);
415             velec            = _mm256_mul_ps(qq20,velec);
416             
417             d                = _mm256_sub_ps(r20,rswitch);
418             d                = _mm256_max_ps(d,_mm256_setzero_ps());
419             d2               = _mm256_mul_ps(d,d);
420             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
421
422             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
423
424             /* Evaluate switch function */
425             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
426             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
427             velec            = _mm256_mul_ps(velec,sw);
428             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
429
430             /* Update potential sum for this i atom from the interaction with this j atom. */
431             velec            = _mm256_and_ps(velec,cutoff_mask);
432             velecsum         = _mm256_add_ps(velecsum,velec);
433
434             fscal            = felec;
435
436             fscal            = _mm256_and_ps(fscal,cutoff_mask);
437
438             /* Calculate temporary vectorial force */
439             tx               = _mm256_mul_ps(fscal,dx20);
440             ty               = _mm256_mul_ps(fscal,dy20);
441             tz               = _mm256_mul_ps(fscal,dz20);
442
443             /* Update vectorial force */
444             fix2             = _mm256_add_ps(fix2,tx);
445             fiy2             = _mm256_add_ps(fiy2,ty);
446             fiz2             = _mm256_add_ps(fiz2,tz);
447
448             fjx0             = _mm256_add_ps(fjx0,tx);
449             fjy0             = _mm256_add_ps(fjy0,ty);
450             fjz0             = _mm256_add_ps(fjz0,tz);
451
452             }
453
454             fjptrA             = f+j_coord_offsetA;
455             fjptrB             = f+j_coord_offsetB;
456             fjptrC             = f+j_coord_offsetC;
457             fjptrD             = f+j_coord_offsetD;
458             fjptrE             = f+j_coord_offsetE;
459             fjptrF             = f+j_coord_offsetF;
460             fjptrG             = f+j_coord_offsetG;
461             fjptrH             = f+j_coord_offsetH;
462
463             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
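            /* Scatter the accumulated j forces; the helper subtracts fj* from the j force
             * arrays, which is the Newton's-third-law counterpart of the increments added to
             * the i-atom accumulators above.
             */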
464
465             /* Inner loop uses 327 flops */
466         }
467
468         if(jidx<j_index_end)
469         {
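            /* Epilogue for the last, partially filled group of up to eight j entries; the
             * neighbor list pads it with negative indices, which are converted into dummy_mask
             * below and cleared from the energies and forces.
             */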
470
471             /* Get j neighbor index, and coordinate index */
472             jnrlistA         = jjnr[jidx];
473             jnrlistB         = jjnr[jidx+1];
474             jnrlistC         = jjnr[jidx+2];
475             jnrlistD         = jjnr[jidx+3];
476             jnrlistE         = jjnr[jidx+4];
477             jnrlistF         = jjnr[jidx+5];
478             jnrlistG         = jjnr[jidx+6];
479             jnrlistH         = jjnr[jidx+7];
480             /* Sign of each element will be negative for non-real atoms.
481              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
482              * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
483              */
484             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
485                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
486                                             
487             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
488             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
489             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
490             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
491             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
492             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
493             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
494             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
495             j_coord_offsetA  = DIM*jnrA;
496             j_coord_offsetB  = DIM*jnrB;
497             j_coord_offsetC  = DIM*jnrC;
498             j_coord_offsetD  = DIM*jnrD;
499             j_coord_offsetE  = DIM*jnrE;
500             j_coord_offsetF  = DIM*jnrF;
501             j_coord_offsetG  = DIM*jnrG;
502             j_coord_offsetH  = DIM*jnrH;
503
504             /* load j atom coordinates */
505             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
506                                                  x+j_coord_offsetC,x+j_coord_offsetD,
507                                                  x+j_coord_offsetE,x+j_coord_offsetF,
508                                                  x+j_coord_offsetG,x+j_coord_offsetH,
509                                                  &jx0,&jy0,&jz0);
510
511             /* Calculate displacement vector */
512             dx00             = _mm256_sub_ps(ix0,jx0);
513             dy00             = _mm256_sub_ps(iy0,jy0);
514             dz00             = _mm256_sub_ps(iz0,jz0);
515             dx10             = _mm256_sub_ps(ix1,jx0);
516             dy10             = _mm256_sub_ps(iy1,jy0);
517             dz10             = _mm256_sub_ps(iz1,jz0);
518             dx20             = _mm256_sub_ps(ix2,jx0);
519             dy20             = _mm256_sub_ps(iy2,jy0);
520             dz20             = _mm256_sub_ps(iz2,jz0);
521
522             /* Calculate squared distance and things based on it */
523             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
524             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
525             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
526
527             rinv00           = avx256_invsqrt_f(rsq00);
528             rinv10           = avx256_invsqrt_f(rsq10);
529             rinv20           = avx256_invsqrt_f(rsq20);
530
531             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
532             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
533             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
534
535             /* Load parameters for j particles */
536             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
537                                                                  charge+jnrC+0,charge+jnrD+0,
538                                                                  charge+jnrE+0,charge+jnrF+0,
539                                                                  charge+jnrG+0,charge+jnrH+0);
540
541             fjx0             = _mm256_setzero_ps();
542             fjy0             = _mm256_setzero_ps();
543             fjz0             = _mm256_setzero_ps();
544
545             /**************************
546              * CALCULATE INTERACTIONS *
547              **************************/
548
549             if (gmx_mm256_any_lt(rsq00,rcutoff2))
550             {
551
552             r00              = _mm256_mul_ps(rsq00,rinv00);
553             r00              = _mm256_andnot_ps(dummy_mask,r00);
554
555             /* Compute parameters for interactions between i and j atoms */
556             qq00             = _mm256_mul_ps(iq0,jq0);
557
558             /* EWALD ELECTROSTATICS */
559             
560             /* Analytical PME correction */
561             zeta2            = _mm256_mul_ps(beta2,rsq00);
562             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
563             pmecorrF         = avx256_pmecorrF_f(zeta2);
564             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
565             felec            = _mm256_mul_ps(qq00,felec);
566             pmecorrV         = avx256_pmecorrV_f(zeta2);
567             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
568             velec            = _mm256_sub_ps(rinv00,pmecorrV);
569             velec            = _mm256_mul_ps(qq00,velec);
570             
571             d                = _mm256_sub_ps(r00,rswitch);
572             d                = _mm256_max_ps(d,_mm256_setzero_ps());
573             d2               = _mm256_mul_ps(d,d);
574             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
575
576             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
577
578             /* Evaluate switch function */
579             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
580             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
581             velec            = _mm256_mul_ps(velec,sw);
582             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
583
584             /* Update potential sum for this i atom from the interaction with this j atom. */
585             velec            = _mm256_and_ps(velec,cutoff_mask);
586             velec            = _mm256_andnot_ps(dummy_mask,velec);
587             velecsum         = _mm256_add_ps(velecsum,velec);
588
589             fscal            = felec;
590
591             fscal            = _mm256_and_ps(fscal,cutoff_mask);
592
593             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
594
595             /* Calculate temporary vectorial force */
596             tx               = _mm256_mul_ps(fscal,dx00);
597             ty               = _mm256_mul_ps(fscal,dy00);
598             tz               = _mm256_mul_ps(fscal,dz00);
599
600             /* Update vectorial force */
601             fix0             = _mm256_add_ps(fix0,tx);
602             fiy0             = _mm256_add_ps(fiy0,ty);
603             fiz0             = _mm256_add_ps(fiz0,tz);
604
605             fjx0             = _mm256_add_ps(fjx0,tx);
606             fjy0             = _mm256_add_ps(fjy0,ty);
607             fjz0             = _mm256_add_ps(fjz0,tz);
608
609             }
610
611             /**************************
612              * CALCULATE INTERACTIONS *
613              **************************/
614
615             if (gmx_mm256_any_lt(rsq10,rcutoff2))
616             {
617
618             r10              = _mm256_mul_ps(rsq10,rinv10);
619             r10              = _mm256_andnot_ps(dummy_mask,r10);
620
621             /* Compute parameters for interactions between i and j atoms */
622             qq10             = _mm256_mul_ps(iq1,jq0);
623
624             /* EWALD ELECTROSTATICS */
625             
626             /* Analytical PME correction */
627             zeta2            = _mm256_mul_ps(beta2,rsq10);
628             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
629             pmecorrF         = avx256_pmecorrF_f(zeta2);
630             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
631             felec            = _mm256_mul_ps(qq10,felec);
632             pmecorrV         = avx256_pmecorrV_f(zeta2);
633             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
634             velec            = _mm256_sub_ps(rinv10,pmecorrV);
635             velec            = _mm256_mul_ps(qq10,velec);
636             
637             d                = _mm256_sub_ps(r10,rswitch);
638             d                = _mm256_max_ps(d,_mm256_setzero_ps());
639             d2               = _mm256_mul_ps(d,d);
640             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
641
642             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
643
644             /* Evaluate switch function */
645             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
646             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
647             velec            = _mm256_mul_ps(velec,sw);
648             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
649
650             /* Update potential sum for this i atom from the interaction with this j atom. */
651             velec            = _mm256_and_ps(velec,cutoff_mask);
652             velec            = _mm256_andnot_ps(dummy_mask,velec);
653             velecsum         = _mm256_add_ps(velecsum,velec);
654
655             fscal            = felec;
656
657             fscal            = _mm256_and_ps(fscal,cutoff_mask);
658
659             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
660
661             /* Calculate temporary vectorial force */
662             tx               = _mm256_mul_ps(fscal,dx10);
663             ty               = _mm256_mul_ps(fscal,dy10);
664             tz               = _mm256_mul_ps(fscal,dz10);
665
666             /* Update vectorial force */
667             fix1             = _mm256_add_ps(fix1,tx);
668             fiy1             = _mm256_add_ps(fiy1,ty);
669             fiz1             = _mm256_add_ps(fiz1,tz);
670
671             fjx0             = _mm256_add_ps(fjx0,tx);
672             fjy0             = _mm256_add_ps(fjy0,ty);
673             fjz0             = _mm256_add_ps(fjz0,tz);
674
675             }
676
677             /**************************
678              * CALCULATE INTERACTIONS *
679              **************************/
680
681             if (gmx_mm256_any_lt(rsq20,rcutoff2))
682             {
683
684             r20              = _mm256_mul_ps(rsq20,rinv20);
685             r20              = _mm256_andnot_ps(dummy_mask,r20);
686
687             /* Compute parameters for interactions between i and j atoms */
688             qq20             = _mm256_mul_ps(iq2,jq0);
689
690             /* EWALD ELECTROSTATICS */
691             
692             /* Analytical PME correction */
693             zeta2            = _mm256_mul_ps(beta2,rsq20);
694             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
695             pmecorrF         = avx256_pmecorrF_f(zeta2);
696             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
697             felec            = _mm256_mul_ps(qq20,felec);
698             pmecorrV         = avx256_pmecorrV_f(zeta2);
699             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
700             velec            = _mm256_sub_ps(rinv20,pmecorrV);
701             velec            = _mm256_mul_ps(qq20,velec);
702             
703             d                = _mm256_sub_ps(r20,rswitch);
704             d                = _mm256_max_ps(d,_mm256_setzero_ps());
705             d2               = _mm256_mul_ps(d,d);
706             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
707
708             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
709
710             /* Evaluate switch function */
711             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
712             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
713             velec            = _mm256_mul_ps(velec,sw);
714             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
715
716             /* Update potential sum for this i atom from the interaction with this j atom. */
717             velec            = _mm256_and_ps(velec,cutoff_mask);
718             velec            = _mm256_andnot_ps(dummy_mask,velec);
719             velecsum         = _mm256_add_ps(velecsum,velec);
720
721             fscal            = felec;
722
723             fscal            = _mm256_and_ps(fscal,cutoff_mask);
724
725             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
726
727             /* Calculate temporary vectorial force */
728             tx               = _mm256_mul_ps(fscal,dx20);
729             ty               = _mm256_mul_ps(fscal,dy20);
730             tz               = _mm256_mul_ps(fscal,dz20);
731
732             /* Update vectorial force */
733             fix2             = _mm256_add_ps(fix2,tx);
734             fiy2             = _mm256_add_ps(fiy2,ty);
735             fiz2             = _mm256_add_ps(fiz2,tz);
736
737             fjx0             = _mm256_add_ps(fjx0,tx);
738             fjy0             = _mm256_add_ps(fjy0,ty);
739             fjz0             = _mm256_add_ps(fjz0,tz);
740
741             }
742
743             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
744             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
745             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
746             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
747             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
748             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
749             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
750             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
751
752             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
753
754             /* Inner loop uses 330 flops */
755         }
756
757         /* End of innermost loop */
758
759         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
760                                                  f+i_coord_offset,fshift+i_shift_offset);
761
762         ggid                        = gid[iidx];
763         /* Update potential energies */
764         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
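        /* The helper above presumably reduces the eight lanes of velecsum horizontally and
         * adds the result to the electrostatic energy accumulator of energy-group pair ggid. */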
765
766         /* Increment number of inner iterations */
767         inneriter                  += j_index_end - j_index_start;
768
769         /* Outer loop uses 19 flops */
770     }
771
772     /* Increment number of outer iterations */
773     outeriter        += nri;
774
775     /* Update outer/inner flops */
776
777     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*330);
778 }
779 /*
780  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_single
781  * Electrostatics interaction: Ewald
782  * VdW interaction:            None
783  * Geometry:                   Water3-Particle
784  * Calculate force/pot:        Force
785  */
786 void
787 nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_single
788                     (t_nblist                    * gmx_restrict       nlist,
789                      rvec                        * gmx_restrict          xx,
790                      rvec                        * gmx_restrict          ff,
791                      struct t_forcerec           * gmx_restrict          fr,
792                      t_mdatoms                   * gmx_restrict     mdatoms,
793                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
794                      t_nrnb                      * gmx_restrict        nrnb)
795 {
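    /* Force-only counterpart of the kernel above: it evaluates the same switched Ewald
     * interactions, but velec is only needed for the switch-derivative term of the force,
     * so no potential sum (velecsum) is accumulated.
     */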
796     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
797      * just 0 for non-waters.
798      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
799      * jnr indices corresponding to data put in the eight positions in the SIMD register.
800      */
801     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
802     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
803     int              jnrA,jnrB,jnrC,jnrD;
804     int              jnrE,jnrF,jnrG,jnrH;
805     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
806     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
807     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
808     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
809     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
810     real             rcutoff_scalar;
811     real             *shiftvec,*fshift,*x,*f;
812     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
813     real             scratch[4*DIM];
814     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
815     real *           vdwioffsetptr0;
816     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
817     real *           vdwioffsetptr1;
818     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
819     real *           vdwioffsetptr2;
820     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
821     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
822     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
823     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
824     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
825     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
826     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
827     real             *charge;
828     __m256i          ewitab;
829     __m128i          ewitab_lo,ewitab_hi;
830     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
831     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
832     real             *ewtab;
833     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
834     real             rswitch_scalar,d_scalar;
835     __m256           dummy_mask,cutoff_mask;
836     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
837     __m256           one     = _mm256_set1_ps(1.0);
838     __m256           two     = _mm256_set1_ps(2.0);
839     x                = xx[0];
840     f                = ff[0];
841
842     nri              = nlist->nri;
843     iinr             = nlist->iinr;
844     jindex           = nlist->jindex;
845     jjnr             = nlist->jjnr;
846     shiftidx         = nlist->shift;
847     gid              = nlist->gid;
848     shiftvec         = fr->shift_vec[0];
849     fshift           = fr->fshift[0];
850     facel            = _mm256_set1_ps(fr->ic->epsfac);
851     charge           = mdatoms->chargeA;
852
853     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
854     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
855     beta2            = _mm256_mul_ps(beta,beta);
856     beta3            = _mm256_mul_ps(beta,beta2);
857
858     ewtab            = fr->ic->tabq_coul_FDV0;
859     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
860     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
861
862     /* Setup water-specific parameters */
863     inr              = nlist->iinr[0];
864     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
865     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
866     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
867
868     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
869     rcutoff_scalar   = fr->ic->rcoulomb;
870     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
871     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
872
873     rswitch_scalar   = fr->ic->rcoulomb_switch;
874     rswitch          = _mm256_set1_ps(rswitch_scalar);
875     /* Setup switch parameters */
876     d_scalar         = rcutoff_scalar-rswitch_scalar;
877     d                = _mm256_set1_ps(d_scalar);
878     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
879     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
880     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
881     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
882     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
883     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
884
885     /* Avoid stupid compiler warnings */
886     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
887     j_coord_offsetA = 0;
888     j_coord_offsetB = 0;
889     j_coord_offsetC = 0;
890     j_coord_offsetD = 0;
891     j_coord_offsetE = 0;
892     j_coord_offsetF = 0;
893     j_coord_offsetG = 0;
894     j_coord_offsetH = 0;
895
896     outeriter        = 0;
897     inneriter        = 0;
898
899     for(iidx=0;iidx<4*DIM;iidx++)
900     {
901         scratch[iidx] = 0.0;
902     }
903
904     /* Start outer loop over neighborlists */
905     for(iidx=0; iidx<nri; iidx++)
906     {
907         /* Load shift vector for this list */
908         i_shift_offset   = DIM*shiftidx[iidx];
909
910         /* Load limits for loop over neighbors */
911         j_index_start    = jindex[iidx];
912         j_index_end      = jindex[iidx+1];
913
914         /* Get outer coordinate index */
915         inr              = iinr[iidx];
916         i_coord_offset   = DIM*inr;
917
918         /* Load i particle coords and add shift vector */
919         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
920                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
921
922         fix0             = _mm256_setzero_ps();
923         fiy0             = _mm256_setzero_ps();
924         fiz0             = _mm256_setzero_ps();
925         fix1             = _mm256_setzero_ps();
926         fiy1             = _mm256_setzero_ps();
927         fiz1             = _mm256_setzero_ps();
928         fix2             = _mm256_setzero_ps();
929         fiy2             = _mm256_setzero_ps();
930         fiz2             = _mm256_setzero_ps();
931
932         /* Start inner kernel loop */
933         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
934         {
935
936             /* Get j neighbor index, and coordinate index */
937             jnrA             = jjnr[jidx];
938             jnrB             = jjnr[jidx+1];
939             jnrC             = jjnr[jidx+2];
940             jnrD             = jjnr[jidx+3];
941             jnrE             = jjnr[jidx+4];
942             jnrF             = jjnr[jidx+5];
943             jnrG             = jjnr[jidx+6];
944             jnrH             = jjnr[jidx+7];
945             j_coord_offsetA  = DIM*jnrA;
946             j_coord_offsetB  = DIM*jnrB;
947             j_coord_offsetC  = DIM*jnrC;
948             j_coord_offsetD  = DIM*jnrD;
949             j_coord_offsetE  = DIM*jnrE;
950             j_coord_offsetF  = DIM*jnrF;
951             j_coord_offsetG  = DIM*jnrG;
952             j_coord_offsetH  = DIM*jnrH;
953
954             /* load j atom coordinates */
955             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
956                                                  x+j_coord_offsetC,x+j_coord_offsetD,
957                                                  x+j_coord_offsetE,x+j_coord_offsetF,
958                                                  x+j_coord_offsetG,x+j_coord_offsetH,
959                                                  &jx0,&jy0,&jz0);
960
961             /* Calculate displacement vector */
962             dx00             = _mm256_sub_ps(ix0,jx0);
963             dy00             = _mm256_sub_ps(iy0,jy0);
964             dz00             = _mm256_sub_ps(iz0,jz0);
965             dx10             = _mm256_sub_ps(ix1,jx0);
966             dy10             = _mm256_sub_ps(iy1,jy0);
967             dz10             = _mm256_sub_ps(iz1,jz0);
968             dx20             = _mm256_sub_ps(ix2,jx0);
969             dy20             = _mm256_sub_ps(iy2,jy0);
970             dz20             = _mm256_sub_ps(iz2,jz0);
971
972             /* Calculate squared distance and things based on it */
973             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
974             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
975             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
976
977             rinv00           = avx256_invsqrt_f(rsq00);
978             rinv10           = avx256_invsqrt_f(rsq10);
979             rinv20           = avx256_invsqrt_f(rsq20);
980
981             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
982             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
983             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
984
985             /* Load parameters for j particles */
986             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
987                                                                  charge+jnrC+0,charge+jnrD+0,
988                                                                  charge+jnrE+0,charge+jnrF+0,
989                                                                  charge+jnrG+0,charge+jnrH+0);
990
991             fjx0             = _mm256_setzero_ps();
992             fjy0             = _mm256_setzero_ps();
993             fjz0             = _mm256_setzero_ps();
994
995             /**************************
996              * CALCULATE INTERACTIONS *
997              **************************/
998
999             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1000             {
1001
1002             r00              = _mm256_mul_ps(rsq00,rinv00);
1003
1004             /* Compute parameters for interactions between i and j atoms */
1005             qq00             = _mm256_mul_ps(iq0,jq0);
1006
1007             /* EWALD ELECTROSTATICS */
1008             
1009             /* Analytical PME correction */
1010             zeta2            = _mm256_mul_ps(beta2,rsq00);
1011             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1012             pmecorrF         = avx256_pmecorrF_f(zeta2);
1013             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1014             felec            = _mm256_mul_ps(qq00,felec);
1015             pmecorrV         = avx256_pmecorrV_f(zeta2);
1016             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1017             velec            = _mm256_sub_ps(rinv00,pmecorrV);
1018             velec            = _mm256_mul_ps(qq00,velec);
1019             
1020             d                = _mm256_sub_ps(r00,rswitch);
1021             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1022             d2               = _mm256_mul_ps(d,d);
1023             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1024
1025             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1026
1027             /* Evaluate switch function */
1028             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1029             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
1030             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1031
1032             fscal            = felec;
1033
1034             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1035
1036             /* Calculate temporary vectorial force */
1037             tx               = _mm256_mul_ps(fscal,dx00);
1038             ty               = _mm256_mul_ps(fscal,dy00);
1039             tz               = _mm256_mul_ps(fscal,dz00);
1040
1041             /* Update vectorial force */
1042             fix0             = _mm256_add_ps(fix0,tx);
1043             fiy0             = _mm256_add_ps(fiy0,ty);
1044             fiz0             = _mm256_add_ps(fiz0,tz);
1045
1046             fjx0             = _mm256_add_ps(fjx0,tx);
1047             fjy0             = _mm256_add_ps(fjy0,ty);
1048             fjz0             = _mm256_add_ps(fjz0,tz);
1049
1050             }
1051
1052             /**************************
1053              * CALCULATE INTERACTIONS *
1054              **************************/
1055
1056             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1057             {
1058
1059             r10              = _mm256_mul_ps(rsq10,rinv10);
1060
1061             /* Compute parameters for interactions between i and j atoms */
1062             qq10             = _mm256_mul_ps(iq1,jq0);
1063
1064             /* EWALD ELECTROSTATICS */
1065             
1066             /* Analytical PME correction */
1067             zeta2            = _mm256_mul_ps(beta2,rsq10);
1068             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1069             pmecorrF         = avx256_pmecorrF_f(zeta2);
1070             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1071             felec            = _mm256_mul_ps(qq10,felec);
1072             pmecorrV         = avx256_pmecorrV_f(zeta2);
1073             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1074             velec            = _mm256_sub_ps(rinv10,pmecorrV);
1075             velec            = _mm256_mul_ps(qq10,velec);
1076             
1077             d                = _mm256_sub_ps(r10,rswitch);
1078             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1079             d2               = _mm256_mul_ps(d,d);
1080             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1081
1082             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1083
1084             /* Evaluate switch function */
1085             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1086             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1087             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1088
1089             fscal            = felec;
1090
1091             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1092
1093             /* Calculate temporary vectorial force */
1094             tx               = _mm256_mul_ps(fscal,dx10);
1095             ty               = _mm256_mul_ps(fscal,dy10);
1096             tz               = _mm256_mul_ps(fscal,dz10);
1097
1098             /* Update vectorial force */
1099             fix1             = _mm256_add_ps(fix1,tx);
1100             fiy1             = _mm256_add_ps(fiy1,ty);
1101             fiz1             = _mm256_add_ps(fiz1,tz);
1102
1103             fjx0             = _mm256_add_ps(fjx0,tx);
1104             fjy0             = _mm256_add_ps(fjy0,ty);
1105             fjz0             = _mm256_add_ps(fjz0,tz);
1106
1107             }
1108
1109             /**************************
1110              * CALCULATE INTERACTIONS *
1111              **************************/
1112
1113             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1114             {
1115
1116             r20              = _mm256_mul_ps(rsq20,rinv20);
1117
1118             /* Compute parameters for interactions between i and j atoms */
1119             qq20             = _mm256_mul_ps(iq2,jq0);
1120
1121             /* EWALD ELECTROSTATICS */
1122             
1123             /* Analytical PME correction */
1124             zeta2            = _mm256_mul_ps(beta2,rsq20);
1125             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1126             pmecorrF         = avx256_pmecorrF_f(zeta2);
1127             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1128             felec            = _mm256_mul_ps(qq20,felec);
1129             pmecorrV         = avx256_pmecorrV_f(zeta2);
1130             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1131             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1132             velec            = _mm256_mul_ps(qq20,velec);
1133             
1134             d                = _mm256_sub_ps(r20,rswitch);
1135             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1136             d2               = _mm256_mul_ps(d,d);
1137             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1138
1139             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1140
1141             /* Evaluate switch function */
1142             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1143             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1144             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1145
1146             fscal            = felec;
1147
1148             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1149
1150             /* Calculate temporary vectorial force */
1151             tx               = _mm256_mul_ps(fscal,dx20);
1152             ty               = _mm256_mul_ps(fscal,dy20);
1153             tz               = _mm256_mul_ps(fscal,dz20);
1154
1155             /* Update vectorial force */
1156             fix2             = _mm256_add_ps(fix2,tx);
1157             fiy2             = _mm256_add_ps(fiy2,ty);
1158             fiz2             = _mm256_add_ps(fiz2,tz);
1159
1160             fjx0             = _mm256_add_ps(fjx0,tx);
1161             fjy0             = _mm256_add_ps(fjy0,ty);
1162             fjz0             = _mm256_add_ps(fjz0,tz);
1163
1164             }
1165
1166             fjptrA             = f+j_coord_offsetA;
1167             fjptrB             = f+j_coord_offsetB;
1168             fjptrC             = f+j_coord_offsetC;
1169             fjptrD             = f+j_coord_offsetD;
1170             fjptrE             = f+j_coord_offsetE;
1171             fjptrF             = f+j_coord_offsetF;
1172             fjptrG             = f+j_coord_offsetG;
1173             fjptrH             = f+j_coord_offsetH;
1174
1175             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1176
1177             /* Inner loop uses 318 flops */
1178         }
1179
1180         if(jidx<j_index_end)
1181         {
1182
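            /* This block is the epilogue of the inner loop: it handles the last,
             * possibly partially filled batch of eight j entries, with padded
             * entries flagged and masked out below.
             */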
1183             /* Get j neighbor index, and coordinate index */
1184             jnrlistA         = jjnr[jidx];
1185             jnrlistB         = jjnr[jidx+1];
1186             jnrlistC         = jjnr[jidx+2];
1187             jnrlistD         = jjnr[jidx+3];
1188             jnrlistE         = jjnr[jidx+4];
1189             jnrlistF         = jjnr[jidx+5];
1190             jnrlistG         = jjnr[jidx+6];
1191             jnrlistH         = jjnr[jidx+7];
1192             /* Padded (non-real) entries in the j list have negative indices.
1193              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1194              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
1195              */
1196             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1197                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1198                                             
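            /* dummy_mask is built directly from the signed j indices (one
             * 128-bit comparison per register half); the clamping to zero below
             * keeps the coordinate and charge loads in bounds for padded lanes,
             * whose contributions are later removed by the mask.
             */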
1199             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1200             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1201             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1202             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1203             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1204             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1205             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1206             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
1207             j_coord_offsetA  = DIM*jnrA;
1208             j_coord_offsetB  = DIM*jnrB;
1209             j_coord_offsetC  = DIM*jnrC;
1210             j_coord_offsetD  = DIM*jnrD;
1211             j_coord_offsetE  = DIM*jnrE;
1212             j_coord_offsetF  = DIM*jnrF;
1213             j_coord_offsetG  = DIM*jnrG;
1214             j_coord_offsetH  = DIM*jnrH;
1215
1216             /* load j atom coordinates */
1217             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1218                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1219                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1220                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1221                                                  &jx0,&jy0,&jz0);
1222
1223             /* Calculate displacement vector */
1224             dx00             = _mm256_sub_ps(ix0,jx0);
1225             dy00             = _mm256_sub_ps(iy0,jy0);
1226             dz00             = _mm256_sub_ps(iz0,jz0);
1227             dx10             = _mm256_sub_ps(ix1,jx0);
1228             dy10             = _mm256_sub_ps(iy1,jy0);
1229             dz10             = _mm256_sub_ps(iz1,jz0);
1230             dx20             = _mm256_sub_ps(ix2,jx0);
1231             dy20             = _mm256_sub_ps(iy2,jy0);
1232             dz20             = _mm256_sub_ps(iz2,jz0);
1233
1234             /* Calculate squared distance and derived quantities */
1235             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1236             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1237             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1238
1239             rinv00           = avx256_invsqrt_f(rsq00);
1240             rinv10           = avx256_invsqrt_f(rsq10);
1241             rinv20           = avx256_invsqrt_f(rsq20);
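            /* Note: avx256_invsqrt_f presumably combines the hardware rsqrt
             * estimate with a Newton-Raphson step to reach single precision;
             * rinvsq is then obtained by squaring the result.
             */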
1242
1243             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1244             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1245             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1246
1247             /* Load parameters for j particles */
1248             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1249                                                                  charge+jnrC+0,charge+jnrD+0,
1250                                                                  charge+jnrE+0,charge+jnrF+0,
1251                                                                  charge+jnrG+0,charge+jnrH+0);
1252
1253             fjx0             = _mm256_setzero_ps();
1254             fjy0             = _mm256_setzero_ps();
1255             fjz0             = _mm256_setzero_ps();
1256
1257             /**************************
1258              * CALCULATE INTERACTIONS *
1259              **************************/
1260
1261             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1262             {
1263
1264             r00              = _mm256_mul_ps(rsq00,rinv00);
1265             r00              = _mm256_andnot_ps(dummy_mask,r00);
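            /* Note: r00 is cleared for dummy lanes, presumably to keep Inf/NaN
             * from a (near-)zero rsq in a padded lane out of the switching
             * polynomial; the corresponding force is masked out below anyway.
             */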
1266
1267             /* Compute parameters for interactions between i and j atoms */
1268             qq00             = _mm256_mul_ps(iq0,jq0);
1269
1270             /* EWALD ELECTROSTATICS */
1271             
1272             /* Analytical PME correction */
1273             zeta2            = _mm256_mul_ps(beta2,rsq00);
1274             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1275             pmecorrF         = avx256_pmecorrF_f(zeta2);
1276             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1277             felec            = _mm256_mul_ps(qq00,felec);
1278             pmecorrV         = avx256_pmecorrV_f(zeta2);
1279             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1280             velec            = _mm256_sub_ps(rinv00,pmecorrV);
1281             velec            = _mm256_mul_ps(qq00,velec);
1282             
1283             d                = _mm256_sub_ps(r00,rswitch);
1284             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1285             d2               = _mm256_mul_ps(d,d);
1286             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1287
1288             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1289
1290             /* Evaluate switch function */
1291             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1292             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
1293             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1294
1295             fscal            = felec;
1296
1297             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1298
1299             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1300
1301             /* Calculate temporary vectorial force */
1302             tx               = _mm256_mul_ps(fscal,dx00);
1303             ty               = _mm256_mul_ps(fscal,dy00);
1304             tz               = _mm256_mul_ps(fscal,dz00);
1305
1306             /* Update vectorial force */
1307             fix0             = _mm256_add_ps(fix0,tx);
1308             fiy0             = _mm256_add_ps(fiy0,ty);
1309             fiz0             = _mm256_add_ps(fiz0,tz);
1310
1311             fjx0             = _mm256_add_ps(fjx0,tx);
1312             fjy0             = _mm256_add_ps(fjy0,ty);
1313             fjz0             = _mm256_add_ps(fjz0,tz);
1314
1315             }
1316
1317             /**************************
1318              * CALCULATE INTERACTIONS *
1319              **************************/
1320
1321             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1322             {
1323
1324             r10              = _mm256_mul_ps(rsq10,rinv10);
1325             r10              = _mm256_andnot_ps(dummy_mask,r10);
1326
1327             /* Compute parameters for interactions between i and j atoms */
1328             qq10             = _mm256_mul_ps(iq1,jq0);
1329
1330             /* EWALD ELECTROSTATICS */
1331             
1332             /* Analytical PME correction */
1333             zeta2            = _mm256_mul_ps(beta2,rsq10);
1334             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1335             pmecorrF         = avx256_pmecorrF_f(zeta2);
1336             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1337             felec            = _mm256_mul_ps(qq10,felec);
1338             pmecorrV         = avx256_pmecorrV_f(zeta2);
1339             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1340             velec            = _mm256_sub_ps(rinv10,pmecorrV);
1341             velec            = _mm256_mul_ps(qq10,velec);
1342             
1343             d                = _mm256_sub_ps(r10,rswitch);
1344             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1345             d2               = _mm256_mul_ps(d,d);
1346             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1347
1348             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1349
1350             /* Evaluate switch function */
1351             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1352             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1353             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1354
1355             fscal            = felec;
1356
1357             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1358
1359             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1360
1361             /* Calculate temporary vectorial force */
1362             tx               = _mm256_mul_ps(fscal,dx10);
1363             ty               = _mm256_mul_ps(fscal,dy10);
1364             tz               = _mm256_mul_ps(fscal,dz10);
1365
1366             /* Update vectorial force */
1367             fix1             = _mm256_add_ps(fix1,tx);
1368             fiy1             = _mm256_add_ps(fiy1,ty);
1369             fiz1             = _mm256_add_ps(fiz1,tz);
1370
1371             fjx0             = _mm256_add_ps(fjx0,tx);
1372             fjy0             = _mm256_add_ps(fjy0,ty);
1373             fjz0             = _mm256_add_ps(fjz0,tz);
1374
1375             }
1376
1377             /**************************
1378              * CALCULATE INTERACTIONS *
1379              **************************/
1380
1381             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1382             {
1383
1384             r20              = _mm256_mul_ps(rsq20,rinv20);
1385             r20              = _mm256_andnot_ps(dummy_mask,r20);
1386
1387             /* Compute parameters for interactions between i and j atoms */
1388             qq20             = _mm256_mul_ps(iq2,jq0);
1389
1390             /* EWALD ELECTROSTATICS */
1391             
1392             /* Analytical PME correction */
1393             zeta2            = _mm256_mul_ps(beta2,rsq20);
1394             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1395             pmecorrF         = avx256_pmecorrF_f(zeta2);
1396             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1397             felec            = _mm256_mul_ps(qq20,felec);
1398             pmecorrV         = avx256_pmecorrV_f(zeta2);
1399             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1400             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1401             velec            = _mm256_mul_ps(qq20,velec);
1402             
1403             d                = _mm256_sub_ps(r20,rswitch);
1404             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1405             d2               = _mm256_mul_ps(d,d);
1406             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1407
1408             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1409
1410             /* Evaluate switch function */
1411             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1412             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1413             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1414
1415             fscal            = felec;
1416
1417             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1418
1419             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1420
1421             /* Calculate temporary vectorial force */
1422             tx               = _mm256_mul_ps(fscal,dx20);
1423             ty               = _mm256_mul_ps(fscal,dy20);
1424             tz               = _mm256_mul_ps(fscal,dz20);
1425
1426             /* Update vectorial force */
1427             fix2             = _mm256_add_ps(fix2,tx);
1428             fiy2             = _mm256_add_ps(fiy2,ty);
1429             fiz2             = _mm256_add_ps(fiz2,tz);
1430
1431             fjx0             = _mm256_add_ps(fjx0,tx);
1432             fjy0             = _mm256_add_ps(fjy0,ty);
1433             fjz0             = _mm256_add_ps(fjz0,tz);
1434
1435             }
1436
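            /* j forces for dummy lanes are redirected to a scratch buffer so
             * that the eight-pointer scatter-subtract below can run
             * unconditionally without corrupting real force entries.
             */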
1437             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1438             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1439             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1440             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1441             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1442             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1443             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1444             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1445
1446             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1447
1448             /* Inner loop uses 321 flops */
1449         }
1450
1451         /* End of innermost loop */
1452
1453         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1454                                                  f+i_coord_offset,fshift+i_shift_offset);
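        /* The accumulated i forces for the three water atoms are reduced across
         * the eight SIMD lanes and added to f[], with the same sums accumulated
         * into fshift[] (presumably for the virial), by the call above.
         */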
1455
1456         /* Increment number of inner iterations */
1457         inneriter                  += j_index_end - j_index_start;
1458
1459         /* Outer loop uses 18 flops */
1460     }
1461
1462     /* Increment number of outer iterations */
1463     outeriter        += nri;
1464
1465     /* Update outer/inner flops */
1466
1467     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*321);
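    /* The nrnb counter records an estimated 18 flops per outer iteration and
     * 321 per inner iteration (the higher, masked-epilogue count) under
     * eNR_NBKERNEL_ELEC_W3_F for the performance summary.
     */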
1468 }