src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_single/nb_kernel_ElecEwSw_VdwNone_GeomW3W3_avx_256_single.c
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            None
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        PotentialAndForce
 */
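/* A scalar sketch of the interaction evaluated below (our summary, assuming
 * the standard GROMACS switched real-space Ewald form): each i-water/j-water
 * combination contributes nine charge-charge terms of the form
 *
 *     V(r)  = qq * erfc(beta*r)/r * sw(r)
 *     sw(r) = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5,   d = max(r - rswitch, 0)
 *
 * with sw == 1 below rswitch and sw == 0 at rcutoff, so the potential and
 * the force both go smoothly to zero at the cutoff.
 */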
void
nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
    __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
    real             rswitch_scalar,d_scalar;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
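    /* The Ewald table parameters above are initialized by the generator, but
     * this kernel variant evaluates the PME correction analytically with
     * gmx_mm256_pmecorrF_ps()/gmx_mm256_pmecorrV_ps() below, so ewtab and
     * ewtabscale appear to go unused here. */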

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));

    jq0              = _mm256_set1_ps(charge[inr+0]);
    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    qq00             = _mm256_mul_ps(iq0,jq0);
    qq01             = _mm256_mul_ps(iq0,jq1);
    qq02             = _mm256_mul_ps(iq0,jq2);
    qq10             = _mm256_mul_ps(iq1,jq0);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq20             = _mm256_mul_ps(iq2,jq0);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
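    /* All nine qi*qj products above are loop-invariant because every i and j
     * particle is the same three-site water; epsfac is already folded into
     * iq0..iq2. */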

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    rswitch_scalar   = fr->rcoulomb_switch;
    rswitch          = _mm256_set1_ps(rswitch_scalar);
    /* Setup switch parameters */
    d_scalar         = rcutoff_scalar-rswitch_scalar;
    d                = _mm256_set1_ps(d_scalar);
    swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
    swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
    swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
    swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
    swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
    swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));

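    /* The coefficients above define the quintic switch
     *     sw(x)  = 1 + x^3*(swV3 + x*(swV4 + x*swV5))
     *     dsw(x) = x^2*(swF2 + x*(swF3 + x*swF4))   (= d sw/dx)
     * with x = r - rswitch. sw goes from 1 at rswitch to 0 at rcutoff, and its
     * first and second derivatives vanish at both endpoints, so V*sw and the
     * resulting force are smooth through the switching region.
     */
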
    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();

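        /* The j loop below is unrolled eight-wide; it runs full SIMD iterations
         * while jjnr[jidx+7] >= 0, and the partially filled tail chunk (padded
         * with negative indices) is handled by the masked pass after this loop. */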
        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

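            /* gmx_mm256_invsqrt_ps() returns 1/sqrt(rsq) per lane; in single
             * precision this is typically the hardware rsqrt estimate refined
             * by one Newton-Raphson iteration (see the SIMD math header). */
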
            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
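            /* With zeta = beta*r (zeta2 = zeta^2), gmx_mm256_pmecorrV_ps(zeta2)
             * approximates erf(zeta)/zeta, so velec below works out to
             * qq*erfc(beta*r)/r, the real-space Ewald potential.
             * gmx_mm256_pmecorrF_ps supplies the matching derivative term, giving
             * felec = qq*(erfc(beta*r)/r^3 + 2*beta*exp(-(beta*r)^2)/(sqrt(pi)*r^2)),
             * i.e. -(dV/dr)/r before switching (our reading of the helpers in
             * math_x86_avx_256_single.h). */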
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv00,pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            d                = _mm256_sub_ps(r00,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }
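
            /* The eight blocks below repeat the same switched-Ewald evaluation
             * for atom pairs 01, 02, 10, 11, 12, 20, 21 and 22; only the
             * distances, charge products and force accumulators change. */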

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv01,pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            d                = _mm256_sub_ps(r01,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv02,pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            d                = _mm256_sub_ps(r02,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv10,pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            d                = _mm256_sub_ps(r10,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv11,pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            d                = _mm256_sub_ps(r11,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv12,pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            d                = _mm256_sub_ps(r12,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv20,pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            d                = _mm256_sub_ps(r20,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv21,pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            d                = _mm256_sub_ps(r21,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv22,pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            d                = _mm256_sub_ps(r22,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

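            /* Scatter the accumulated j forces: the swizzled decrement
             * transposes the eight SIMD lanes back to the per-atom rvec layout
             * and subtracts them from f[], the Newton's-third-law counterpart
             * of the i-force additions above. */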
            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 972 flops */
        }

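        /* Tail pass: up to seven remaining list entries are processed with the
         * same eight-wide code path, with the padded lanes masked out. */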
        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

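            /* Clamp the padded (negative) indices to 0 so the coordinate loads
             * below stay in bounds; dummy_mask removes any contribution from
             * these duplicated atom-0 lanes afterwards. */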
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);
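            /* Presumably this clears r in the padded lanes so the switch
             * evaluation below sees d = 0 (sw = 1) there rather than distances
             * to the duplicated atom-0 coordinates; those lanes are zeroed out
             * again via dummy_mask before accumulation in any case. */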

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv00,pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            d                = _mm256_sub_ps(r00,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);
            r01              = _mm256_andnot_ps(dummy_mask,r01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv01,pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            d                = _mm256_sub_ps(r01,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);
            r02              = _mm256_andnot_ps(dummy_mask,r02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv02,pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            d                = _mm256_sub_ps(r02,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);
1169
1170             fjx2             = _mm256_add_ps(fjx2,tx);
1171             fjy2             = _mm256_add_ps(fjy2,ty);
1172             fjz2             = _mm256_add_ps(fjz2,tz);
1173
1174             }
1175
1176             /**************************
1177              * CALCULATE INTERACTIONS *
1178              **************************/
1179
1180             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1181             {
1182
1183             r10              = _mm256_mul_ps(rsq10,rinv10);
1184             r10              = _mm256_andnot_ps(dummy_mask,r10);
1185
1186             /* EWALD ELECTROSTATICS */
1187             
1188             /* Analytical PME correction */
1189             zeta2            = _mm256_mul_ps(beta2,rsq10);
1190             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1191             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1192             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1193             felec            = _mm256_mul_ps(qq10,felec);
1194             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1195             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1196             velec            = _mm256_sub_ps(rinv10,pmecorrV);
1197             velec            = _mm256_mul_ps(qq10,velec);
1198             
1199             d                = _mm256_sub_ps(r10,rswitch);
1200             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1201             d2               = _mm256_mul_ps(d,d);
1202             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1203
1204             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1205
1206             /* Evaluate switch function */
1207             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1208             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1209             velec            = _mm256_mul_ps(velec,sw);
1210             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1211
1212             /* Update potential sum for this i atom from the interaction with this j atom. */
1213             velec            = _mm256_and_ps(velec,cutoff_mask);
1214             velec            = _mm256_andnot_ps(dummy_mask,velec);
1215             velecsum         = _mm256_add_ps(velecsum,velec);
1216
1217             fscal            = felec;
1218
1219             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1220
1221             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1222
1223             /* Calculate temporary vectorial force */
1224             tx               = _mm256_mul_ps(fscal,dx10);
1225             ty               = _mm256_mul_ps(fscal,dy10);
1226             tz               = _mm256_mul_ps(fscal,dz10);
1227
1228             /* Update vectorial force */
1229             fix1             = _mm256_add_ps(fix1,tx);
1230             fiy1             = _mm256_add_ps(fiy1,ty);
1231             fiz1             = _mm256_add_ps(fiz1,tz);
1232
1233             fjx0             = _mm256_add_ps(fjx0,tx);
1234             fjy0             = _mm256_add_ps(fjy0,ty);
1235             fjz0             = _mm256_add_ps(fjz0,tz);
1236
1237             }
1238
1239             /**************************
1240              * CALCULATE INTERACTIONS *
1241              **************************/
1242
1243             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1244             {
1245
1246             r11              = _mm256_mul_ps(rsq11,rinv11);
1247             r11              = _mm256_andnot_ps(dummy_mask,r11);
1248
1249             /* EWALD ELECTROSTATICS */
1250             
1251             /* Analytical PME correction */
1252             zeta2            = _mm256_mul_ps(beta2,rsq11);
1253             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1254             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1255             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1256             felec            = _mm256_mul_ps(qq11,felec);
1257             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1258             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1259             velec            = _mm256_sub_ps(rinv11,pmecorrV);
1260             velec            = _mm256_mul_ps(qq11,velec);
1261             
1262             d                = _mm256_sub_ps(r11,rswitch);
1263             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1264             d2               = _mm256_mul_ps(d,d);
1265             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1266
1267             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1268
1269             /* Evaluate switch function */
1270             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1271             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
1272             velec            = _mm256_mul_ps(velec,sw);
1273             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1274
1275             /* Update potential sum for this i atom from the interaction with this j atom. */
1276             velec            = _mm256_and_ps(velec,cutoff_mask);
1277             velec            = _mm256_andnot_ps(dummy_mask,velec);
1278             velecsum         = _mm256_add_ps(velecsum,velec);
1279
1280             fscal            = felec;
1281
1282             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1283
1284             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1285
1286             /* Calculate temporary vectorial force */
1287             tx               = _mm256_mul_ps(fscal,dx11);
1288             ty               = _mm256_mul_ps(fscal,dy11);
1289             tz               = _mm256_mul_ps(fscal,dz11);
1290
1291             /* Update vectorial force */
1292             fix1             = _mm256_add_ps(fix1,tx);
1293             fiy1             = _mm256_add_ps(fiy1,ty);
1294             fiz1             = _mm256_add_ps(fiz1,tz);
1295
1296             fjx1             = _mm256_add_ps(fjx1,tx);
1297             fjy1             = _mm256_add_ps(fjy1,ty);
1298             fjz1             = _mm256_add_ps(fjz1,tz);
1299
1300             }
1301
1302             /**************************
1303              * CALCULATE INTERACTIONS *
1304              **************************/
1305
1306             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1307             {
1308
1309             r12              = _mm256_mul_ps(rsq12,rinv12);
1310             r12              = _mm256_andnot_ps(dummy_mask,r12);
1311
1312             /* EWALD ELECTROSTATICS */
1313             
1314             /* Analytical PME correction */
1315             zeta2            = _mm256_mul_ps(beta2,rsq12);
1316             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1317             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1318             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1319             felec            = _mm256_mul_ps(qq12,felec);
1320             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1321             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1322             velec            = _mm256_sub_ps(rinv12,pmecorrV);
1323             velec            = _mm256_mul_ps(qq12,velec);
1324             
1325             d                = _mm256_sub_ps(r12,rswitch);
1326             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1327             d2               = _mm256_mul_ps(d,d);
1328             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1329
1330             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1331
1332             /* Evaluate switch function */
1333             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1334             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
1335             velec            = _mm256_mul_ps(velec,sw);
1336             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1337
1338             /* Update potential sum for this i atom from the interaction with this j atom. */
1339             velec            = _mm256_and_ps(velec,cutoff_mask);
1340             velec            = _mm256_andnot_ps(dummy_mask,velec);
1341             velecsum         = _mm256_add_ps(velecsum,velec);
1342
1343             fscal            = felec;
1344
1345             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1346
1347             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1348
1349             /* Calculate temporary vectorial force */
1350             tx               = _mm256_mul_ps(fscal,dx12);
1351             ty               = _mm256_mul_ps(fscal,dy12);
1352             tz               = _mm256_mul_ps(fscal,dz12);
1353
1354             /* Update vectorial force */
1355             fix1             = _mm256_add_ps(fix1,tx);
1356             fiy1             = _mm256_add_ps(fiy1,ty);
1357             fiz1             = _mm256_add_ps(fiz1,tz);
1358
1359             fjx2             = _mm256_add_ps(fjx2,tx);
1360             fjy2             = _mm256_add_ps(fjy2,ty);
1361             fjz2             = _mm256_add_ps(fjz2,tz);
1362
1363             }
1364
1365             /**************************
1366              * CALCULATE INTERACTIONS *
1367              **************************/
1368
1369             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1370             {
1371
1372             r20              = _mm256_mul_ps(rsq20,rinv20);
1373             r20              = _mm256_andnot_ps(dummy_mask,r20);
1374
1375             /* EWALD ELECTROSTATICS */
1376             
1377             /* Analytical PME correction */
1378             zeta2            = _mm256_mul_ps(beta2,rsq20);
1379             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1380             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1381             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1382             felec            = _mm256_mul_ps(qq20,felec);
1383             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1384             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1385             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1386             velec            = _mm256_mul_ps(qq20,velec);
1387             
1388             d                = _mm256_sub_ps(r20,rswitch);
1389             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1390             d2               = _mm256_mul_ps(d,d);
1391             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1392
1393             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1394
1395             /* Evaluate switch function */
1396             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1397             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1398             velec            = _mm256_mul_ps(velec,sw);
1399             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1400
1401             /* Update potential sum for this i atom from the interaction with this j atom. */
1402             velec            = _mm256_and_ps(velec,cutoff_mask);
1403             velec            = _mm256_andnot_ps(dummy_mask,velec);
1404             velecsum         = _mm256_add_ps(velecsum,velec);
1405
1406             fscal            = felec;
1407
1408             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1409
1410             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1411
1412             /* Calculate temporary vectorial force */
1413             tx               = _mm256_mul_ps(fscal,dx20);
1414             ty               = _mm256_mul_ps(fscal,dy20);
1415             tz               = _mm256_mul_ps(fscal,dz20);
1416
1417             /* Update vectorial force */
1418             fix2             = _mm256_add_ps(fix2,tx);
1419             fiy2             = _mm256_add_ps(fiy2,ty);
1420             fiz2             = _mm256_add_ps(fiz2,tz);
1421
1422             fjx0             = _mm256_add_ps(fjx0,tx);
1423             fjy0             = _mm256_add_ps(fjy0,ty);
1424             fjz0             = _mm256_add_ps(fjz0,tz);
1425
1426             }
1427
1428             /**************************
1429              * CALCULATE INTERACTIONS *
1430              **************************/
1431
1432             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1433             {
1434
1435             r21              = _mm256_mul_ps(rsq21,rinv21);
1436             r21              = _mm256_andnot_ps(dummy_mask,r21);
1437
1438             /* EWALD ELECTROSTATICS */
1439             
1440             /* Analytical PME correction */
1441             zeta2            = _mm256_mul_ps(beta2,rsq21);
1442             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1443             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1444             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1445             felec            = _mm256_mul_ps(qq21,felec);
1446             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1447             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1448             velec            = _mm256_sub_ps(rinv21,pmecorrV);
1449             velec            = _mm256_mul_ps(qq21,velec);
1450             
1451             d                = _mm256_sub_ps(r21,rswitch);
1452             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1453             d2               = _mm256_mul_ps(d,d);
1454             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1455
1456             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1457
1458             /* Evaluate switch function */
1459             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1460             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
1461             velec            = _mm256_mul_ps(velec,sw);
1462             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1463
1464             /* Update potential sum for this i atom from the interaction with this j atom. */
1465             velec            = _mm256_and_ps(velec,cutoff_mask);
1466             velec            = _mm256_andnot_ps(dummy_mask,velec);
1467             velecsum         = _mm256_add_ps(velecsum,velec);
1468
1469             fscal            = felec;
1470
1471             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1472
1473             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1474
1475             /* Calculate temporary vectorial force */
1476             tx               = _mm256_mul_ps(fscal,dx21);
1477             ty               = _mm256_mul_ps(fscal,dy21);
1478             tz               = _mm256_mul_ps(fscal,dz21);
1479
1480             /* Update vectorial force */
1481             fix2             = _mm256_add_ps(fix2,tx);
1482             fiy2             = _mm256_add_ps(fiy2,ty);
1483             fiz2             = _mm256_add_ps(fiz2,tz);
1484
1485             fjx1             = _mm256_add_ps(fjx1,tx);
1486             fjy1             = _mm256_add_ps(fjy1,ty);
1487             fjz1             = _mm256_add_ps(fjz1,tz);
1488
1489             }
1490
1491             /**************************
1492              * CALCULATE INTERACTIONS *
1493              **************************/
1494
1495             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1496             {
1497
1498             r22              = _mm256_mul_ps(rsq22,rinv22);
1499             r22              = _mm256_andnot_ps(dummy_mask,r22);
1500
1501             /* EWALD ELECTROSTATICS */
1502             
1503             /* Analytical PME correction */
1504             zeta2            = _mm256_mul_ps(beta2,rsq22);
1505             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1506             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1507             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1508             felec            = _mm256_mul_ps(qq22,felec);
1509             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1510             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1511             velec            = _mm256_sub_ps(rinv22,pmecorrV);
1512             velec            = _mm256_mul_ps(qq22,velec);
1513             
1514             d                = _mm256_sub_ps(r22,rswitch);
1515             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1516             d2               = _mm256_mul_ps(d,d);
1517             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1518
1519             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1520
1521             /* Evaluate switch function */
1522             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1523             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
1524             velec            = _mm256_mul_ps(velec,sw);
1525             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1526
1527             /* Update potential sum for this i atom from the interaction with this j atom. */
1528             velec            = _mm256_and_ps(velec,cutoff_mask);
1529             velec            = _mm256_andnot_ps(dummy_mask,velec);
1530             velecsum         = _mm256_add_ps(velecsum,velec);
1531
1532             fscal            = felec;
1533
1534             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1535
1536             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1537
1538             /* Calculate temporary vectorial force */
1539             tx               = _mm256_mul_ps(fscal,dx22);
1540             ty               = _mm256_mul_ps(fscal,dy22);
1541             tz               = _mm256_mul_ps(fscal,dz22);
1542
1543             /* Update vectorial force */
1544             fix2             = _mm256_add_ps(fix2,tx);
1545             fiy2             = _mm256_add_ps(fiy2,ty);
1546             fiz2             = _mm256_add_ps(fiz2,tz);
1547
1548             fjx2             = _mm256_add_ps(fjx2,tx);
1549             fjy2             = _mm256_add_ps(fjy2,ty);
1550             fjz2             = _mm256_add_ps(fjz2,tz);
1551
1552             }
1553
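             /* Padding entries write their (already zeroed) j forces to the
              * local scratch buffer instead of the real force array.
              */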
1554             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1555             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1556             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1557             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1558             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1559             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1560             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1561             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1562
1563             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1564                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1565
1566             /* Inner loop uses 981 flops */
1567         }
1568
1569         /* End of innermost loop */
1570
1571         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1572                                                  f+i_coord_offset,fshift+i_shift_offset);
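         /* The call above reduces the eight lanes of each i-atom force
          * accumulator and adds the result to both f[] and the shift-force
          * array used for the virial.
          */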
1573
1574         ggid                        = gid[iidx];
1575         /* Update potential energies */
1576         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1577
1578         /* Increment number of inner iterations */
1579         inneriter                  += j_index_end - j_index_start;
1580
1581         /* Outer loop uses 19 flops */
1582     }
1583
1584     /* Increment number of outer iterations */
1585     outeriter        += nri;
1586
1587     /* Update outer/inner flops */
1588
1589     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*981);
1590 }
1591 /*
1592  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_single
1593  * Electrostatics interaction: Ewald
1594  * VdW interaction:            None
1595  * Geometry:                   Water3-Water3
1596  * Calculate force/pot:        Force
1597  */
1598 void
1599 nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_single
1600                     (t_nblist                    * gmx_restrict       nlist,
1601                      rvec                        * gmx_restrict          xx,
1602                      rvec                        * gmx_restrict          ff,
1603                      t_forcerec                  * gmx_restrict          fr,
1604                      t_mdatoms                   * gmx_restrict     mdatoms,
1605                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1606                      t_nrnb                      * gmx_restrict        nrnb)
1607 {
1608     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1609      * just 0 for non-waters.
1610      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
1611      * jnr indices corresponding to data put in the eight positions in the SIMD register.
1612      */
1613     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1614     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1615     int              jnrA,jnrB,jnrC,jnrD;
1616     int              jnrE,jnrF,jnrG,jnrH;
1617     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1618     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1619     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1620     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1621     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1622     real             rcutoff_scalar;
1623     real             *shiftvec,*fshift,*x,*f;
1624     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1625     real             scratch[4*DIM];
1626     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1627     real *           vdwioffsetptr0;
1628     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1629     real *           vdwioffsetptr1;
1630     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1631     real *           vdwioffsetptr2;
1632     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1633     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1634     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1635     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1636     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1637     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1638     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1639     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1640     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1641     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1642     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1643     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1644     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1645     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1646     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1647     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1648     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1649     real             *charge;
1650     __m256i          ewitab;
1651     __m128i          ewitab_lo,ewitab_hi;
1652     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1653     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1654     real             *ewtab;
1655     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1656     real             rswitch_scalar,d_scalar;
1657     __m256           dummy_mask,cutoff_mask;
1658     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1659     __m256           one     = _mm256_set1_ps(1.0);
1660     __m256           two     = _mm256_set1_ps(2.0);
1661     x                = xx[0];
1662     f                = ff[0];
1663
1664     nri              = nlist->nri;
1665     iinr             = nlist->iinr;
1666     jindex           = nlist->jindex;
1667     jjnr             = nlist->jjnr;
1668     shiftidx         = nlist->shift;
1669     gid              = nlist->gid;
1670     shiftvec         = fr->shift_vec[0];
1671     fshift           = fr->fshift[0];
1672     facel            = _mm256_set1_ps(fr->epsfac);
1673     charge           = mdatoms->chargeA;
1674
1675     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1676     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1677     beta2            = _mm256_mul_ps(beta,beta);
1678     beta3            = _mm256_mul_ps(beta,beta2);
1679
1680     ewtab            = fr->ic->tabq_coul_FDV0;
1681     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1682     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1683
1684     /* Setup water-specific parameters */
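     /* All water molecules handled by this kernel are identical, so the
      * three site charges, and hence all nine i-j charge products, are read
      * once from the first listed i water and reused for every pair.
      */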
1685     inr              = nlist->iinr[0];
1686     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1687     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1688     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1689
1690     jq0              = _mm256_set1_ps(charge[inr+0]);
1691     jq1              = _mm256_set1_ps(charge[inr+1]);
1692     jq2              = _mm256_set1_ps(charge[inr+2]);
1693     qq00             = _mm256_mul_ps(iq0,jq0);
1694     qq01             = _mm256_mul_ps(iq0,jq1);
1695     qq02             = _mm256_mul_ps(iq0,jq2);
1696     qq10             = _mm256_mul_ps(iq1,jq0);
1697     qq11             = _mm256_mul_ps(iq1,jq1);
1698     qq12             = _mm256_mul_ps(iq1,jq2);
1699     qq20             = _mm256_mul_ps(iq2,jq0);
1700     qq21             = _mm256_mul_ps(iq2,jq1);
1701     qq22             = _mm256_mul_ps(iq2,jq2);
1702
1703     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1704     rcutoff_scalar   = fr->rcoulomb;
1705     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1706     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1707
1708     rswitch_scalar   = fr->rcoulomb_switch;
1709     rswitch          = _mm256_set1_ps(rswitch_scalar);
1710     /* Setup switch parameters */
1711     d_scalar         = rcutoff_scalar-rswitch_scalar;
1712     d                = _mm256_set1_ps(d_scalar);
1713     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
1714     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1715     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1716     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
1717     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1718     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
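     /* The coefficients above encode the fifth-order switch
      * S(r) = 1 - 10*t^3 + 15*t^4 - 6*t^5,  t = (r-rswitch)/(rcutoff-rswitch),
      * and its derivative dS/dr; the inner loop evaluates sw and dsw from
      * them with Horner's scheme.
      */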
1719
1720     /* Initialize to avoid compiler warnings about possibly uninitialized variables */
1721     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1722     j_coord_offsetA = 0;
1723     j_coord_offsetB = 0;
1724     j_coord_offsetC = 0;
1725     j_coord_offsetD = 0;
1726     j_coord_offsetE = 0;
1727     j_coord_offsetF = 0;
1728     j_coord_offsetG = 0;
1729     j_coord_offsetH = 0;
1730
1731     outeriter        = 0;
1732     inneriter        = 0;
1733
1734     for(iidx=0;iidx<4*DIM;iidx++)
1735     {
1736         scratch[iidx] = 0.0;
1737     }
1738
1739     /* Start outer loop over neighborlists */
1740     for(iidx=0; iidx<nri; iidx++)
1741     {
1742         /* Load shift vector for this list */
1743         i_shift_offset   = DIM*shiftidx[iidx];
1744
1745         /* Load limits for loop over neighbors */
1746         j_index_start    = jindex[iidx];
1747         j_index_end      = jindex[iidx+1];
1748
1749         /* Get outer coordinate index */
1750         inr              = iinr[iidx];
1751         i_coord_offset   = DIM*inr;
1752
1753         /* Load i particle coords and add shift vector */
1754         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1755                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1756
1757         fix0             = _mm256_setzero_ps();
1758         fiy0             = _mm256_setzero_ps();
1759         fiz0             = _mm256_setzero_ps();
1760         fix1             = _mm256_setzero_ps();
1761         fiy1             = _mm256_setzero_ps();
1762         fiz1             = _mm256_setzero_ps();
1763         fix2             = _mm256_setzero_ps();
1764         fiy2             = _mm256_setzero_ps();
1765         fiz2             = _mm256_setzero_ps();
1766
1767         /* Start inner kernel loop */
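         /* Full-width loop: it runs only while all eight jjnr entries are
          * real atoms; the neighbor list is padded with negative indices,
          * and the remaining partial group is handled by a masked epilogue
          * loop.
          */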
1768         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1769         {
1770
1771             /* Get j neighbor index, and coordinate index */
1772             jnrA             = jjnr[jidx];
1773             jnrB             = jjnr[jidx+1];
1774             jnrC             = jjnr[jidx+2];
1775             jnrD             = jjnr[jidx+3];
1776             jnrE             = jjnr[jidx+4];
1777             jnrF             = jjnr[jidx+5];
1778             jnrG             = jjnr[jidx+6];
1779             jnrH             = jjnr[jidx+7];
1780             j_coord_offsetA  = DIM*jnrA;
1781             j_coord_offsetB  = DIM*jnrB;
1782             j_coord_offsetC  = DIM*jnrC;
1783             j_coord_offsetD  = DIM*jnrD;
1784             j_coord_offsetE  = DIM*jnrE;
1785             j_coord_offsetF  = DIM*jnrF;
1786             j_coord_offsetG  = DIM*jnrG;
1787             j_coord_offsetH  = DIM*jnrH;
1788
1789             /* load j atom coordinates */
1790             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1791                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1792                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1793                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1794                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1795
1796             /* Calculate displacement vector */
1797             dx00             = _mm256_sub_ps(ix0,jx0);
1798             dy00             = _mm256_sub_ps(iy0,jy0);
1799             dz00             = _mm256_sub_ps(iz0,jz0);
1800             dx01             = _mm256_sub_ps(ix0,jx1);
1801             dy01             = _mm256_sub_ps(iy0,jy1);
1802             dz01             = _mm256_sub_ps(iz0,jz1);
1803             dx02             = _mm256_sub_ps(ix0,jx2);
1804             dy02             = _mm256_sub_ps(iy0,jy2);
1805             dz02             = _mm256_sub_ps(iz0,jz2);
1806             dx10             = _mm256_sub_ps(ix1,jx0);
1807             dy10             = _mm256_sub_ps(iy1,jy0);
1808             dz10             = _mm256_sub_ps(iz1,jz0);
1809             dx11             = _mm256_sub_ps(ix1,jx1);
1810             dy11             = _mm256_sub_ps(iy1,jy1);
1811             dz11             = _mm256_sub_ps(iz1,jz1);
1812             dx12             = _mm256_sub_ps(ix1,jx2);
1813             dy12             = _mm256_sub_ps(iy1,jy2);
1814             dz12             = _mm256_sub_ps(iz1,jz2);
1815             dx20             = _mm256_sub_ps(ix2,jx0);
1816             dy20             = _mm256_sub_ps(iy2,jy0);
1817             dz20             = _mm256_sub_ps(iz2,jz0);
1818             dx21             = _mm256_sub_ps(ix2,jx1);
1819             dy21             = _mm256_sub_ps(iy2,jy1);
1820             dz21             = _mm256_sub_ps(iz2,jz1);
1821             dx22             = _mm256_sub_ps(ix2,jx2);
1822             dy22             = _mm256_sub_ps(iy2,jy2);
1823             dz22             = _mm256_sub_ps(iz2,jz2);
1824
1825             /* Calculate squared distance and things based on it */
1826             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1827             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1828             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1829             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1830             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1831             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1832             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1833             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1834             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1835
1836             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1837             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1838             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1839             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1840             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1841             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1842             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1843             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1844             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
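             /* gmx_mm256_invsqrt_ps() computes 1/sqrt(rsq) from the AVX
              * rsqrt approximation refined by a Newton-Raphson step (see the
              * included SIMD math headers).
              */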
1845
1846             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1847             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1848             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1849             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1850             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1851             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1852             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1853             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1854             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1855
1856             fjx0             = _mm256_setzero_ps();
1857             fjy0             = _mm256_setzero_ps();
1858             fjz0             = _mm256_setzero_ps();
1859             fjx1             = _mm256_setzero_ps();
1860             fjy1             = _mm256_setzero_ps();
1861             fjz1             = _mm256_setzero_ps();
1862             fjx2             = _mm256_setzero_ps();
1863             fjy2             = _mm256_setzero_ps();
1864             fjz2             = _mm256_setzero_ps();
1865
1866             /**************************
1867              * CALCULATE INTERACTIONS *
1868              **************************/
1869
1870             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1871             {
1872
1873             r00              = _mm256_mul_ps(rsq00,rinv00);
1874
1875             /* EWALD ELECTROSTATICS */
1876             
1877             /* Analytical PME correction */
1878             zeta2            = _mm256_mul_ps(beta2,rsq00);
1879             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1880             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1881             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1882             felec            = _mm256_mul_ps(qq00,felec);
1883             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1884             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1885             velec            = _mm256_sub_ps(rinv00,pmecorrV);
1886             velec            = _mm256_mul_ps(qq00,velec);
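             /* velec is still needed in this force-only kernel: the switch
              * contributes -velec*dsw/r to the scalar force below.
              */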
1887             
1888             d                = _mm256_sub_ps(r00,rswitch);
1889             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1890             d2               = _mm256_mul_ps(d,d);
1891             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1892
1893             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1894
1895             /* Evaluate switch function */
1896             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1897             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
1898             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1899
1900             fscal            = felec;
1901
1902             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1903
1904             /* Calculate temporary vectorial force */
1905             tx               = _mm256_mul_ps(fscal,dx00);
1906             ty               = _mm256_mul_ps(fscal,dy00);
1907             tz               = _mm256_mul_ps(fscal,dz00);
1908
1909             /* Update vectorial force */
1910             fix0             = _mm256_add_ps(fix0,tx);
1911             fiy0             = _mm256_add_ps(fiy0,ty);
1912             fiz0             = _mm256_add_ps(fiz0,tz);
1913
1914             fjx0             = _mm256_add_ps(fjx0,tx);
1915             fjy0             = _mm256_add_ps(fjy0,ty);
1916             fjz0             = _mm256_add_ps(fjz0,tz);
1917
1918             }
1919
1920             /**************************
1921              * CALCULATE INTERACTIONS *
1922              **************************/
1923
1924             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1925             {
1926
1927             r01              = _mm256_mul_ps(rsq01,rinv01);
1928
1929             /* EWALD ELECTROSTATICS */
1930             
1931             /* Analytical PME correction */
1932             zeta2            = _mm256_mul_ps(beta2,rsq01);
1933             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1934             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1935             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1936             felec            = _mm256_mul_ps(qq01,felec);
1937             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1938             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1939             velec            = _mm256_sub_ps(rinv01,pmecorrV);
1940             velec            = _mm256_mul_ps(qq01,velec);
1941             
1942             d                = _mm256_sub_ps(r01,rswitch);
1943             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1944             d2               = _mm256_mul_ps(d,d);
1945             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1946
1947             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1948
1949             /* Evaluate switch function */
1950             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1951             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
1952             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1953
1954             fscal            = felec;
1955
1956             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1957
1958             /* Calculate temporary vectorial force */
1959             tx               = _mm256_mul_ps(fscal,dx01);
1960             ty               = _mm256_mul_ps(fscal,dy01);
1961             tz               = _mm256_mul_ps(fscal,dz01);
1962
1963             /* Update vectorial force */
1964             fix0             = _mm256_add_ps(fix0,tx);
1965             fiy0             = _mm256_add_ps(fiy0,ty);
1966             fiz0             = _mm256_add_ps(fiz0,tz);
1967
1968             fjx1             = _mm256_add_ps(fjx1,tx);
1969             fjy1             = _mm256_add_ps(fjy1,ty);
1970             fjz1             = _mm256_add_ps(fjz1,tz);
1971
1972             }
1973
1974             /**************************
1975              * CALCULATE INTERACTIONS *
1976              **************************/
1977
1978             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1979             {
1980
1981             r02              = _mm256_mul_ps(rsq02,rinv02);
1982
1983             /* EWALD ELECTROSTATICS */
1984             
1985             /* Analytical PME correction */
1986             zeta2            = _mm256_mul_ps(beta2,rsq02);
1987             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1988             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1989             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1990             felec            = _mm256_mul_ps(qq02,felec);
1991             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1992             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1993             velec            = _mm256_sub_ps(rinv02,pmecorrV);
1994             velec            = _mm256_mul_ps(qq02,velec);
1995             
1996             d                = _mm256_sub_ps(r02,rswitch);
1997             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1998             d2               = _mm256_mul_ps(d,d);
1999             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2000
2001             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2002
2003             /* Evaluate switch function */
2004             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2005             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
2006             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
2007
2008             fscal            = felec;
2009
2010             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2011
2012             /* Calculate temporary vectorial force */
2013             tx               = _mm256_mul_ps(fscal,dx02);
2014             ty               = _mm256_mul_ps(fscal,dy02);
2015             tz               = _mm256_mul_ps(fscal,dz02);
2016
2017             /* Update vectorial force */
2018             fix0             = _mm256_add_ps(fix0,tx);
2019             fiy0             = _mm256_add_ps(fiy0,ty);
2020             fiz0             = _mm256_add_ps(fiz0,tz);
2021
2022             fjx2             = _mm256_add_ps(fjx2,tx);
2023             fjy2             = _mm256_add_ps(fjy2,ty);
2024             fjz2             = _mm256_add_ps(fjz2,tz);
2025
2026             }
2027
2028             /**************************
2029              * CALCULATE INTERACTIONS *
2030              **************************/
2031
2032             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2033             {
2034
2035             r10              = _mm256_mul_ps(rsq10,rinv10);
2036
2037             /* EWALD ELECTROSTATICS */
2038             
2039             /* Analytical PME correction */
2040             zeta2            = _mm256_mul_ps(beta2,rsq10);
2041             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
2042             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2043             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2044             felec            = _mm256_mul_ps(qq10,felec);
2045             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2046             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2047             velec            = _mm256_sub_ps(rinv10,pmecorrV);
2048             velec            = _mm256_mul_ps(qq10,velec);
2049             
2050             d                = _mm256_sub_ps(r10,rswitch);
2051             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2052             d2               = _mm256_mul_ps(d,d);
2053             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2054
2055             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2056
2057             /* Evaluate switch function */
2058             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2059             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
2060             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
2061
2062             fscal            = felec;
2063
2064             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2065
2066             /* Calculate temporary vectorial force */
2067             tx               = _mm256_mul_ps(fscal,dx10);
2068             ty               = _mm256_mul_ps(fscal,dy10);
2069             tz               = _mm256_mul_ps(fscal,dz10);
2070
2071             /* Update vectorial force */
2072             fix1             = _mm256_add_ps(fix1,tx);
2073             fiy1             = _mm256_add_ps(fiy1,ty);
2074             fiz1             = _mm256_add_ps(fiz1,tz);
2075
2076             fjx0             = _mm256_add_ps(fjx0,tx);
2077             fjy0             = _mm256_add_ps(fjy0,ty);
2078             fjz0             = _mm256_add_ps(fjz0,tz);
2079
2080             }
2081
2082             /**************************
2083              * CALCULATE INTERACTIONS *
2084              **************************/
2085
2086             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2087             {
2088
2089             r11              = _mm256_mul_ps(rsq11,rinv11);
2090
2091             /* EWALD ELECTROSTATICS */
2092             
2093             /* Analytical PME correction */
2094             zeta2            = _mm256_mul_ps(beta2,rsq11);
2095             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2096             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2097             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2098             felec            = _mm256_mul_ps(qq11,felec);
2099             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2100             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2101             velec            = _mm256_sub_ps(rinv11,pmecorrV);
2102             velec            = _mm256_mul_ps(qq11,velec);
2103             
2104             d                = _mm256_sub_ps(r11,rswitch);
2105             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2106             d2               = _mm256_mul_ps(d,d);
2107             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2108
2109             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2110
2111             /* Evaluate switch function */
2112             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2113             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
2114             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2115
2116             fscal            = felec;
2117
2118             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2119
2120             /* Calculate temporary vectorial force */
2121             tx               = _mm256_mul_ps(fscal,dx11);
2122             ty               = _mm256_mul_ps(fscal,dy11);
2123             tz               = _mm256_mul_ps(fscal,dz11);
2124
2125             /* Update vectorial force */
2126             fix1             = _mm256_add_ps(fix1,tx);
2127             fiy1             = _mm256_add_ps(fiy1,ty);
2128             fiz1             = _mm256_add_ps(fiz1,tz);
2129
2130             fjx1             = _mm256_add_ps(fjx1,tx);
2131             fjy1             = _mm256_add_ps(fjy1,ty);
2132             fjz1             = _mm256_add_ps(fjz1,tz);
2133
2134             }
2135
2136             /**************************
2137              * CALCULATE INTERACTIONS *
2138              **************************/
2139
2140             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2141             {
2142
2143             r12              = _mm256_mul_ps(rsq12,rinv12);
2144
2145             /* EWALD ELECTROSTATICS */
2146             
2147             /* Analytical PME correction */
2148             zeta2            = _mm256_mul_ps(beta2,rsq12);
2149             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2150             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2151             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2152             felec            = _mm256_mul_ps(qq12,felec);
2153             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2154             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2155             velec            = _mm256_sub_ps(rinv12,pmecorrV);
2156             velec            = _mm256_mul_ps(qq12,velec);
2157             
2158             d                = _mm256_sub_ps(r12,rswitch);
2159             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2160             d2               = _mm256_mul_ps(d,d);
2161             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2162
2163             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv20,pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            d                = _mm256_sub_ps(r20,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv21,pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            d                = _mm256_sub_ps(r21,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv22,pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            d                = _mm256_sub_ps(r22,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
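            /* The call above transposes the nine SIMD j-force accumulators back to
             * per-atom layout and subtracts them from the force entries of the
             * eight j waters in one swizzled update.
             */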

            /* Inner loop uses 945 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
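            /* Negative (dummy) indices are clamped to 0 below so the coordinate
             * gather stays in bounds; dummy_mask removes their contributions later.
             */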

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
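            /* The gather above transposes the coordinates of the three atoms of each
             * of the eight j waters into one SIMD register per coordinate component.
             */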

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);
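            /* Clear r in dummy lanes (which loaded atom 0 after the index clamp) so
             * the switch and correction arithmetic below sees benign values there;
             * the resulting forces are masked out in any case.
             */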

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv00,pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            d                = _mm256_sub_ps(r00,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);
            r01              = _mm256_andnot_ps(dummy_mask,r01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv01,pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            d                = _mm256_sub_ps(r01,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);
            r02              = _mm256_andnot_ps(dummy_mask,r02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv02,pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            d                = _mm256_sub_ps(r02,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv10,pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            d                = _mm256_sub_ps(r10,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv11,pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            d                = _mm256_sub_ps(r11,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv12,pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            d                = _mm256_sub_ps(r12,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv20,pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            d                = _mm256_sub_ps(r20,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv21,pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            d                = _mm256_sub_ps(r21,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv22,pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            d                = _mm256_sub_ps(r22,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
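            /* Dummy lanes are routed to the scratch buffer so the swizzled force
             * decrement below has a harmless destination for their masked-out forces.
             */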

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 954 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);
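        /* The call above reduces the three i-atom SIMD force accumulators across
         * lanes and adds them to the force array and to the shift-force array
         * used for the virial.
         */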

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*954);
}