/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "types/simple.h"
#include "gromacs/math/vec.h"
#include "nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        PotentialAndForce
 */
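/*
 * For orientation, a minimal scalar sketch of the per-pair interaction this
 * kernel vectorizes is given below. It is illustrative only and not part of
 * the generated kernel; the helper name and its arguments are hypothetical.
 * It assumes potential-shifted Ewald electrostatics and potential-shifted
 * LJ-PME, and that c6/c12/c6grid carry the factors 6 and 12 premultiplied,
 * as GROMACS stores them in fr->nbfp and fr->ljpme_c6grid.
 */
#if 0
static void
ref_pair_potentials(real r, real qq, real c6, real c12, real c6grid,
                    real beta, real beta_lj, real sh_ewald,
                    real sh_invrc6, real sh_lj_ewald,
                    real *velec, real *vvdw)
{
    real rinv  = 1.0/r;
    real rinv6 = rinv*rinv*rinv*rinv*rinv*rinv;
    real br2   = beta_lj*beta_lj*r*r;
    /* poly = exp(-(beta_lj*r)^2)*(1 + (beta_lj*r)^2 + (beta_lj*r)^4/2) */
    real poly  = exp(-br2)*(1.0 + br2 + 0.5*br2*br2);

    /* Shifted real-space Ewald: qq*(erfc(beta*r)/r - sh_ewald) */
    *velec = qq*(erfc(beta*r)*rinv - sh_ewald);

    /* Shifted LJ-PME: the grid part of C6 is blended in via poly, and both
     * dispersion and repulsion are shifted to zero at the cutoff.
     */
    *vvdw  = (c12*rinv6*rinv6 - c12*sh_invrc6*sh_invrc6)/12.0
           - ((c6 - c6grid*(1.0-poly))*rinv6 - (c6*sh_invrc6 + c6grid*sh_lj_ewald))/6.0;
}
#endif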
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    real *           vdwgridioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    real *           vdwgridioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    real *           vdwgridioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
    __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256           c6grid_00;
    __m256           c6grid_01;
    __m256           c6grid_02;
    __m256           c6grid_10;
    __m256           c6grid_11;
    __m256           c6grid_12;
    __m256           c6grid_20;
    __m256           c6grid_21;
    __m256           c6grid_22;
    real             *vdwgridparam;
    __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
    __m256           one_half  = _mm256_set1_ps(0.5);
    __m256           minus_one = _mm256_set1_ps(-1.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;
    vdwgridparam     = fr->ljpme_c6grid;
    sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
    ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
    ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
    vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];

    jq0              = _mm256_set1_ps(charge[inr+0]);
    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    qq00             = _mm256_mul_ps(iq0,jq0);
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
    qq01             = _mm256_mul_ps(iq0,jq1);
    qq02             = _mm256_mul_ps(iq0,jq2);
    qq10             = _mm256_mul_ps(iq1,jq0);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq20             = _mm256_mul_ps(iq2,jq0);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

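            /* Reading of the block above (the same pattern recurs in all nine
             * site-site blocks): pmecorrV/pmecorrF analytically supply the
             * erf-based parts, so the net result is the shifted real-space
             * Ewald pair potential velec = qq*(erfc(beta*r)/r - sh_ewald) and
             * force/r felec = qq*(erfc(beta*r) + 2*beta*r*exp(-beta^2*r^2)/sqrt(pi))/r^3.
             */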
            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r - (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);

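            /* Reading of the shift terms above: the repulsion is shifted by
             * c12/rc^12 (sh_vdw_invrcut6 is 1/rc^6) and the dispersion by
             * c6/rc^6 plus the grid contribution c6grid*sh_lj_ewald, so both
             * potentials vanish at r = rcutoff while the force inside the
             * cutoff is unchanged (plain truncation at rc).
             */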
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 1017 flops */
        }

        if(jidx<j_index_end)
        {
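            /* Epilogue for the final, possibly partial group of up to eight
             * j entries; the neighbor list is padded with negative indices,
             * which are masked out of all accumulators below.
             */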

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

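            /* For instance, with a hypothetical padded batch
             * jjnr+jidx = {12,7,31,5,-1,-1,-1,-1}, dummy_mask is 0x0 in the
             * four low lanes (real atoms) and 0xFFFFFFFF in the four high
             * lanes, so the andnot operations below zero everything computed
             * for the padding entries.
             */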
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r - (C6grid * exponent * beta^6)/r) */
966             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
967
968             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
969
970             /* Update potential sum for this i atom from the interaction with this j atom. */
971             velec            = _mm256_and_ps(velec,cutoff_mask);
972             velec            = _mm256_andnot_ps(dummy_mask,velec);
973             velecsum         = _mm256_add_ps(velecsum,velec);
974             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
975             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
976             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
977
978             fscal            = _mm256_add_ps(felec,fvdw);
979
980             fscal            = _mm256_and_ps(fscal,cutoff_mask);
981
982             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
983
984             /* Calculate temporary vectorial force */
985             tx               = _mm256_mul_ps(fscal,dx00);
986             ty               = _mm256_mul_ps(fscal,dy00);
987             tz               = _mm256_mul_ps(fscal,dz00);
988
989             /* Update vectorial force */
990             fix0             = _mm256_add_ps(fix0,tx);
991             fiy0             = _mm256_add_ps(fiy0,ty);
992             fiz0             = _mm256_add_ps(fiz0,tz);
993
994             fjx0             = _mm256_add_ps(fjx0,tx);
995             fjy0             = _mm256_add_ps(fjy0,ty);
996             fjz0             = _mm256_add_ps(fjz0,tz);
997
998             }
999
1000             /**************************
1001              * CALCULATE INTERACTIONS *
1002              **************************/
1003
1004             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1005             {
1006
1007             r01              = _mm256_mul_ps(rsq01,rinv01);
1008             r01              = _mm256_andnot_ps(dummy_mask,r01);
1009
1010             /* EWALD ELECTROSTATICS */
1011             
1012             /* Analytical PME correction */
1013             zeta2            = _mm256_mul_ps(beta2,rsq01);
1014             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1015             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1016             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1017             felec            = _mm256_mul_ps(qq01,felec);
1018             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1019             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1020             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
1021             velec            = _mm256_mul_ps(qq01,velec);
1022             
1023             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1024
1025             /* Update potential sum for this i atom from the interaction with this j atom. */
1026             velec            = _mm256_and_ps(velec,cutoff_mask);
1027             velec            = _mm256_andnot_ps(dummy_mask,velec);
1028             velecsum         = _mm256_add_ps(velecsum,velec);
1029
1030             fscal            = felec;
1031
1032             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1033
1034             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1035
1036             /* Calculate temporary vectorial force */
1037             tx               = _mm256_mul_ps(fscal,dx01);
1038             ty               = _mm256_mul_ps(fscal,dy01);
1039             tz               = _mm256_mul_ps(fscal,dz01);
1040
1041             /* Update vectorial force */
1042             fix0             = _mm256_add_ps(fix0,tx);
1043             fiy0             = _mm256_add_ps(fiy0,ty);
1044             fiz0             = _mm256_add_ps(fiz0,tz);
1045
1046             fjx1             = _mm256_add_ps(fjx1,tx);
1047             fjy1             = _mm256_add_ps(fjy1,ty);
1048             fjz1             = _mm256_add_ps(fjz1,tz);
1049
1050             }
1051
1052             /**************************
1053              * CALCULATE INTERACTIONS *
1054              **************************/
1055
1056             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1057             {
1058
1059             r02              = _mm256_mul_ps(rsq02,rinv02);
1060             r02              = _mm256_andnot_ps(dummy_mask,r02);
1061
1062             /* EWALD ELECTROSTATICS */
1063             
1064             /* Analytical PME correction */
1065             zeta2            = _mm256_mul_ps(beta2,rsq02);
1066             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1067             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1068             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1069             felec            = _mm256_mul_ps(qq02,felec);
1070             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1071             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1072             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
1073             velec            = _mm256_mul_ps(qq02,velec);
1074             
1075             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
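            /* Lanes whose j entry was padding (jnrlist < 0) are redirected to the
             * local scratch buffer, so the vectorized scatter below can store
             * unconditionally: real atoms receive their forces, padding is discarded.
             */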

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 1026 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);
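        /* The swizzle above reduces the eight lanes of the nine i-force
         * accumulators, adds the result to f[] for the three water atoms, and
         * accumulates the same totals into fshift[] for the virial. */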

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
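        /* Each update_1pot call above reduces the eight partial sums in its
         * accumulator to a single value and adds it to this list's energy-group
         * bin, selected by ggid. */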

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 20 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*1026);
}
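/*
 * The force-only kernel below walks the same neighbor lists with the same
 * force arithmetic, but omits the potential accumulators and energy-group
 * reductions, which is why its inner loop is counted at 554 flops versus
 * the 1026 counted above.
 */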
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_F_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_F_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    real *           vdwgridioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    real *           vdwgridioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    real *           vdwgridioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
    __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256           c6grid_00;
    __m256           c6grid_01;
    __m256           c6grid_02;
    __m256           c6grid_10;
    __m256           c6grid_11;
    __m256           c6grid_12;
    __m256           c6grid_20;
    __m256           c6grid_21;
    __m256           c6grid_22;
    real             *vdwgridparam;
    __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
    __m256           one_half  = _mm256_set1_ps(0.5);
    __m256           minus_one = _mm256_set1_ps(-1.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;
    vdwgridparam     = fr->ljpme_c6grid;
    sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
    ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
    ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_F;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
    vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];

    jq0              = _mm256_set1_ps(charge[inr+0]);
    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    qq00             = _mm256_mul_ps(iq0,jq0);
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
    qq01             = _mm256_mul_ps(iq0,jq1);
    qq02             = _mm256_mul_ps(iq0,jq2);
    qq10             = _mm256_mul_ps(iq1,jq0);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq20             = _mm256_mul_ps(iq2,jq0);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
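    /* Because every water molecule is identical, the nine charge products and
     * the single O-O Lennard-Jones/LJ-PME parameter set are hoisted out of
     * both loops and computed once from the first i entry. */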

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();

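        /* The loop below processes full octets of real j atoms; the condition
         * jjnr[jidx+7]>=0 guarantees no padding is present, and any padded
         * remainder of the list is handled by the masked epilogue further down. */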
        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* f6A = 6 * C6grid * (1 - poly) */
            f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
            /* f6B = C6grid * exponent * beta^6 */
            f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
            /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
            fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
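            /* Note on the LJ-PME terms above: ewclj2 = -beta_lj^2, so with
             * x = (beta_lj*r)^2 we have ewcljrsq = -x and exponent = exp(-x),
             * making poly = exp(-x)*(1 + x + x^2/2) as commented. f6A removes
             * the grid-handled part of the dispersion from the direct-space
             * force, f6B carries the residual exponential term, and the C6/C12
             * parameters are assumed stored with the factors 6 and 12
             * premultiplied, which is why those factors do not appear
             * explicitly in the expression for fvdw. */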

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 554 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

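            /* The mask is built by loading the eight raw (possibly negative)
             * j indices as two 128-bit halves, doing a signed compare of each
             * element against zero, and fusing the halves into a single
             * 256-bit lane mask. */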
2161             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2162             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2163             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2164             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2165             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2166             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2167             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2168             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
2169             j_coord_offsetA  = DIM*jnrA;
2170             j_coord_offsetB  = DIM*jnrB;
2171             j_coord_offsetC  = DIM*jnrC;
2172             j_coord_offsetD  = DIM*jnrD;
2173             j_coord_offsetE  = DIM*jnrE;
2174             j_coord_offsetF  = DIM*jnrF;
2175             j_coord_offsetG  = DIM*jnrG;
2176             j_coord_offsetH  = DIM*jnrH;
2177
2178             /* load j atom coordinates */
2179             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2180                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2181                                                  x+j_coord_offsetE,x+j_coord_offsetF,
2182                                                  x+j_coord_offsetG,x+j_coord_offsetH,
2183                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2184
2185             /* Calculate displacement vector */
2186             dx00             = _mm256_sub_ps(ix0,jx0);
2187             dy00             = _mm256_sub_ps(iy0,jy0);
2188             dz00             = _mm256_sub_ps(iz0,jz0);
2189             dx01             = _mm256_sub_ps(ix0,jx1);
2190             dy01             = _mm256_sub_ps(iy0,jy1);
2191             dz01             = _mm256_sub_ps(iz0,jz1);
2192             dx02             = _mm256_sub_ps(ix0,jx2);
2193             dy02             = _mm256_sub_ps(iy0,jy2);
2194             dz02             = _mm256_sub_ps(iz0,jz2);
2195             dx10             = _mm256_sub_ps(ix1,jx0);
2196             dy10             = _mm256_sub_ps(iy1,jy0);
2197             dz10             = _mm256_sub_ps(iz1,jz0);
2198             dx11             = _mm256_sub_ps(ix1,jx1);
2199             dy11             = _mm256_sub_ps(iy1,jy1);
2200             dz11             = _mm256_sub_ps(iz1,jz1);
2201             dx12             = _mm256_sub_ps(ix1,jx2);
2202             dy12             = _mm256_sub_ps(iy1,jy2);
2203             dz12             = _mm256_sub_ps(iz1,jz2);
2204             dx20             = _mm256_sub_ps(ix2,jx0);
2205             dy20             = _mm256_sub_ps(iy2,jy0);
2206             dz20             = _mm256_sub_ps(iz2,jz0);
2207             dx21             = _mm256_sub_ps(ix2,jx1);
2208             dy21             = _mm256_sub_ps(iy2,jy1);
2209             dz21             = _mm256_sub_ps(iz2,jz1);
2210             dx22             = _mm256_sub_ps(ix2,jx2);
2211             dy22             = _mm256_sub_ps(iy2,jy2);
2212             dz22             = _mm256_sub_ps(iz2,jz2);
2213
2214             /* Calculate squared distance and things based on it */
2215             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2216             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
2217             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
2218             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
2219             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2220             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2221             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
2222             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2223             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2224
2225             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
2226             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
2227             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
2228             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
2229             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
2230             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
2231             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
2232             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
2233             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
2234
2235             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
2236             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
2237             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
2238             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
2239             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2240             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2241             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
2242             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2243             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2244
2245             fjx0             = _mm256_setzero_ps();
2246             fjy0             = _mm256_setzero_ps();
2247             fjz0             = _mm256_setzero_ps();
2248             fjx1             = _mm256_setzero_ps();
2249             fjy1             = _mm256_setzero_ps();
2250             fjz1             = _mm256_setzero_ps();
2251             fjx2             = _mm256_setzero_ps();
2252             fjy2             = _mm256_setzero_ps();
2253             fjz2             = _mm256_setzero_ps();
2254
2255             /**************************
2256              * CALCULATE INTERACTIONS *
2257              **************************/
2258
2259             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2260             {
2261
2262             r00              = _mm256_mul_ps(rsq00,rinv00);
2263             r00              = _mm256_andnot_ps(dummy_mask,r00);
2264
2265             /* EWALD ELECTROSTATICS */
2266             
2267             /* Analytical PME correction */
2268             zeta2            = _mm256_mul_ps(beta2,rsq00);
2269             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
2270             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2271             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2272             felec            = _mm256_mul_ps(qq00,felec);
2273             
2274             /* Analytical LJ-PME */
2275             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2276             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
2277             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
2278             exponent         = gmx_simd_exp_r(ewcljrsq);
2279             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
2280             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
2281             /* f6A = 6 * C6grid * (1 - poly) */
2282             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
2283             /* f6B = C6grid * exponent * beta^6 */
2284             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
2285             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
2286             fvdw              = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
2287
2288             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }
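            /* Only atom 0 (the oxygen in a three-site water) carries LJ parameters,
             * so the remaining eight pairs below evaluate Ewald electrostatics only.
             */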

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);
            r01              = _mm256_andnot_ps(dummy_mask,r01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);
            r02              = _mm256_andnot_ps(dummy_mask,r02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

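            /* Lanes whose jnr is negative are list padding; pointing them at the
             * scratch buffer lets the swizzled decrement below always store eight
             * force triplets without branching on the number of real entries.
             */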
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
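            /* Transpose the nine SoA accumulators back to per-atom rvec layout and
             * subtract them from the eight j waters' forces; the matching additions
             * to the i accumulators above form the Newton's-third-law pair.
             */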

            /* Inner loop uses 563 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);
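        /* Reduce the three i-atom force accumulators across the eight lanes, add
         * them to f[], and accumulate the same totals into fshift[] for the virial.
         */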

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

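    /* inc_nrnb credits outeriter*18 + inneriter*563 flops to the W3W3 elec+vdw
     * force-kernel counter used by mdrun's performance report.
     */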
    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*563);
}