/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "gmxpre.h"

#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        PotentialAndForce
 */
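/*
 * In outline, each site pair within the cutoff contributes the standard
 * shifted real-space Ewald electrostatics,
 *     V_elec(r) = q_i q_j (erfc(beta r)/r - sh_ewald),
 * evaluated via analytical PME-correction polynomials, while the first
 * (Lennard-Jones-carrying) site pair additionally gets LJ-PME dispersion,
 * with the grid part of C6 corrected analytically. This is only a sketch;
 * the exact expressions are in the inner loop below.
 */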
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
     * jnr indices whose data occupy the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
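    /* scratch is zeroed below and serves as a safe dump buffer: force stores
     * for padded (dummy) j entries can be directed here rather than at real
     * atoms (its use falls in the masked epilogue). */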
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    real *           vdwgridioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    real *           vdwgridioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    real *           vdwgridioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
    __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256           c6grid_00;
    __m256           c6grid_01;
    __m256           c6grid_02;
    __m256           c6grid_10;
    __m256           c6grid_11;
    __m256           c6grid_12;
    __m256           c6grid_20;
    __m256           c6grid_21;
    __m256           c6grid_22;
    real             *vdwgridparam;
    __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
    __m256           one_half  = _mm256_set1_ps(0.5);
    __m256           minus_one = _mm256_set1_ps(-1.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;
    vdwgridparam     = fr->ljpme_c6grid;
    sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
    ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
    ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
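    /* Note: ewclj2 = -ewclj^2, so exp(ewclj2*r^2) in the LJ-PME code below
     * evaluates exp(-(beta_lj*r)^2) without a separate negation. */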

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
    vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];

    jq0              = _mm256_set1_ps(charge[inr+0]);
    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    qq00             = _mm256_mul_ps(iq0,jq0);
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
    qq01             = _mm256_mul_ps(iq0,jq1);
    qq02             = _mm256_mul_ps(iq0,jq2);
    qq10             = _mm256_mul_ps(iq1,jq0);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq20             = _mm256_mul_ps(iq2,jq0);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
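    /* GeomW3W3: both i and j molecules are identical 3-site waters, so all
     * nine charge products and the site0-site0 Lennard-Jones parameters
     * (the only site pair carrying LJ) are loop-invariant and precomputed
     * here. */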

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

        /* Start inner kernel loop */
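        /* The loop below handles only full octets of real j entries; list
         * padding uses negative indices, so jjnr[jidx+7]>=0 means all eight
         * lanes are valid. The remainder is handled after this loop with a
         * dummy mask. */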
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4/2);
             * note ewcljrsq = -(beta*r)^2, hence the sign pattern below. */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }
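            /* Only the 00 (site0-site0) pair carries Lennard-Jones
             * parameters; the remaining eight site pairs below evaluate
             * Ewald electrostatics only. */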

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
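            /* Newton's third law: the accumulated j-side forces are
             * subtracted from all eight j waters in one swizzled store. */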

            /* Inner loop uses 1017 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             * AVX1 lacks 256-bit integer compares, so the mask is assembled from
             * two 128-bit compares on the low and high halves.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
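            /* Dummy entries are clamped to index 0 so the coordinate gathers
             * below stay in bounds; their contributions are later zeroed via
             * dummy_mask. */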
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4/2);
             * note ewcljrsq = -(beta*r)^2, hence the sign pattern below. */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);
            r01              = _mm256_andnot_ps(dummy_mask,r01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);
            r02              = _mm256_andnot_ps(dummy_mask,r02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
1215             {
1216
1217             r12              = _mm256_mul_ps(rsq12,rinv12);
1218             r12              = _mm256_andnot_ps(dummy_mask,r12);
1219
1220             /* EWALD ELECTROSTATICS */
1221             
1222             /* Analytical PME correction */
1223             zeta2            = _mm256_mul_ps(beta2,rsq12);
1224             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1225             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1226             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1227             felec            = _mm256_mul_ps(qq12,felec);
1228             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1229             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1230             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
1231             velec            = _mm256_mul_ps(qq12,velec);
1232             
1233             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1234
1235             /* Update potential sum for this i atom from the interaction with this j atom. */
1236             velec            = _mm256_and_ps(velec,cutoff_mask);
1237             velec            = _mm256_andnot_ps(dummy_mask,velec);
1238             velecsum         = _mm256_add_ps(velecsum,velec);
1239
1240             fscal            = felec;
1241
1242             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1243
1244             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1245
1246             /* Calculate temporary vectorial force */
1247             tx               = _mm256_mul_ps(fscal,dx12);
1248             ty               = _mm256_mul_ps(fscal,dy12);
1249             tz               = _mm256_mul_ps(fscal,dz12);
1250
1251             /* Update vectorial force */
1252             fix1             = _mm256_add_ps(fix1,tx);
1253             fiy1             = _mm256_add_ps(fiy1,ty);
1254             fiz1             = _mm256_add_ps(fiz1,tz);
1255
1256             fjx2             = _mm256_add_ps(fjx2,tx);
1257             fjy2             = _mm256_add_ps(fjy2,ty);
1258             fjz2             = _mm256_add_ps(fjz2,tz);
1259
1260             }
1261
1262             /**************************
1263              * CALCULATE INTERACTIONS *
1264              **************************/
1265
1266             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1267             {
1268
1269             r20              = _mm256_mul_ps(rsq20,rinv20);
1270             r20              = _mm256_andnot_ps(dummy_mask,r20);
1271
1272             /* EWALD ELECTROSTATICS */
1273             
1274             /* Analytical PME correction */
1275             zeta2            = _mm256_mul_ps(beta2,rsq20);
1276             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1277             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1278             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1279             felec            = _mm256_mul_ps(qq20,felec);
1280             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1281             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1282             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
1283             velec            = _mm256_mul_ps(qq20,velec);
1284             
1285             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1286
1287             /* Update potential sum for this i atom from the interaction with this j atom. */
1288             velec            = _mm256_and_ps(velec,cutoff_mask);
1289             velec            = _mm256_andnot_ps(dummy_mask,velec);
1290             velecsum         = _mm256_add_ps(velecsum,velec);
1291
1292             fscal            = felec;
1293
1294             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1295
1296             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1297
1298             /* Calculate temporary vectorial force */
1299             tx               = _mm256_mul_ps(fscal,dx20);
1300             ty               = _mm256_mul_ps(fscal,dy20);
1301             tz               = _mm256_mul_ps(fscal,dz20);
1302
1303             /* Update vectorial force */
1304             fix2             = _mm256_add_ps(fix2,tx);
1305             fiy2             = _mm256_add_ps(fiy2,ty);
1306             fiz2             = _mm256_add_ps(fiz2,tz);
1307
1308             fjx0             = _mm256_add_ps(fjx0,tx);
1309             fjy0             = _mm256_add_ps(fjy0,ty);
1310             fjz0             = _mm256_add_ps(fjz0,tz);
1311
1312             }
1313
1314             /**************************
1315              * CALCULATE INTERACTIONS *
1316              **************************/
1317
1318             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1319             {
1320
1321             r21              = _mm256_mul_ps(rsq21,rinv21);
1322             r21              = _mm256_andnot_ps(dummy_mask,r21);
1323
1324             /* EWALD ELECTROSTATICS */
1325             
1326             /* Analytical PME correction */
1327             zeta2            = _mm256_mul_ps(beta2,rsq21);
1328             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1329             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1330             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1331             felec            = _mm256_mul_ps(qq21,felec);
1332             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1333             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1334             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
1335             velec            = _mm256_mul_ps(qq21,velec);
1336             
1337             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1338
1339             /* Update potential sum for this i atom from the interaction with this j atom. */
1340             velec            = _mm256_and_ps(velec,cutoff_mask);
1341             velec            = _mm256_andnot_ps(dummy_mask,velec);
1342             velecsum         = _mm256_add_ps(velecsum,velec);
1343
1344             fscal            = felec;
1345
1346             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1347
1348             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1349
1350             /* Calculate temporary vectorial force */
1351             tx               = _mm256_mul_ps(fscal,dx21);
1352             ty               = _mm256_mul_ps(fscal,dy21);
1353             tz               = _mm256_mul_ps(fscal,dz21);
1354
1355             /* Update vectorial force */
1356             fix2             = _mm256_add_ps(fix2,tx);
1357             fiy2             = _mm256_add_ps(fiy2,ty);
1358             fiz2             = _mm256_add_ps(fiz2,tz);
1359
1360             fjx1             = _mm256_add_ps(fjx1,tx);
1361             fjy1             = _mm256_add_ps(fjy1,ty);
1362             fjz1             = _mm256_add_ps(fjz1,tz);
1363
1364             }
1365
1366             /**************************
1367              * CALCULATE INTERACTIONS *
1368              **************************/
1369
1370             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1371             {
1372
1373             r22              = _mm256_mul_ps(rsq22,rinv22);
1374             r22              = _mm256_andnot_ps(dummy_mask,r22);
1375
1376             /* EWALD ELECTROSTATICS */
1377             
1378             /* Analytical PME correction */
1379             zeta2            = _mm256_mul_ps(beta2,rsq22);
1380             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1381             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1382             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1383             felec            = _mm256_mul_ps(qq22,felec);
1384             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1385             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1386             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
1387             velec            = _mm256_mul_ps(qq22,velec);
1388             
1389             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1390
1391             /* Update potential sum for this i atom from the interaction with this j atom. */
1392             velec            = _mm256_and_ps(velec,cutoff_mask);
1393             velec            = _mm256_andnot_ps(dummy_mask,velec);
1394             velecsum         = _mm256_add_ps(velecsum,velec);
1395
1396             fscal            = felec;
1397
1398             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1399
1400             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1401
1402             /* Calculate temporary vectorial force */
1403             tx               = _mm256_mul_ps(fscal,dx22);
1404             ty               = _mm256_mul_ps(fscal,dy22);
1405             tz               = _mm256_mul_ps(fscal,dz22);
1406
1407             /* Update vectorial force */
1408             fix2             = _mm256_add_ps(fix2,tx);
1409             fiy2             = _mm256_add_ps(fiy2,ty);
1410             fiz2             = _mm256_add_ps(fiz2,tz);
1411
1412             fjx2             = _mm256_add_ps(fjx2,tx);
1413             fjy2             = _mm256_add_ps(fjy2,ty);
1414             fjz2             = _mm256_add_ps(fjz2,tz);
1415
1416             }
1417
1418             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1419             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1420             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1421             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1422             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1423             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1424             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1425             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1426
1427             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1428                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
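                 /* The fjptr pointers selected above redirect dummy (negative-index)
                  * entries to the local scratch buffer, so the swizzled scatter can
                  * always decrement eight j-force triplets; contributions from
                  * padded list entries land harmlessly in scratch.
                  */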
1429
1430             /* Inner loop uses 1026 flops */
1431         }
1432
1433         /* End of innermost loop */
1434
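             /* gmx_mm256_update_iforce_3atom_swizzle_ps() transposes the nine force
              * accumulators (three i atoms times x/y/z), sums their eight lanes and
              * adds the results to f[] as well as to the shift-force array that is
              * later used for the virial.
              */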
1435         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1436                                                  f+i_coord_offset,fshift+i_shift_offset);
1437
1438         ggid                        = gid[iidx];
1439         /* Update potential energies */
1440         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1441         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
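             /* Each gmx_mm256_update_1pot_ps() call above horizontally sums the
              * eight lanes of its accumulator and adds the scalar result to the
              * energy-group bin selected by ggid.
              */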
1442
1443         /* Increment number of inner iterations */
1444         inneriter                  += j_index_end - j_index_start;
1445
1446         /* Outer loop uses 20 flops */
1447     }
1448
1449     /* Increment number of outer iterations */
1450     outeriter        += nri;
1451
1452     /* Update outer/inner flops */
1453
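         /* Bookkeeping only: these tallies feed the flop accounting that mdrun
          * reports (20 flops per outer and 1026 per inner iteration here).
          */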
1454     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*1026);
1455 }
1456 /*
1457  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_F_avx_256_single
1458  * Electrostatics interaction: Ewald
1459  * VdW interaction:            LJEwald
1460  * Geometry:                   Water3-Water3
1461  * Calculate force/pot:        Force
1462  */
1463 void
1464 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_F_avx_256_single
1465                     (t_nblist                    * gmx_restrict       nlist,
1466                      rvec                        * gmx_restrict          xx,
1467                      rvec                        * gmx_restrict          ff,
1468                      t_forcerec                  * gmx_restrict          fr,
1469                      t_mdatoms                   * gmx_restrict     mdatoms,
1470                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1471                      t_nrnb                      * gmx_restrict        nrnb)
1472 {
1473     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1474      * just 0 for non-waters.
1475      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1476      * jnr indices corresponding to data put in the eight positions in the SIMD register.
1477      */
1478     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1479     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1480     int              jnrA,jnrB,jnrC,jnrD;
1481     int              jnrE,jnrF,jnrG,jnrH;
1482     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1483     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1484     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1485     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1486     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1487     real             rcutoff_scalar;
1488     real             *shiftvec,*fshift,*x,*f;
1489     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1490     real             scratch[4*DIM];
1491     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1492     real *           vdwioffsetptr0;
1493     real *           vdwgridioffsetptr0;
1494     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1495     real *           vdwioffsetptr1;
1496     real *           vdwgridioffsetptr1;
1497     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1498     real *           vdwioffsetptr2;
1499     real *           vdwgridioffsetptr2;
1500     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1501     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1502     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1503     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1504     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1505     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1506     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1507     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1508     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1509     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1510     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1511     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1512     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1513     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1514     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1515     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1516     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1517     real             *charge;
1518     int              nvdwtype;
1519     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1520     int              *vdwtype;
1521     real             *vdwparam;
1522     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
1523     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
1524     __m256           c6grid_00;
1525     __m256           c6grid_01;
1526     __m256           c6grid_02;
1527     __m256           c6grid_10;
1528     __m256           c6grid_11;
1529     __m256           c6grid_12;
1530     __m256           c6grid_20;
1531     __m256           c6grid_21;
1532     __m256           c6grid_22;
1533     real             *vdwgridparam;
1534     __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
1535     __m256           one_half  = _mm256_set1_ps(0.5);
1536     __m256           minus_one = _mm256_set1_ps(-1.0);
1537     __m256i          ewitab;
1538     __m128i          ewitab_lo,ewitab_hi;
1539     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1540     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1541     real             *ewtab;
1542     __m256           dummy_mask,cutoff_mask;
1543     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1544     __m256           one     = _mm256_set1_ps(1.0);
1545     __m256           two     = _mm256_set1_ps(2.0);
1546     x                = xx[0];
1547     f                = ff[0];
1548
1549     nri              = nlist->nri;
1550     iinr             = nlist->iinr;
1551     jindex           = nlist->jindex;
1552     jjnr             = nlist->jjnr;
1553     shiftidx         = nlist->shift;
1554     gid              = nlist->gid;
1555     shiftvec         = fr->shift_vec[0];
1556     fshift           = fr->fshift[0];
1557     facel            = _mm256_set1_ps(fr->epsfac);
1558     charge           = mdatoms->chargeA;
1559     nvdwtype         = fr->ntype;
1560     vdwparam         = fr->nbfp;
1561     vdwtype          = mdatoms->typeA;
1562     vdwgridparam     = fr->ljpme_c6grid;
1563     sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
1564     ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
1565     ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
1566
1567     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1568     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1569     beta2            = _mm256_mul_ps(beta,beta);
1570     beta3            = _mm256_mul_ps(beta,beta2);
1571
1572     ewtab            = fr->ic->tabq_coul_F;
1573     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1574     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1575
1576     /* Setup water-specific parameters */
1577     inr              = nlist->iinr[0];
1578     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1579     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1580     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1581     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1582     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1583
1584     jq0              = _mm256_set1_ps(charge[inr+0]);
1585     jq1              = _mm256_set1_ps(charge[inr+1]);
1586     jq2              = _mm256_set1_ps(charge[inr+2]);
1587     vdwjidx0A        = 2*vdwtype[inr+0];
1588     qq00             = _mm256_mul_ps(iq0,jq0);
1589     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1590     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1591     c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
1592     qq01             = _mm256_mul_ps(iq0,jq1);
1593     qq02             = _mm256_mul_ps(iq0,jq2);
1594     qq10             = _mm256_mul_ps(iq1,jq0);
1595     qq11             = _mm256_mul_ps(iq1,jq1);
1596     qq12             = _mm256_mul_ps(iq1,jq2);
1597     qq20             = _mm256_mul_ps(iq2,jq0);
1598     qq21             = _mm256_mul_ps(iq2,jq1);
1599     qq22             = _mm256_mul_ps(iq2,jq2);
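         /* All waters are identical, so the nine charge products and the single
          * LJ pair (only atom 0 carries VdW parameters in this kernel) are loop
          * invariants. A scalar sketch of the qq precompute above (illustrative
          * names, not GROMACS API):
          *
          *   for (ii = 0; ii < 3; ii++)
          *       for (jj = 0; jj < 3; jj++)
          *           qq[ii][jj] = epsfac * charge[inr+ii] * charge[inr+jj];
          */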
1600
1601     /* When we use explicit cutoffs, the value must be identical for elec and VdW, so we use the elec value as an arbitrary choice */
1602     rcutoff_scalar   = fr->rcoulomb;
1603     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1604     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1605
1606     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
1607     rvdw             = _mm256_set1_ps(fr->rvdw);
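         /* The "Sh" in the kernel name denotes potential-shift cutoffs: sh_ewald
          * (the Ewald potential evaluated at rcutoff) and sh_vdw_invrcut6 (1/rc^6)
          * are the constants subtracted so that the potentials vanish exactly at
          * the cutoff while the forces are unchanged.
          */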
1608
1609     /* Initialize to avoid spurious compiler warnings about possibly uninitialized variables */
1610     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1611     j_coord_offsetA = 0;
1612     j_coord_offsetB = 0;
1613     j_coord_offsetC = 0;
1614     j_coord_offsetD = 0;
1615     j_coord_offsetE = 0;
1616     j_coord_offsetF = 0;
1617     j_coord_offsetG = 0;
1618     j_coord_offsetH = 0;
1619
1620     outeriter        = 0;
1621     inneriter        = 0;
1622
1623     for(iidx=0;iidx<4*DIM;iidx++)
1624     {
1625         scratch[iidx] = 0.0;
1626     }
1627
1628     /* Start outer loop over neighborlists */
1629     for(iidx=0; iidx<nri; iidx++)
1630     {
1631         /* Load shift vector for this list */
1632         i_shift_offset   = DIM*shiftidx[iidx];
1633
1634         /* Load limits for loop over neighbors */
1635         j_index_start    = jindex[iidx];
1636         j_index_end      = jindex[iidx+1];
1637
1638         /* Get outer coordinate index */
1639         inr              = iinr[iidx];
1640         i_coord_offset   = DIM*inr;
1641
1642         /* Load i particle coords and add shift vector */
1643         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1644                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1645
1646         fix0             = _mm256_setzero_ps();
1647         fiy0             = _mm256_setzero_ps();
1648         fiz0             = _mm256_setzero_ps();
1649         fix1             = _mm256_setzero_ps();
1650         fiy1             = _mm256_setzero_ps();
1651         fiz1             = _mm256_setzero_ps();
1652         fix2             = _mm256_setzero_ps();
1653         fiy2             = _mm256_setzero_ps();
1654         fiz2             = _mm256_setzero_ps();
1655
1656         /* Start inner kernel loop */
1657         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1658         {
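                 /* The main loop only handles full octets of real j atoms: the
                  * neighbor list is padded with negative indices, and the
                  * jjnr[jidx+7]>=0 condition above defers any partially filled
                  * final octet to the masked epilogue loop that follows.
                  */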
1659
1660             /* Get j neighbor index, and coordinate index */
1661             jnrA             = jjnr[jidx];
1662             jnrB             = jjnr[jidx+1];
1663             jnrC             = jjnr[jidx+2];
1664             jnrD             = jjnr[jidx+3];
1665             jnrE             = jjnr[jidx+4];
1666             jnrF             = jjnr[jidx+5];
1667             jnrG             = jjnr[jidx+6];
1668             jnrH             = jjnr[jidx+7];
1669             j_coord_offsetA  = DIM*jnrA;
1670             j_coord_offsetB  = DIM*jnrB;
1671             j_coord_offsetC  = DIM*jnrC;
1672             j_coord_offsetD  = DIM*jnrD;
1673             j_coord_offsetE  = DIM*jnrE;
1674             j_coord_offsetF  = DIM*jnrF;
1675             j_coord_offsetG  = DIM*jnrG;
1676             j_coord_offsetH  = DIM*jnrH;
1677
1678             /* load j atom coordinates */
1679             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1680                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1681                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1682                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1683                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1684
1685             /* Calculate displacement vector */
1686             dx00             = _mm256_sub_ps(ix0,jx0);
1687             dy00             = _mm256_sub_ps(iy0,jy0);
1688             dz00             = _mm256_sub_ps(iz0,jz0);
1689             dx01             = _mm256_sub_ps(ix0,jx1);
1690             dy01             = _mm256_sub_ps(iy0,jy1);
1691             dz01             = _mm256_sub_ps(iz0,jz1);
1692             dx02             = _mm256_sub_ps(ix0,jx2);
1693             dy02             = _mm256_sub_ps(iy0,jy2);
1694             dz02             = _mm256_sub_ps(iz0,jz2);
1695             dx10             = _mm256_sub_ps(ix1,jx0);
1696             dy10             = _mm256_sub_ps(iy1,jy0);
1697             dz10             = _mm256_sub_ps(iz1,jz0);
1698             dx11             = _mm256_sub_ps(ix1,jx1);
1699             dy11             = _mm256_sub_ps(iy1,jy1);
1700             dz11             = _mm256_sub_ps(iz1,jz1);
1701             dx12             = _mm256_sub_ps(ix1,jx2);
1702             dy12             = _mm256_sub_ps(iy1,jy2);
1703             dz12             = _mm256_sub_ps(iz1,jz2);
1704             dx20             = _mm256_sub_ps(ix2,jx0);
1705             dy20             = _mm256_sub_ps(iy2,jy0);
1706             dz20             = _mm256_sub_ps(iz2,jz0);
1707             dx21             = _mm256_sub_ps(ix2,jx1);
1708             dy21             = _mm256_sub_ps(iy2,jy1);
1709             dz21             = _mm256_sub_ps(iz2,jz1);
1710             dx22             = _mm256_sub_ps(ix2,jx2);
1711             dy22             = _mm256_sub_ps(iy2,jy2);
1712             dz22             = _mm256_sub_ps(iz2,jz2);
1713
1714             /* Calculate squared distance and things based on it */
1715             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1716             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1717             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1718             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1719             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1720             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1721             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1722             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1723             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1724
1725             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1726             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1727             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1728             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1729             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1730             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1731             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1732             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1733             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1734
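                 /* gmx_mm256_invsqrt_ps() is, in GROMACS's single-precision SIMD
                  * layer, typically the hardware rsqrt estimate refined with one
                  * Newton-Raphson iteration; full-accuracy division/sqrt is not
                  * needed for these kernels.
                  */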
1735             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1736             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1737             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1738             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1739             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1740             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1741             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1742             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1743             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1744
1745             fjx0             = _mm256_setzero_ps();
1746             fjy0             = _mm256_setzero_ps();
1747             fjz0             = _mm256_setzero_ps();
1748             fjx1             = _mm256_setzero_ps();
1749             fjy1             = _mm256_setzero_ps();
1750             fjz1             = _mm256_setzero_ps();
1751             fjx2             = _mm256_setzero_ps();
1752             fjy2             = _mm256_setzero_ps();
1753             fjz2             = _mm256_setzero_ps();
1754
1755             /**************************
1756              * CALCULATE INTERACTIONS *
1757              **************************/
1758
1759             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1760             {
1761
1762             r00              = _mm256_mul_ps(rsq00,rinv00);
1763
1764             /* EWALD ELECTROSTATICS */
1765             
1766             /* Analytical PME correction */
1767             zeta2            = _mm256_mul_ps(beta2,rsq00);
1768             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1769             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1770             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1771             felec            = _mm256_mul_ps(qq00,felec);
1772             
1773             /* Analytical LJ-PME */
1774             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1775             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1776             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1777             exponent         = gmx_simd_exp_r(ewcljrsq);
1778             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1779             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1780             /* f6A = 6 * C6grid * (1 - poly) */
1781             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1782             /* f6B = C6grid * exponent * beta^6 */
1783             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1784             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1785             fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
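                 /* A scalar sketch of the LJ-PME (LJEwald) force above, written in
                  * the kernel's own variables. Note that ewclj2 = -beta_lj^2, so
                  * exponent = exp(-(beta_lj*r)^2), and that the tabulated c6/c12
                  * (and c6grid) carry the usual factors of 6 and 12 premultiplied
                  * into the parameter tables:
                  *
                  *   x2   = (beta_lj*r)^2;
                  *   poly = exp(-x2) * (1 + x2 + x2*x2/2);
                  *   fvdw = ( c12/pow(r,12)
                  *          - (c6 - c6grid*(1 - poly))/pow(r,6)
                  *          - (c6grid/6)*pow(beta_lj,6)*exp(-x2) ) / (r*r);
                  *
                  * Like felec, fvdw is the scalar force divided by r.
                  */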
1786
1787             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1788
1789             fscal            = _mm256_add_ps(felec,fvdw);
1790
1791             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1792
1793             /* Calculate temporary vectorial force */
1794             tx               = _mm256_mul_ps(fscal,dx00);
1795             ty               = _mm256_mul_ps(fscal,dy00);
1796             tz               = _mm256_mul_ps(fscal,dz00);
1797
1798             /* Update vectorial force */
1799             fix0             = _mm256_add_ps(fix0,tx);
1800             fiy0             = _mm256_add_ps(fiy0,ty);
1801             fiz0             = _mm256_add_ps(fiz0,tz);
1802
1803             fjx0             = _mm256_add_ps(fjx0,tx);
1804             fjy0             = _mm256_add_ps(fjy0,ty);
1805             fjz0             = _mm256_add_ps(fjz0,tz);
1806
1807             }
1808
1809             /**************************
1810              * CALCULATE INTERACTIONS *
1811              **************************/
1812
1813             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1814             {
1815
1816             r01              = _mm256_mul_ps(rsq01,rinv01);
1817
1818             /* EWALD ELECTROSTATICS */
1819             
1820             /* Analytical PME correction */
1821             zeta2            = _mm256_mul_ps(beta2,rsq01);
1822             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1823             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1824             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1825             felec            = _mm256_mul_ps(qq01,felec);
1826             
1827             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1828
1829             fscal            = felec;
1830
1831             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1832
1833             /* Calculate temporary vectorial force */
1834             tx               = _mm256_mul_ps(fscal,dx01);
1835             ty               = _mm256_mul_ps(fscal,dy01);
1836             tz               = _mm256_mul_ps(fscal,dz01);
1837
1838             /* Update vectorial force */
1839             fix0             = _mm256_add_ps(fix0,tx);
1840             fiy0             = _mm256_add_ps(fiy0,ty);
1841             fiz0             = _mm256_add_ps(fiz0,tz);
1842
1843             fjx1             = _mm256_add_ps(fjx1,tx);
1844             fjy1             = _mm256_add_ps(fjy1,ty);
1845             fjz1             = _mm256_add_ps(fjz1,tz);
1846
1847             }
1848
1849             /**************************
1850              * CALCULATE INTERACTIONS *
1851              **************************/
1852
1853             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1854             {
1855
1856             r02              = _mm256_mul_ps(rsq02,rinv02);
1857
1858             /* EWALD ELECTROSTATICS */
1859             
1860             /* Analytical PME correction */
1861             zeta2            = _mm256_mul_ps(beta2,rsq02);
1862             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1863             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1864             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1865             felec            = _mm256_mul_ps(qq02,felec);
1866             
1867             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1868
1869             fscal            = felec;
1870
1871             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1872
1873             /* Calculate temporary vectorial force */
1874             tx               = _mm256_mul_ps(fscal,dx02);
1875             ty               = _mm256_mul_ps(fscal,dy02);
1876             tz               = _mm256_mul_ps(fscal,dz02);
1877
1878             /* Update vectorial force */
1879             fix0             = _mm256_add_ps(fix0,tx);
1880             fiy0             = _mm256_add_ps(fiy0,ty);
1881             fiz0             = _mm256_add_ps(fiz0,tz);
1882
1883             fjx2             = _mm256_add_ps(fjx2,tx);
1884             fjy2             = _mm256_add_ps(fjy2,ty);
1885             fjz2             = _mm256_add_ps(fjz2,tz);
1886
1887             }
1888
1889             /**************************
1890              * CALCULATE INTERACTIONS *
1891              **************************/
1892
1893             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1894             {
1895
1896             r10              = _mm256_mul_ps(rsq10,rinv10);
1897
1898             /* EWALD ELECTROSTATICS */
1899             
1900             /* Analytical PME correction */
1901             zeta2            = _mm256_mul_ps(beta2,rsq10);
1902             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1903             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1904             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1905             felec            = _mm256_mul_ps(qq10,felec);
1906             
1907             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1908
1909             fscal            = felec;
1910
1911             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1912
1913             /* Calculate temporary vectorial force */
1914             tx               = _mm256_mul_ps(fscal,dx10);
1915             ty               = _mm256_mul_ps(fscal,dy10);
1916             tz               = _mm256_mul_ps(fscal,dz10);
1917
1918             /* Update vectorial force */
1919             fix1             = _mm256_add_ps(fix1,tx);
1920             fiy1             = _mm256_add_ps(fiy1,ty);
1921             fiz1             = _mm256_add_ps(fiz1,tz);
1922
1923             fjx0             = _mm256_add_ps(fjx0,tx);
1924             fjy0             = _mm256_add_ps(fjy0,ty);
1925             fjz0             = _mm256_add_ps(fjz0,tz);
1926
1927             }
1928
1929             /**************************
1930              * CALCULATE INTERACTIONS *
1931              **************************/
1932
1933             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1934             {
1935
1936             r11              = _mm256_mul_ps(rsq11,rinv11);
1937
1938             /* EWALD ELECTROSTATICS */
1939             
1940             /* Analytical PME correction */
1941             zeta2            = _mm256_mul_ps(beta2,rsq11);
1942             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1943             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1944             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1945             felec            = _mm256_mul_ps(qq11,felec);
1946             
1947             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1948
1949             fscal            = felec;
1950
1951             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1952
1953             /* Calculate temporary vectorial force */
1954             tx               = _mm256_mul_ps(fscal,dx11);
1955             ty               = _mm256_mul_ps(fscal,dy11);
1956             tz               = _mm256_mul_ps(fscal,dz11);
1957
1958             /* Update vectorial force */
1959             fix1             = _mm256_add_ps(fix1,tx);
1960             fiy1             = _mm256_add_ps(fiy1,ty);
1961             fiz1             = _mm256_add_ps(fiz1,tz);
1962
1963             fjx1             = _mm256_add_ps(fjx1,tx);
1964             fjy1             = _mm256_add_ps(fjy1,ty);
1965             fjz1             = _mm256_add_ps(fjz1,tz);
1966
1967             }
1968
1969             /**************************
1970              * CALCULATE INTERACTIONS *
1971              **************************/
1972
1973             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1974             {
1975
1976             r12              = _mm256_mul_ps(rsq12,rinv12);
1977
1978             /* EWALD ELECTROSTATICS */
1979             
1980             /* Analytical PME correction */
1981             zeta2            = _mm256_mul_ps(beta2,rsq12);
1982             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1983             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1984             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1985             felec            = _mm256_mul_ps(qq12,felec);
1986             
1987             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1988
1989             fscal            = felec;
1990
1991             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1992
1993             /* Calculate temporary vectorial force */
1994             tx               = _mm256_mul_ps(fscal,dx12);
1995             ty               = _mm256_mul_ps(fscal,dy12);
1996             tz               = _mm256_mul_ps(fscal,dz12);
1997
1998             /* Update vectorial force */
1999             fix1             = _mm256_add_ps(fix1,tx);
2000             fiy1             = _mm256_add_ps(fiy1,ty);
2001             fiz1             = _mm256_add_ps(fiz1,tz);
2002
2003             fjx2             = _mm256_add_ps(fjx2,tx);
2004             fjy2             = _mm256_add_ps(fjy2,ty);
2005             fjz2             = _mm256_add_ps(fjz2,tz);
2006
2007             }
2008
2009             /**************************
2010              * CALCULATE INTERACTIONS *
2011              **************************/
2012
2013             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2014             {
2015
2016             r20              = _mm256_mul_ps(rsq20,rinv20);
2017
2018             /* EWALD ELECTROSTATICS */
2019             
2020             /* Analytical PME correction */
2021             zeta2            = _mm256_mul_ps(beta2,rsq20);
2022             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
2023             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2024             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2025             felec            = _mm256_mul_ps(qq20,felec);
2026             
2027             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
2028
2029             fscal            = felec;
2030
2031             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2032
2033             /* Calculate temporary vectorial force */
2034             tx               = _mm256_mul_ps(fscal,dx20);
2035             ty               = _mm256_mul_ps(fscal,dy20);
2036             tz               = _mm256_mul_ps(fscal,dz20);
2037
2038             /* Update vectorial force */
2039             fix2             = _mm256_add_ps(fix2,tx);
2040             fiy2             = _mm256_add_ps(fiy2,ty);
2041             fiz2             = _mm256_add_ps(fiz2,tz);
2042
2043             fjx0             = _mm256_add_ps(fjx0,tx);
2044             fjy0             = _mm256_add_ps(fjy0,ty);
2045             fjz0             = _mm256_add_ps(fjz0,tz);
2046
2047             }
2048
2049             /**************************
2050              * CALCULATE INTERACTIONS *
2051              **************************/
2052
2053             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2054             {
2055
2056             r21              = _mm256_mul_ps(rsq21,rinv21);
2057
2058             /* EWALD ELECTROSTATICS */
2059             
2060             /* Analytical PME correction */
2061             zeta2            = _mm256_mul_ps(beta2,rsq21);
2062             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2063             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2064             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2065             felec            = _mm256_mul_ps(qq21,felec);
2066             
2067             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2068
2069             fscal            = felec;
2070
2071             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2072
2073             /* Calculate temporary vectorial force */
2074             tx               = _mm256_mul_ps(fscal,dx21);
2075             ty               = _mm256_mul_ps(fscal,dy21);
2076             tz               = _mm256_mul_ps(fscal,dz21);
2077
2078             /* Update vectorial force */
2079             fix2             = _mm256_add_ps(fix2,tx);
2080             fiy2             = _mm256_add_ps(fiy2,ty);
2081             fiz2             = _mm256_add_ps(fiz2,tz);
2082
2083             fjx1             = _mm256_add_ps(fjx1,tx);
2084             fjy1             = _mm256_add_ps(fjy1,ty);
2085             fjz1             = _mm256_add_ps(fjz1,tz);
2086
2087             }
2088
2089             /**************************
2090              * CALCULATE INTERACTIONS *
2091              **************************/
2092
2093             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2094             {
2095
2096             r22              = _mm256_mul_ps(rsq22,rinv22);
2097
2098             /* EWALD ELECTROSTATICS */
2099             
2100             /* Analytical PME correction */
2101             zeta2            = _mm256_mul_ps(beta2,rsq22);
2102             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2103             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2104             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2105             felec            = _mm256_mul_ps(qq22,felec);
2106             
2107             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2108
2109             fscal            = felec;
2110
2111             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2112
2113             /* Calculate temporary vectorial force */
2114             tx               = _mm256_mul_ps(fscal,dx22);
2115             ty               = _mm256_mul_ps(fscal,dy22);
2116             tz               = _mm256_mul_ps(fscal,dz22);
2117
2118             /* Update vectorial force */
2119             fix2             = _mm256_add_ps(fix2,tx);
2120             fiy2             = _mm256_add_ps(fiy2,ty);
2121             fiz2             = _mm256_add_ps(fiz2,tz);
2122
2123             fjx2             = _mm256_add_ps(fjx2,tx);
2124             fjy2             = _mm256_add_ps(fjy2,ty);
2125             fjz2             = _mm256_add_ps(fjz2,tz);
2126
2127             }
2128
2129             fjptrA             = f+j_coord_offsetA;
2130             fjptrB             = f+j_coord_offsetB;
2131             fjptrC             = f+j_coord_offsetC;
2132             fjptrD             = f+j_coord_offsetD;
2133             fjptrE             = f+j_coord_offsetE;
2134             fjptrF             = f+j_coord_offsetF;
2135             fjptrG             = f+j_coord_offsetG;
2136             fjptrH             = f+j_coord_offsetH;
2137
2138             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2139                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2140
2141             /* Inner loop uses 554 flops */
2142         }
2143
2144         if(jidx<j_index_end)
2145         {
2146
2147             /* Get j neighbor index, and coordinate index */
2148             jnrlistA         = jjnr[jidx];
2149             jnrlistB         = jjnr[jidx+1];
2150             jnrlistC         = jjnr[jidx+2];
2151             jnrlistD         = jjnr[jidx+3];
2152             jnrlistE         = jjnr[jidx+4];
2153             jnrlistF         = jjnr[jidx+5];
2154             jnrlistG         = jjnr[jidx+6];
2155             jnrlistH         = jjnr[jidx+7];
2156             /* Padded (non-real) entries in jjnr are negative.
2157              * The mask below is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2158              * so it is used as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
2159              */
2160             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2161                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
2162                                             
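                 /* The dummy mask is built from the raw (possibly negative) list
                  * entries with _mm_cmplt_epi32 on each 128-bit half; the clamped
                  * jnr values below keep every coordinate load in bounds, and the
                  * andnot operations in the interaction blocks zero out the
                  * contributions of the padded lanes.
                  */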
2163             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2164             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2165             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2166             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2167             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2168             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2169             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2170             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
2171             j_coord_offsetA  = DIM*jnrA;
2172             j_coord_offsetB  = DIM*jnrB;
2173             j_coord_offsetC  = DIM*jnrC;
2174             j_coord_offsetD  = DIM*jnrD;
2175             j_coord_offsetE  = DIM*jnrE;
2176             j_coord_offsetF  = DIM*jnrF;
2177             j_coord_offsetG  = DIM*jnrG;
2178             j_coord_offsetH  = DIM*jnrH;
2179
2180             /* load j atom coordinates */
2181             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2182                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2183                                                  x+j_coord_offsetE,x+j_coord_offsetF,
2184                                                  x+j_coord_offsetG,x+j_coord_offsetH,
2185                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2186
2187             /* Calculate displacement vector */
2188             dx00             = _mm256_sub_ps(ix0,jx0);
2189             dy00             = _mm256_sub_ps(iy0,jy0);
2190             dz00             = _mm256_sub_ps(iz0,jz0);
2191             dx01             = _mm256_sub_ps(ix0,jx1);
2192             dy01             = _mm256_sub_ps(iy0,jy1);
2193             dz01             = _mm256_sub_ps(iz0,jz1);
2194             dx02             = _mm256_sub_ps(ix0,jx2);
2195             dy02             = _mm256_sub_ps(iy0,jy2);
2196             dz02             = _mm256_sub_ps(iz0,jz2);
2197             dx10             = _mm256_sub_ps(ix1,jx0);
2198             dy10             = _mm256_sub_ps(iy1,jy0);
2199             dz10             = _mm256_sub_ps(iz1,jz0);
2200             dx11             = _mm256_sub_ps(ix1,jx1);
2201             dy11             = _mm256_sub_ps(iy1,jy1);
2202             dz11             = _mm256_sub_ps(iz1,jz1);
2203             dx12             = _mm256_sub_ps(ix1,jx2);
2204             dy12             = _mm256_sub_ps(iy1,jy2);
2205             dz12             = _mm256_sub_ps(iz1,jz2);
2206             dx20             = _mm256_sub_ps(ix2,jx0);
2207             dy20             = _mm256_sub_ps(iy2,jy0);
2208             dz20             = _mm256_sub_ps(iz2,jz0);
2209             dx21             = _mm256_sub_ps(ix2,jx1);
2210             dy21             = _mm256_sub_ps(iy2,jy1);
2211             dz21             = _mm256_sub_ps(iz2,jz1);
2212             dx22             = _mm256_sub_ps(ix2,jx2);
2213             dy22             = _mm256_sub_ps(iy2,jy2);
2214             dz22             = _mm256_sub_ps(iz2,jz2);
2215
2216             /* Calculate squared distance and things based on it */
2217             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2218             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
2219             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
2220             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
2221             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2222             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2223             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
2224             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2225             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2226
2227             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
2228             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
2229             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
2230             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
2231             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
2232             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
2233             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
2234             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
2235             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
2236
2237             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
2238             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
2239             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
2240             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
2241             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2242             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2243             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
2244             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2245             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2246
2247             fjx0             = _mm256_setzero_ps();
2248             fjy0             = _mm256_setzero_ps();
2249             fjz0             = _mm256_setzero_ps();
2250             fjx1             = _mm256_setzero_ps();
2251             fjy1             = _mm256_setzero_ps();
2252             fjz1             = _mm256_setzero_ps();
2253             fjx2             = _mm256_setzero_ps();
2254             fjy2             = _mm256_setzero_ps();
2255             fjz2             = _mm256_setzero_ps();
2256
2257             /**************************
2258              * CALCULATE INTERACTIONS *
2259              **************************/
2260
2261             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2262             {
2263
2264             r00              = _mm256_mul_ps(rsq00,rinv00);
2265             r00              = _mm256_andnot_ps(dummy_mask,r00);
2266
2267             /* EWALD ELECTROSTATICS */
2268             
2269             /* Analytical PME correction */
2270             zeta2            = _mm256_mul_ps(beta2,rsq00);
2271             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
2272             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2273             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2274             felec            = _mm256_mul_ps(qq00,felec);
2275             
2276             /* Analytical LJ-PME */
2277             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2278             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
2279             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
2280             exponent         = gmx_simd_exp_r(ewcljrsq);
2281             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
2282             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
2283             /* f6A = 6 * C6grid * (1 - poly) */
2284             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
2285             /* f6B = C6grid * exponent * beta^6 */
2286             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
2287             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
2288             fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
2289
2290             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2291
2292             fscal            = _mm256_add_ps(felec,fvdw);
2293
2294             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2295
2296             fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

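            /* Newton's third law: the same tx/ty/tz accumulate into both the
             * i-side and j-side sums; the j-side totals are subtracted from
             * memory by the decrement-swizzle after the last pair.
             */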
            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

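            /* The remaining eight site pairs all involve at least one
             * hydrogen site; only site 0 (the water oxygen) carries
             * Lennard-Jones parameters here, so these pairs take the Ewald
             * electrostatics path alone.
             */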
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);
            r01              = _mm256_andnot_ps(dummy_mask,r01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);
            r02              = _mm256_andnot_ps(dummy_mask,r02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

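            /* Point padding lanes (jnrlist < 0) at a scratch buffer so the
             * 8-way force scatter below stays branch-free; whatever lands in
             * scratch is simply discarded.
             */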
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

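            /* Transpose the nine j-force accumulators back into per-atom
             * xyz triplets and subtract them from the eight j waters in
             * memory (subtract, because fj* accumulated the force on i).
             */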
            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 563 flops */
        }

        /* End of innermost loop */

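        /* Reduce the three i-atom force accumulators over the eight SIMD
         * lanes, add them to the per-atom force array, and add the same
         * totals to the shift-force buffer that feeds the virial.
         */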
        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*563);
}