/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "gmxpre.h"

#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/gmxlib/nrnb.h"

#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LennardJones
 * Geometry:                   Water4-Water4
 * Calculate force/pot:        PotentialAndForce
 */
void
nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     struct t_forcerec           * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
    __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->ic->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
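
    /* Note: beta is the Ewald splitting parameter (fr->ic->ewaldcoeff_q); its
     * powers are reused by the analytical PME correction in the interaction
     * blocks below. The quadratic-spline table parameters are also set up
     * here, but since this kernel uses the analytical correction they appear
     * to be unused generator leftovers.
     */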

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    jq3              = _mm256_set1_ps(charge[inr+3]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq13             = _mm256_mul_ps(iq1,jq3);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
    qq23             = _mm256_mul_ps(iq2,jq3);
    qq31             = _mm256_mul_ps(iq3,jq1);
    qq32             = _mm256_mul_ps(iq3,jq2);
    qq33             = _mm256_mul_ps(iq3,jq3);
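
    /* In this 4-site water geometry only sites 1-3 carry charge (site 0 is
     * the LJ-only site), and all waters share the same parameters, so the
     * nine charge products above and the single 0-0 LJ pair are precomputed
     * once outside the loops.
     */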

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->ic->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->ic->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();
        fix3             = _mm256_setzero_ps();
        fiy3             = _mm256_setzero_ps();
        fiz3             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {
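            /* The neighbor list is padded to a multiple of 8 with negative
             * indices, so this fully unrolled loop runs only while all eight
             * jjnr entries are real; the masked epilogue below handles the
             * remainder.
             */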

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv11           = avx256_invsqrt_f(rsq11);
            rinv12           = avx256_invsqrt_f(rsq12);
            rinv13           = avx256_invsqrt_f(rsq13);
            rinv21           = avx256_invsqrt_f(rsq21);
            rinv22           = avx256_invsqrt_f(rsq22);
            rinv23           = avx256_invsqrt_f(rsq23);
            rinv31           = avx256_invsqrt_f(rsq31);
            rinv32           = avx256_invsqrt_f(rsq32);
            rinv33           = avx256_invsqrt_f(rsq33);

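            /* The 0-0 pair is LJ-only, so only r^-2 is needed for it (the LJ
             * terms use even powers of r), while the charged pairs also need
             * 1/r for the Ewald terms.
             */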
            rinvsq00         = avx256_inv_f(rsq00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            /* LENNARD-JONES DISPERSION/REPULSION */

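            /* A sketch of the potential-shift algebra: with vvdw6 = c6/r^6,
             * vvdw12 = c12/r^12 and sh_vdw_invrcut6 = 1/rc^6,
             *   V   = (vvdw12 - c12/rc^12)/12 - (vvdw6 - c6/rc^6)/6
             * vanishes at the cutoff, and the scalar force is
             *   F/r = -(dV/dr)/r = (vvdw12 - vvdw6)/r^2.
             * The 1/6 and 1/12 factors compensate for fr->nbfp storing
             * 6*C6 and 12*C12 (the convention assumed here).
             */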
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
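            /* A sketch of the correction, assuming the usual GROMACS
             * definitions: with zeta = beta*r, avx256_pmecorrV_f approximates
             * erf(zeta)/zeta and avx256_pmecorrF_f the matching force term,
             * so that
             *   velec = qq*(1/r - sh_ewald - erf(beta*r)/r)
             *         = qq*(erfc(beta*r)/r - sh_ewald)
             * (sh_ewald shifts the potential to zero at the cutoff), and
             * felec times the displacement gives the erfc-screened Coulomb
             * force. The same pattern repeats for every charged pair below.
             */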
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq32,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq33,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
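
            /* Newton's third law: the accumulated j forces are subtracted
             * from the global force array with a single swizzled store for
             * all eight j waters.
             */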

            /* Inner loop uses 1025 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv11           = avx256_invsqrt_f(rsq11);
            rinv12           = avx256_invsqrt_f(rsq12);
            rinv13           = avx256_invsqrt_f(rsq13);
            rinv21           = avx256_invsqrt_f(rsq21);
            rinv22           = avx256_invsqrt_f(rsq22);
            rinv23           = avx256_invsqrt_f(rsq23);
            rinv31           = avx256_invsqrt_f(rsq31);
            rinv32           = avx256_invsqrt_f(rsq32);
            rinv33           = avx256_invsqrt_f(rsq33);

            rinvsq00         = avx256_inv_f(rsq00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
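            /* In this epilogue, dummy_mask additionally zeroes the lanes that
             * belong to padded (negative) list entries before anything is
             * accumulated.
             */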
978             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
979             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
980             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
981
982             fscal            = fvdw;
983
984             fscal            = _mm256_and_ps(fscal,cutoff_mask);
985
986             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
987
988             /* Calculate temporary vectorial force */
989             tx               = _mm256_mul_ps(fscal,dx00);
990             ty               = _mm256_mul_ps(fscal,dy00);
991             tz               = _mm256_mul_ps(fscal,dz00);
992
993             /* Update vectorial force */
994             fix0             = _mm256_add_ps(fix0,tx);
995             fiy0             = _mm256_add_ps(fiy0,ty);
996             fiz0             = _mm256_add_ps(fiz0,tz);
997
998             fjx0             = _mm256_add_ps(fjx0,tx);
999             fjy0             = _mm256_add_ps(fjy0,ty);
1000             fjz0             = _mm256_add_ps(fjz0,tz);
1001
1002             }
1003
1004             /**************************
1005              * CALCULATE INTERACTIONS *
1006              **************************/
1007
1008             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1009             {
1010
1011             r11              = _mm256_mul_ps(rsq11,rinv11);
1012             r11              = _mm256_andnot_ps(dummy_mask,r11);
1013
1014             /* EWALD ELECTROSTATICS */
1015             
1016             /* Analytical PME correction */
1017             zeta2            = _mm256_mul_ps(beta2,rsq11);
1018             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1019             pmecorrF         = avx256_pmecorrF_f(zeta2);
1020             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1021             felec            = _mm256_mul_ps(qq11,felec);
1022             pmecorrV         = avx256_pmecorrV_f(zeta2);
1023             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1024             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
1025             velec            = _mm256_mul_ps(qq11,velec);
1026             
1027             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1028
1029             /* Update potential sum for this i atom from the interaction with this j atom. */
1030             velec            = _mm256_and_ps(velec,cutoff_mask);
1031             velec            = _mm256_andnot_ps(dummy_mask,velec);
1032             velecsum         = _mm256_add_ps(velecsum,velec);
1033
1034             fscal            = felec;
1035
1036             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1037
1038             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1039
1040             /* Calculate temporary vectorial force */
1041             tx               = _mm256_mul_ps(fscal,dx11);
1042             ty               = _mm256_mul_ps(fscal,dy11);
1043             tz               = _mm256_mul_ps(fscal,dz11);
1044
1045             /* Update vectorial force */
1046             fix1             = _mm256_add_ps(fix1,tx);
1047             fiy1             = _mm256_add_ps(fiy1,ty);
1048             fiz1             = _mm256_add_ps(fiz1,tz);
1049
1050             fjx1             = _mm256_add_ps(fjx1,tx);
1051             fjy1             = _mm256_add_ps(fjy1,ty);
1052             fjz1             = _mm256_add_ps(fjz1,tz);
1053
1054             }
1055
1056             /**************************
1057              * CALCULATE INTERACTIONS *
1058              **************************/
1059
1060             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1061             {
1062
1063             r12              = _mm256_mul_ps(rsq12,rinv12);
1064             r12              = _mm256_andnot_ps(dummy_mask,r12);
1065
1066             /* EWALD ELECTROSTATICS */
1067             
1068             /* Analytical PME correction */
1069             zeta2            = _mm256_mul_ps(beta2,rsq12);
1070             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1071             pmecorrF         = avx256_pmecorrF_f(zeta2);
1072             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1073             felec            = _mm256_mul_ps(qq12,felec);
1074             pmecorrV         = avx256_pmecorrV_f(zeta2);
1075             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1076             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
1077             velec            = _mm256_mul_ps(qq12,velec);
1078             
1079             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1080
1081             /* Update potential sum for this i atom from the interaction with this j atom. */
1082             velec            = _mm256_and_ps(velec,cutoff_mask);
1083             velec            = _mm256_andnot_ps(dummy_mask,velec);
1084             velecsum         = _mm256_add_ps(velecsum,velec);
1085
1086             fscal            = felec;
1087
1088             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1089
1090             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1091
1092             /* Calculate temporary vectorial force */
1093             tx               = _mm256_mul_ps(fscal,dx12);
1094             ty               = _mm256_mul_ps(fscal,dy12);
1095             tz               = _mm256_mul_ps(fscal,dz12);
1096
1097             /* Update vectorial force */
1098             fix1             = _mm256_add_ps(fix1,tx);
1099             fiy1             = _mm256_add_ps(fiy1,ty);
1100             fiz1             = _mm256_add_ps(fiz1,tz);
1101
1102             fjx2             = _mm256_add_ps(fjx2,tx);
1103             fjy2             = _mm256_add_ps(fjy2,ty);
1104             fjz2             = _mm256_add_ps(fjz2,tz);
1105
1106             }
1107
1108             /**************************
1109              * CALCULATE INTERACTIONS *
1110              **************************/
1111
1112             if (gmx_mm256_any_lt(rsq13,rcutoff2))
1113             {
1114
1115             r13              = _mm256_mul_ps(rsq13,rinv13);
1116             r13              = _mm256_andnot_ps(dummy_mask,r13);
1117
1118             /* EWALD ELECTROSTATICS */
1119             
1120             /* Analytical PME correction */
1121             zeta2            = _mm256_mul_ps(beta2,rsq13);
1122             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
1123             pmecorrF         = avx256_pmecorrF_f(zeta2);
1124             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1125             felec            = _mm256_mul_ps(qq13,felec);
1126             pmecorrV         = avx256_pmecorrV_f(zeta2);
1127             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1128             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
1129             velec            = _mm256_mul_ps(qq13,velec);
1130             
            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);
            r23              = _mm256_andnot_ps(dummy_mask,r23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);
            r31              = _mm256_andnot_ps(dummy_mask,r31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);
            r32              = _mm256_andnot_ps(dummy_mask,r32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq32,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);
            r33              = _mm256_andnot_ps(dummy_mask,r33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq33,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
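
            /* The fj* accumulators hold the same tx/ty/tz contributions that were
             * added to the fi* registers, so decrementing the j forces here gives
             * the Newton's-third-law reaction forces. Lanes whose jnrlist entry
             * was negative (neighbor-list padding) were pointed at the local
             * scratch buffer above, so their stores are harmlessly discarded
             * instead of corrupting real force entries.
             */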

            /* Inner loop uses 1034 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset,fshift+i_shift_offset);
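
        /* gmx_mm256_update_iforce_4atom_swizzle_ps() reduces the eight lanes of
         * each i-site force accumulator, then adds the result both to the force
         * array and to fshift, the per-shift force sums that feed the virial.
         */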

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 26 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*1034);
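
    /* The flop counts registered with inc_nrnb() only feed the performance
     * accounting printed in the md.log summary; they do not affect the
     * computed forces or energies.
     */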
}
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LennardJones
 * Geometry:                   Water4-Water4
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     struct t_forcerec           * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
    __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->ic->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_F;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    jq3              = _mm256_set1_ps(charge[inr+3]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq13             = _mm256_mul_ps(iq1,jq3);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
    qq23             = _mm256_mul_ps(iq2,jq3);
    qq31             = _mm256_mul_ps(iq3,jq1);
    qq32             = _mm256_mul_ps(iq3,jq2);
    qq33             = _mm256_mul_ps(iq3,jq3);
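
    /* All charge products are formed once per kernel call: every i water and
     * every j water in a W4-W4 list has the same site charges, so
     * qqXY = (epsfac*q[X])*q[Y] for the nine charged site pairs (sites 1-3),
     * while site 0 contributes only the single Lennard-Jones pair c6_00/c12_00.
     */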

    /* When we use explicit cutoffs, the values must be identical for electrostatics and VdW, so we use the electrostatics cutoff as an arbitrary choice */
    rcutoff_scalar   = fr->ic->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->ic->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
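
        /* The shift vector places the i water in the periodic image appropriate
         * for this list, so the inner loop below can form displacement vectors
         * directly, without per-pair minimum-image corrections.
         */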

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();
        fix3             = _mm256_setzero_ps();
        fiy3             = _mm256_setzero_ps();
        fiz3             = _mm256_setzero_ps();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {
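            /* This loop only handles full chunks of eight real j waters: jjnr[]
             * is padded with negative indices, and the jjnr[jidx+7]>=0 test above
             * hands any partial final chunk to the masked epilogue further down.
             */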

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv11           = avx256_invsqrt_f(rsq11);
            rinv12           = avx256_invsqrt_f(rsq12);
            rinv13           = avx256_invsqrt_f(rsq13);
            rinv21           = avx256_invsqrt_f(rsq21);
            rinv22           = avx256_invsqrt_f(rsq22);
            rinv23           = avx256_invsqrt_f(rsq23);
            rinv31           = avx256_invsqrt_f(rsq31);
            rinv32           = avx256_invsqrt_f(rsq32);
            rinv33           = avx256_invsqrt_f(rsq33);

            rinvsq00         = avx256_inv_f(rsq00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
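            /* Site 0 carries only the Lennard-Jones interaction in this W4-W4
             * kernel, so the 0-0 pair needs just 1/r^2, while the nine charged
             * pairs (sites 1-3 against sites 1-3) also need 1/r for Ewald.
             */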

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
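
            /* Scalar sketch of the force-only Lennard-Jones evaluation above,
             * assuming the GROMACS convention that fr->nbfp stores 6*C6 and
             * 12*C12 (which is why no explicit factors of 6 and 12 appear here):
             *
             *   fvdw = (c12/pow(r,12) - c6/pow(r,6))/(r*r);
             */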

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
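
            /* Compared with the VF kernel above, this force-only path skips the
             * avx256_pmecorrV_f() call and the potential accumulation; only the
             * scalar force prefactor felec is needed.
             */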

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 564 flops */
        }

        if(jidx<j_index_end)
        {
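
            /* Masked epilogue: handles the final, possibly partial, chunk of up
             * to eight j waters, using dummy_mask to zero contributions from the
             * padded lanes.
             */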

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
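
            /* Per-lane scalar view of this masking scheme (illustrative only):
             *
             *   jnr   = (jnrlist >= 0) ? jnrlist : 0;    clamp so loads stay in bounds
             *   fscal = (jnrlist >= 0) ? fscal   : 0;    what andnot(dummy_mask,...) does
             */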
2233                                             
2234             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2235             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2236             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2237             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2238             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2239             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2240             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2241             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
2242             j_coord_offsetA  = DIM*jnrA;
2243             j_coord_offsetB  = DIM*jnrB;
2244             j_coord_offsetC  = DIM*jnrC;
2245             j_coord_offsetD  = DIM*jnrD;
2246             j_coord_offsetE  = DIM*jnrE;
2247             j_coord_offsetF  = DIM*jnrF;
2248             j_coord_offsetG  = DIM*jnrG;
2249             j_coord_offsetH  = DIM*jnrH;
2250
2251             /* load j atom coordinates */
2252             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2253                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2254                                                  x+j_coord_offsetE,x+j_coord_offsetF,
2255                                                  x+j_coord_offsetG,x+j_coord_offsetH,
2256                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
2257                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
2258
2259             /* Calculate displacement vector */
2260             dx00             = _mm256_sub_ps(ix0,jx0);
2261             dy00             = _mm256_sub_ps(iy0,jy0);
2262             dz00             = _mm256_sub_ps(iz0,jz0);
2263             dx11             = _mm256_sub_ps(ix1,jx1);
2264             dy11             = _mm256_sub_ps(iy1,jy1);
2265             dz11             = _mm256_sub_ps(iz1,jz1);
2266             dx12             = _mm256_sub_ps(ix1,jx2);
2267             dy12             = _mm256_sub_ps(iy1,jy2);
2268             dz12             = _mm256_sub_ps(iz1,jz2);
2269             dx13             = _mm256_sub_ps(ix1,jx3);
2270             dy13             = _mm256_sub_ps(iy1,jy3);
2271             dz13             = _mm256_sub_ps(iz1,jz3);
2272             dx21             = _mm256_sub_ps(ix2,jx1);
2273             dy21             = _mm256_sub_ps(iy2,jy1);
2274             dz21             = _mm256_sub_ps(iz2,jz1);
2275             dx22             = _mm256_sub_ps(ix2,jx2);
2276             dy22             = _mm256_sub_ps(iy2,jy2);
2277             dz22             = _mm256_sub_ps(iz2,jz2);
2278             dx23             = _mm256_sub_ps(ix2,jx3);
2279             dy23             = _mm256_sub_ps(iy2,jy3);
2280             dz23             = _mm256_sub_ps(iz2,jz3);
2281             dx31             = _mm256_sub_ps(ix3,jx1);
2282             dy31             = _mm256_sub_ps(iy3,jy1);
2283             dz31             = _mm256_sub_ps(iz3,jz1);
2284             dx32             = _mm256_sub_ps(ix3,jx2);
2285             dy32             = _mm256_sub_ps(iy3,jy2);
2286             dz32             = _mm256_sub_ps(iz3,jz2);
2287             dx33             = _mm256_sub_ps(ix3,jx3);
2288             dy33             = _mm256_sub_ps(iy3,jy3);
2289             dz33             = _mm256_sub_ps(iz3,jz3);
2290
2291             /* Calculate squared distance and things based on it */
2292             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2293             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2294             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2295             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
2296             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2297             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2298             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
2299             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
2300             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
2301             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
2302
2303             rinv11           = avx256_invsqrt_f(rsq11);
2304             rinv12           = avx256_invsqrt_f(rsq12);
2305             rinv13           = avx256_invsqrt_f(rsq13);
2306             rinv21           = avx256_invsqrt_f(rsq21);
2307             rinv22           = avx256_invsqrt_f(rsq22);
2308             rinv23           = avx256_invsqrt_f(rsq23);
2309             rinv31           = avx256_invsqrt_f(rsq31);
2310             rinv32           = avx256_invsqrt_f(rsq32);
2311             rinv33           = avx256_invsqrt_f(rsq33);
2312
2313             rinvsq00         = avx256_inv_f(rsq00);
2314             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2315             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2316             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
2317             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2318             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2319             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
2320             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
2321             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
2322             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
2323
2324             fjx0             = _mm256_setzero_ps();
2325             fjy0             = _mm256_setzero_ps();
2326             fjz0             = _mm256_setzero_ps();
2327             fjx1             = _mm256_setzero_ps();
2328             fjy1             = _mm256_setzero_ps();
2329             fjz1             = _mm256_setzero_ps();
2330             fjx2             = _mm256_setzero_ps();
2331             fjy2             = _mm256_setzero_ps();
2332             fjz2             = _mm256_setzero_ps();
2333             fjx3             = _mm256_setzero_ps();
2334             fjy3             = _mm256_setzero_ps();
2335             fjz3             = _mm256_setzero_ps();
2336
2337             /**************************
2338              * CALCULATE INTERACTIONS *
2339              **************************/
2340
2341             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2342             {
2343
2344             /* LENNARD-JONES DISPERSION/REPULSION */
2345
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));

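            /* Zero the force in lanes whose pair distance exceeds the cutoff,
             * so that only interactions inside rcutoff contribute.
             */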
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

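            /* dummy_mask flags the padding lanes of a partially filled j-block
             * (jnrlist entries < 0); clearing them keeps padded atoms forceless.
             */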
            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
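            /* The real-space Ewald force scalar is assembled analytically:
             *   felec = qq * ( 1/r^3 + beta^3 * pmecorrF(beta^2*r^2) )
             *         = qq * ( erfc(beta*r) + 2*beta*r/sqrt(pi)*exp(-beta^2*r^2) ) / r^3,
             * where avx256_pmecorrF_f() evaluates a polynomial fit of
             * (2*z*exp(-z^2)/sqrt(pi) - erf(z))/z^3 in z^2 = beta^2*r^2.
             * The masked r11 above is a by-product of the kernel generator and
             * is not used on this analytical path (tabulated variants need it).
             */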
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);
            r13              = _mm256_andnot_ps(dummy_mask,r13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);
            r23              = _mm256_andnot_ps(dummy_mask,r23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);
            r31              = _mm256_andnot_ps(dummy_mask,r31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);
            r32              = _mm256_andnot_ps(dummy_mask,r32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);
            r33              = _mm256_andnot_ps(dummy_mask,r33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

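            /* Scatter targets: lanes holding real neighbours write back into the
             * force array, while padding lanes (jnrlist < 0) are redirected to a
             * scratch buffer so the store needs no branching.
             */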
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

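            /* Transpose the twelve SIMD accumulators back to per-atom rvec
             * layout and subtract them from the eight j-water force entries in
             * one swizzled store (the reaction forces).
             */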
            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 573 flops */
        }

        /* End of innermost loop */

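        /* Reduce the i-water force accumulators across SIMD lanes, add them to
         * the force array, and book the same sum in the shift-force buffer,
         * which feeds the virial calculation.
         */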
        gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 24 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

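    /* Book the arithmetic cost with the nrnb accounting machinery; the factors
     * match the per-iteration flop counts noted in the comments above.
     */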
    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*573);
}