/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "gmxpre.h"

#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            None
 * Geometry:                   Water4-Water4
 * Calculate force/pot:        PotentialAndForce
 */
void
nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
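    /* With 256-bit AVX in single precision each SIMD register holds eight
     * floats, so the inner loop below processes eight j waters per iteration,
     * with a masked epilogue for lists whose length is not a multiple of eight.
     */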
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
    __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
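    /* Note: the Ewald table pointers above are emitted by the kernel generator
     * for all Ewald flavors, but this kernel uses the analytical PME correction,
     * so the table is not referenced in the interaction blocks below.
     */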

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));

    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    jq3              = _mm256_set1_ps(charge[inr+3]);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq13             = _mm256_mul_ps(iq1,jq3);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
    qq23             = _mm256_mul_ps(iq2,jq3);
    qq31             = _mm256_mul_ps(iq3,jq1);
    qq32             = _mm256_mul_ps(iq3,jq2);
    qq33             = _mm256_mul_ps(iq3,jq3);
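    /* All waters are identical, so the nine charge products qq11..qq33 can be
     * formed once, outside both loops. Atom 0 of each water carries neither
     * charge nor VdW in this kernel and is skipped entirely.
     */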

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }
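    /* Presumably the scratch buffer serves as a dump target for force
     * contributions of masked-out dummy entries later in the kernel.
     */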

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
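        /* The +DIM offset skips atom 0 of each water: only atoms 1-3 take part
         * in the interactions here, and the j coordinate loads below apply the
         * same offset.
         */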

        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();
        fix3             = _mm256_setzero_ps();
        fiy3             = _mm256_setzero_ps();
        fiz3             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
                                                 x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
                                                 x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv13           = gmx_mm256_invsqrt_ps(rsq13);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);
            rinv23           = gmx_mm256_invsqrt_ps(rsq23);
            rinv31           = gmx_mm256_invsqrt_ps(rsq31);
            rinv32           = gmx_mm256_invsqrt_ps(rsq32);
            rinv33           = gmx_mm256_invsqrt_ps(rsq33);

            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);
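            /* A sketch of the math above, assuming the standard GROMACS
             * analytical PME correction functions (z = beta*r):
             *   pmecorrF(z^2) ~ -(erf(z) - 2*z/sqrt(pi)*exp(-z^2))/z^3
             *   pmecorrV(z^2) ~  erf(z)/z
             * which gives
             *   felec = qq11*(erfc(z) + 2*z/sqrt(pi)*exp(-z^2))/r^3
             *   velec = qq11*(erfc(z)/r - sh_ewald)
             * i.e. the potential-shifted real-space Ewald interaction and the
             * matching scalar force (already divided by r). The same pattern
             * repeats for all nine atom pairs below.
             */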

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq32,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq33,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

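            /* Scatter the accumulated j forces back to memory. The same tx,ty,tz
             * were added to the i forces above, so the "decrement" here applies
             * Newton's third law: each j atom receives the opposite force.
             */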
            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
                                                      fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 981 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

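            /* Clamp dummy indices to 0 so the coordinate loads below always read
             * from a valid address; the spurious results are cleared with
             * dummy_mask in the interaction blocks.
             */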
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
                                                 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
                                                 x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
                                                 x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
                                                 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv13           = gmx_mm256_invsqrt_ps(rsq13);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);
            rinv23           = gmx_mm256_invsqrt_ps(rsq23);
            rinv31           = gmx_mm256_invsqrt_ps(rsq31);
            rinv32           = gmx_mm256_invsqrt_ps(rsq32);
            rinv33           = gmx_mm256_invsqrt_ps(rsq33);

            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);
            r13              = _mm256_andnot_ps(dummy_mask,r13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);
            r23              = _mm256_andnot_ps(dummy_mask,r23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);
            r31              = _mm256_andnot_ps(dummy_mask,r31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);
            r32              = _mm256_andnot_ps(dummy_mask,r32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1257             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1258             felec            = _mm256_mul_ps(qq32,felec);
1259             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1260             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1261             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
1262             velec            = _mm256_mul_ps(qq32,velec);
1263             
1264             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
1265
1266             /* Update potential sum for this i atom from the interaction with this j atom. */
1267             velec            = _mm256_and_ps(velec,cutoff_mask);
1268             velec            = _mm256_andnot_ps(dummy_mask,velec);
1269             velecsum         = _mm256_add_ps(velecsum,velec);
1270
1271             fscal            = felec;
1272
1273             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1274
1275             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1276
1277             /* Calculate temporary vectorial force */
1278             tx               = _mm256_mul_ps(fscal,dx32);
1279             ty               = _mm256_mul_ps(fscal,dy32);
1280             tz               = _mm256_mul_ps(fscal,dz32);
1281
1282             /* Update vectorial force */
1283             fix3             = _mm256_add_ps(fix3,tx);
1284             fiy3             = _mm256_add_ps(fiy3,ty);
1285             fiz3             = _mm256_add_ps(fiz3,tz);
1286
1287             fjx2             = _mm256_add_ps(fjx2,tx);
1288             fjy2             = _mm256_add_ps(fjy2,ty);
1289             fjz2             = _mm256_add_ps(fjz2,tz);
1290
1291             }
1292
1293             /**************************
1294              * CALCULATE INTERACTIONS *
1295              **************************/
1296
1297             if (gmx_mm256_any_lt(rsq33,rcutoff2))
1298             {
1299
1300             r33              = _mm256_mul_ps(rsq33,rinv33);
1301             r33              = _mm256_andnot_ps(dummy_mask,r33);
1302
1303             /* EWALD ELECTROSTATICS */
1304             
1305             /* Analytical PME correction */
1306             zeta2            = _mm256_mul_ps(beta2,rsq33);
1307             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
1308             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1309             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1310             felec            = _mm256_mul_ps(qq33,felec);
1311             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1312             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1313             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
1314             velec            = _mm256_mul_ps(qq33,velec);
1315             
1316             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
1317
1318             /* Update potential sum for this i atom from the interaction with this j atom. */
1319             velec            = _mm256_and_ps(velec,cutoff_mask);
1320             velec            = _mm256_andnot_ps(dummy_mask,velec);
1321             velecsum         = _mm256_add_ps(velecsum,velec);
1322
1323             fscal            = felec;
1324
1325             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1326
1327             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1328
1329             /* Calculate temporary vectorial force */
1330             tx               = _mm256_mul_ps(fscal,dx33);
1331             ty               = _mm256_mul_ps(fscal,dy33);
1332             tz               = _mm256_mul_ps(fscal,dz33);
1333
1334             /* Update vectorial force */
1335             fix3             = _mm256_add_ps(fix3,tx);
1336             fiy3             = _mm256_add_ps(fiy3,ty);
1337             fiz3             = _mm256_add_ps(fiz3,tz);
1338
1339             fjx3             = _mm256_add_ps(fjx3,tx);
1340             fjy3             = _mm256_add_ps(fjy3,ty);
1341             fjz3             = _mm256_add_ps(fjz3,tz);
1342
1343             }
1344
1345             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1346             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1347             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1348             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1349             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1350             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1351             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1352             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
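            /* j forces from padding lanes must not touch real memory, so their
             * pointers are redirected to the local scratch buffer, whose contents
             * are simply discarded. */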
1353
1354             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1355                                                       fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
1356                                                       fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
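            /* The j accumulators were built with the same sign as the i-atom forces,
             * so Newton's third law is applied here by decrementing f[j]. */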
1357
1358             /* Inner loop uses 990 flops */
1359         }
1360
1361         /* End of innermost loop */
1362
1363         gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1364                                                  f+i_coord_offset+DIM,fshift+i_shift_offset);
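        /* The call above transposes and horizontally reduces the eight SIMD lanes of
         * each i-atom force accumulator, adding the results to f[] and to the
         * shift-force array used for the virial. */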
1365
1366         ggid                        = gid[iidx];
1367         /* Update potential energies */
1368         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
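        /* gmx_mm256_update_1pot_ps horizontally sums the eight lanes of velecsum and
         * adds the scalar result to this list's electrostatic energy-group bin. */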
1369
1370         /* Increment number of inner iterations */
1371         inneriter                  += j_index_end - j_index_start;
1372
1373         /* Outer loop uses 19 flops */
1374     }
1375
1376     /* Increment number of outer iterations */
1377     outeriter        += nri;
1378
1379     /* Update outer/inner flops */
1380
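    /* These counts feed only mdrun's flop accounting (nrnb); they have no effect on
     * the computed forces or energies. */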
1381     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*990);
1382 }
1383 /*
1384  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_single
1385  * Electrostatics interaction: Ewald
1386  * VdW interaction:            None
1387  * Geometry:                   Water4-Water4
1388  * Calculate force/pot:        Force
1389  */
1390 void
1391 nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_single
1392                     (t_nblist                    * gmx_restrict       nlist,
1393                      rvec                        * gmx_restrict          xx,
1394                      rvec                        * gmx_restrict          ff,
1395                      t_forcerec                  * gmx_restrict          fr,
1396                      t_mdatoms                   * gmx_restrict     mdatoms,
1397                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1398                      t_nrnb                      * gmx_restrict        nrnb)
1399 {
1400     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop,
1401      * or just 0 for non-waters.
1402      * Suffixes A,B,C,D,E,F,G,H refer to the j-loop unrolling done with AVX, i.e. the eight
1403      * jnr indices whose data occupy the eight positions of the 256-bit SIMD register.
1404      */
1405     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1406     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1407     int              jnrA,jnrB,jnrC,jnrD;
1408     int              jnrE,jnrF,jnrG,jnrH;
1409     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1410     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1411     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1412     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1413     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1414     real             rcutoff_scalar;
1415     real             *shiftvec,*fshift,*x,*f;
1416     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1417     real             scratch[4*DIM];
1418     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1419     real *           vdwioffsetptr1;
1420     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1421     real *           vdwioffsetptr2;
1422     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1423     real *           vdwioffsetptr3;
1424     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1425     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1426     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1427     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1428     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1429     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
1430     __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1431     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1432     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1433     __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1434     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1435     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1436     __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1437     __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1438     __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1439     __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1440     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1441     real             *charge;
1442     __m256i          ewitab;
1443     __m128i          ewitab_lo,ewitab_hi;
1444     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1445     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1446     real             *ewtab;
1447     __m256           dummy_mask,cutoff_mask;
1448     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1449     __m256           one     = _mm256_set1_ps(1.0);
1450     __m256           two     = _mm256_set1_ps(2.0);
1451     x                = xx[0];
1452     f                = ff[0];
1453
1454     nri              = nlist->nri;
1455     iinr             = nlist->iinr;
1456     jindex           = nlist->jindex;
1457     jjnr             = nlist->jjnr;
1458     shiftidx         = nlist->shift;
1459     gid              = nlist->gid;
1460     shiftvec         = fr->shift_vec[0];
1461     fshift           = fr->fshift[0];
1462     facel            = _mm256_set1_ps(fr->epsfac);
1463     charge           = mdatoms->chargeA;
1464
1465     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1466     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1467     beta2            = _mm256_mul_ps(beta,beta);
1468     beta3            = _mm256_mul_ps(beta,beta2);
1469
1470     ewtab            = fr->ic->tabq_coul_F;
1471     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1472     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1473
1474     /* Setup water-specific parameters */
1475     inr              = nlist->iinr[0];
1476     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1477     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1478     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1479
1480     jq1              = _mm256_set1_ps(charge[inr+1]);
1481     jq2              = _mm256_set1_ps(charge[inr+2]);
1482     jq3              = _mm256_set1_ps(charge[inr+3]);
1483     qq11             = _mm256_mul_ps(iq1,jq1);
1484     qq12             = _mm256_mul_ps(iq1,jq2);
1485     qq13             = _mm256_mul_ps(iq1,jq3);
1486     qq21             = _mm256_mul_ps(iq2,jq1);
1487     qq22             = _mm256_mul_ps(iq2,jq2);
1488     qq23             = _mm256_mul_ps(iq2,jq3);
1489     qq31             = _mm256_mul_ps(iq3,jq1);
1490     qq32             = _mm256_mul_ps(iq3,jq2);
1491     qq33             = _mm256_mul_ps(iq3,jq3);
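    /* All i and j particles in this kernel are identical 4-site waters, so the nine
     * charge products are precomputed once here, outside both loops, with the
     * electrostatics prefactor facel already folded into iq1..iq3. */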
1492
1493     /* When explicit cutoffs are used the value must be identical for electrostatics and VdW, so we use the electrostatics cutoff as the arbitrary choice */
1494     rcutoff_scalar   = fr->rcoulomb;
1495     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1496     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1497
1498     /* Initialize to avoid spurious compiler warnings */
1499     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1500     j_coord_offsetA = 0;
1501     j_coord_offsetB = 0;
1502     j_coord_offsetC = 0;
1503     j_coord_offsetD = 0;
1504     j_coord_offsetE = 0;
1505     j_coord_offsetF = 0;
1506     j_coord_offsetG = 0;
1507     j_coord_offsetH = 0;
1508
1509     outeriter        = 0;
1510     inneriter        = 0;
1511
1512     for(iidx=0;iidx<4*DIM;iidx++)
1513     {
1514         scratch[iidx] = 0.0;
1515     }
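    /* scratch serves as a harmless dump target: in the masked tail loop, force
     * contributions from padding lanes are scattered here and ignored (see the
     * fjptr redirection in the VF kernel above). */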
1516
1517     /* Start outer loop over neighborlists */
1518     for(iidx=0; iidx<nri; iidx++)
1519     {
1520         /* Load shift vector for this list */
1521         i_shift_offset   = DIM*shiftidx[iidx];
1522
1523         /* Load limits for loop over neighbors */
1524         j_index_start    = jindex[iidx];
1525         j_index_end      = jindex[iidx+1];
1526
1527         /* Get outer coordinate index */
1528         inr              = iinr[iidx];
1529         i_coord_offset   = DIM*inr;
1530
1531         /* Load i particle coords and add shift vector */
1532         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
1533                                                     &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1534
1535         fix1             = _mm256_setzero_ps();
1536         fiy1             = _mm256_setzero_ps();
1537         fiz1             = _mm256_setzero_ps();
1538         fix2             = _mm256_setzero_ps();
1539         fiy2             = _mm256_setzero_ps();
1540         fiz2             = _mm256_setzero_ps();
1541         fix3             = _mm256_setzero_ps();
1542         fiy3             = _mm256_setzero_ps();
1543         fiz3             = _mm256_setzero_ps();
1544
1545         /* Start inner kernel loop */
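        /* The unrolled loop below processes eight j entries per iteration and runs
         * only while the eighth entry (jjnr[jidx+7]) is a real atom; the remainder,
         * which may contain negative padding indices, is handled by the masked
         * tail loop further down. */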
1546         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1547         {
1548
1549             /* Get j neighbor index, and coordinate index */
1550             jnrA             = jjnr[jidx];
1551             jnrB             = jjnr[jidx+1];
1552             jnrC             = jjnr[jidx+2];
1553             jnrD             = jjnr[jidx+3];
1554             jnrE             = jjnr[jidx+4];
1555             jnrF             = jjnr[jidx+5];
1556             jnrG             = jjnr[jidx+6];
1557             jnrH             = jjnr[jidx+7];
1558             j_coord_offsetA  = DIM*jnrA;
1559             j_coord_offsetB  = DIM*jnrB;
1560             j_coord_offsetC  = DIM*jnrC;
1561             j_coord_offsetD  = DIM*jnrD;
1562             j_coord_offsetE  = DIM*jnrE;
1563             j_coord_offsetF  = DIM*jnrF;
1564             j_coord_offsetG  = DIM*jnrG;
1565             j_coord_offsetH  = DIM*jnrH;
1566
1567             /* Load j atom coordinates */
1568             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1569                                                  x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1570                                                  x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
1571                                                  x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
1572                                                  &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1573
1574             /* Calculate displacement vector */
1575             dx11             = _mm256_sub_ps(ix1,jx1);
1576             dy11             = _mm256_sub_ps(iy1,jy1);
1577             dz11             = _mm256_sub_ps(iz1,jz1);
1578             dx12             = _mm256_sub_ps(ix1,jx2);
1579             dy12             = _mm256_sub_ps(iy1,jy2);
1580             dz12             = _mm256_sub_ps(iz1,jz2);
1581             dx13             = _mm256_sub_ps(ix1,jx3);
1582             dy13             = _mm256_sub_ps(iy1,jy3);
1583             dz13             = _mm256_sub_ps(iz1,jz3);
1584             dx21             = _mm256_sub_ps(ix2,jx1);
1585             dy21             = _mm256_sub_ps(iy2,jy1);
1586             dz21             = _mm256_sub_ps(iz2,jz1);
1587             dx22             = _mm256_sub_ps(ix2,jx2);
1588             dy22             = _mm256_sub_ps(iy2,jy2);
1589             dz22             = _mm256_sub_ps(iz2,jz2);
1590             dx23             = _mm256_sub_ps(ix2,jx3);
1591             dy23             = _mm256_sub_ps(iy2,jy3);
1592             dz23             = _mm256_sub_ps(iz2,jz3);
1593             dx31             = _mm256_sub_ps(ix3,jx1);
1594             dy31             = _mm256_sub_ps(iy3,jy1);
1595             dz31             = _mm256_sub_ps(iz3,jz1);
1596             dx32             = _mm256_sub_ps(ix3,jx2);
1597             dy32             = _mm256_sub_ps(iy3,jy2);
1598             dz32             = _mm256_sub_ps(iz3,jz2);
1599             dx33             = _mm256_sub_ps(ix3,jx3);
1600             dy33             = _mm256_sub_ps(iy3,jy3);
1601             dz33             = _mm256_sub_ps(iz3,jz3);
1602
1603             /* Calculate squared distance and things based on it */
1604             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1605             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1606             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1607             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1608             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1609             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1610             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1611             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1612             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1613
1614             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1615             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1616             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
1617             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1618             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1619             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
1620             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
1621             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
1622             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
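            /* gmx_mm256_invsqrt_ps is, in this kernel family, the hardware rsqrt
             * approximation refined by a Newton-Raphson iteration; squaring the
             * result below then gives 1/r^2 without a full-precision division. */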
1623
1624             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1625             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1626             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
1627             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1628             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1629             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
1630             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
1631             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
1632             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
1633
1634             fjx1             = _mm256_setzero_ps();
1635             fjy1             = _mm256_setzero_ps();
1636             fjz1             = _mm256_setzero_ps();
1637             fjx2             = _mm256_setzero_ps();
1638             fjy2             = _mm256_setzero_ps();
1639             fjz2             = _mm256_setzero_ps();
1640             fjx3             = _mm256_setzero_ps();
1641             fjy3             = _mm256_setzero_ps();
1642             fjz3             = _mm256_setzero_ps();
1643
1644             /**************************
1645              * CALCULATE INTERACTIONS *
1646              **************************/
1647
1648             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1649             {
1650
1651             r11              = _mm256_mul_ps(rsq11,rinv11);
1652
1653             /* EWALD ELECTROSTATICS */
1654             
1655             /* Analytical PME correction */
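            /* Force-only kernel: the felec expression matches the VF kernel above,
             * but the pmecorrV/velec evaluation is skipped since no energies are
             * accumulated here. */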
1656             zeta2            = _mm256_mul_ps(beta2,rsq11);
1657             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1658             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1659             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1660             felec            = _mm256_mul_ps(qq11,felec);
1661             
1662             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1663
1664             fscal            = felec;
1665
1666             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1667
1668             /* Calculate temporary vectorial force */
1669             tx               = _mm256_mul_ps(fscal,dx11);
1670             ty               = _mm256_mul_ps(fscal,dy11);
1671             tz               = _mm256_mul_ps(fscal,dz11);
1672
1673             /* Update vectorial force */
1674             fix1             = _mm256_add_ps(fix1,tx);
1675             fiy1             = _mm256_add_ps(fiy1,ty);
1676             fiz1             = _mm256_add_ps(fiz1,tz);
1677
1678             fjx1             = _mm256_add_ps(fjx1,tx);
1679             fjy1             = _mm256_add_ps(fjy1,ty);
1680             fjz1             = _mm256_add_ps(fjz1,tz);
1681
1682             }
1683
1684             /**************************
1685              * CALCULATE INTERACTIONS *
1686              **************************/
1687
1688             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1689             {
1690
1691             r12              = _mm256_mul_ps(rsq12,rinv12);
1692
1693             /* EWALD ELECTROSTATICS */
1694             
1695             /* Analytical PME correction */
1696             zeta2            = _mm256_mul_ps(beta2,rsq12);
1697             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1698             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1699             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1700             felec            = _mm256_mul_ps(qq12,felec);
1701             
1702             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1703
1704             fscal            = felec;
1705
1706             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1707
1708             /* Calculate temporary vectorial force */
1709             tx               = _mm256_mul_ps(fscal,dx12);
1710             ty               = _mm256_mul_ps(fscal,dy12);
1711             tz               = _mm256_mul_ps(fscal,dz12);
1712
1713             /* Update vectorial force */
1714             fix1             = _mm256_add_ps(fix1,tx);
1715             fiy1             = _mm256_add_ps(fiy1,ty);
1716             fiz1             = _mm256_add_ps(fiz1,tz);
1717
1718             fjx2             = _mm256_add_ps(fjx2,tx);
1719             fjy2             = _mm256_add_ps(fjy2,ty);
1720             fjz2             = _mm256_add_ps(fjz2,tz);
1721
1722             }
1723
1724             /**************************
1725              * CALCULATE INTERACTIONS *
1726              **************************/
1727
1728             if (gmx_mm256_any_lt(rsq13,rcutoff2))
1729             {
1730
1731             r13              = _mm256_mul_ps(rsq13,rinv13);
1732
1733             /* EWALD ELECTROSTATICS */
1734             
1735             /* Analytical PME correction */
1736             zeta2            = _mm256_mul_ps(beta2,rsq13);
1737             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
1738             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1739             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1740             felec            = _mm256_mul_ps(qq13,felec);
1741             
1742             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
1743
1744             fscal            = felec;
1745
1746             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1747
1748             /* Calculate temporary vectorial force */
1749             tx               = _mm256_mul_ps(fscal,dx13);
1750             ty               = _mm256_mul_ps(fscal,dy13);
1751             tz               = _mm256_mul_ps(fscal,dz13);
1752
1753             /* Update vectorial force */
1754             fix1             = _mm256_add_ps(fix1,tx);
1755             fiy1             = _mm256_add_ps(fiy1,ty);
1756             fiz1             = _mm256_add_ps(fiz1,tz);
1757
1758             fjx3             = _mm256_add_ps(fjx3,tx);
1759             fjy3             = _mm256_add_ps(fjy3,ty);
1760             fjz3             = _mm256_add_ps(fjz3,tz);
1761
1762             }
1763
1764             /**************************
1765              * CALCULATE INTERACTIONS *
1766              **************************/
1767
1768             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1769             {
1770
1771             r21              = _mm256_mul_ps(rsq21,rinv21);
1772
1773             /* EWALD ELECTROSTATICS */
1774             
1775             /* Analytical PME correction */
1776             zeta2            = _mm256_mul_ps(beta2,rsq21);
1777             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1778             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1779             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1780             felec            = _mm256_mul_ps(qq21,felec);
1781             
1782             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1783
1784             fscal            = felec;
1785
1786             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1787
1788             /* Calculate temporary vectorial force */
1789             tx               = _mm256_mul_ps(fscal,dx21);
1790             ty               = _mm256_mul_ps(fscal,dy21);
1791             tz               = _mm256_mul_ps(fscal,dz21);
1792
1793             /* Update vectorial force */
1794             fix2             = _mm256_add_ps(fix2,tx);
1795             fiy2             = _mm256_add_ps(fiy2,ty);
1796             fiz2             = _mm256_add_ps(fiz2,tz);
1797
1798             fjx1             = _mm256_add_ps(fjx1,tx);
1799             fjy1             = _mm256_add_ps(fjy1,ty);
1800             fjz1             = _mm256_add_ps(fjz1,tz);
1801
1802             }
1803
1804             /**************************
1805              * CALCULATE INTERACTIONS *
1806              **************************/
1807
1808             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1809             {
1810
1811             r22              = _mm256_mul_ps(rsq22,rinv22);
1812
1813             /* EWALD ELECTROSTATICS */
1814             
1815             /* Analytical PME correction */
1816             zeta2            = _mm256_mul_ps(beta2,rsq22);
1817             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1818             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1819             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1820             felec            = _mm256_mul_ps(qq22,felec);
1821             
1822             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1823
1824             fscal            = felec;
1825
1826             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1827
1828             /* Calculate temporary vectorial force */
1829             tx               = _mm256_mul_ps(fscal,dx22);
1830             ty               = _mm256_mul_ps(fscal,dy22);
1831             tz               = _mm256_mul_ps(fscal,dz22);
1832
1833             /* Update vectorial force */
1834             fix2             = _mm256_add_ps(fix2,tx);
1835             fiy2             = _mm256_add_ps(fiy2,ty);
1836             fiz2             = _mm256_add_ps(fiz2,tz);
1837
1838             fjx2             = _mm256_add_ps(fjx2,tx);
1839             fjy2             = _mm256_add_ps(fjy2,ty);
1840             fjz2             = _mm256_add_ps(fjz2,tz);
1841
1842             }
1843
1844             /**************************
1845              * CALCULATE INTERACTIONS *
1846              **************************/
1847
1848             if (gmx_mm256_any_lt(rsq23,rcutoff2))
1849             {
1850
1851             r23              = _mm256_mul_ps(rsq23,rinv23);
1852
1853             /* EWALD ELECTROSTATICS */
1854             
1855             /* Analytical PME correction */
1856             zeta2            = _mm256_mul_ps(beta2,rsq23);
1857             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
1858             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1859             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1860             felec            = _mm256_mul_ps(qq23,felec);
1861             
1862             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
1863
1864             fscal            = felec;
1865
1866             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1867
1868             /* Calculate temporary vectorial force */
1869             tx               = _mm256_mul_ps(fscal,dx23);
1870             ty               = _mm256_mul_ps(fscal,dy23);
1871             tz               = _mm256_mul_ps(fscal,dz23);
1872
1873             /* Update vectorial force */
1874             fix2             = _mm256_add_ps(fix2,tx);
1875             fiy2             = _mm256_add_ps(fiy2,ty);
1876             fiz2             = _mm256_add_ps(fiz2,tz);
1877
1878             fjx3             = _mm256_add_ps(fjx3,tx);
1879             fjy3             = _mm256_add_ps(fjy3,ty);
1880             fjz3             = _mm256_add_ps(fjz3,tz);
1881
1882             }
1883
1884             /**************************
1885              * CALCULATE INTERACTIONS *
1886              **************************/
1887
1888             if (gmx_mm256_any_lt(rsq31,rcutoff2))
1889             {
1890
1891             r31              = _mm256_mul_ps(rsq31,rinv31);
1892
1893             /* EWALD ELECTROSTATICS */
1894             
1895             /* Analytical PME correction */
1896             zeta2            = _mm256_mul_ps(beta2,rsq31);
1897             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
1898             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1899             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1900             felec            = _mm256_mul_ps(qq31,felec);
1901             
1902             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
1903
1904             fscal            = felec;
1905
1906             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1907
1908             /* Calculate temporary vectorial force */
1909             tx               = _mm256_mul_ps(fscal,dx31);
1910             ty               = _mm256_mul_ps(fscal,dy31);
1911             tz               = _mm256_mul_ps(fscal,dz31);
1912
1913             /* Update vectorial force */
1914             fix3             = _mm256_add_ps(fix3,tx);
1915             fiy3             = _mm256_add_ps(fiy3,ty);
1916             fiz3             = _mm256_add_ps(fiz3,tz);
1917
1918             fjx1             = _mm256_add_ps(fjx1,tx);
1919             fjy1             = _mm256_add_ps(fjy1,ty);
1920             fjz1             = _mm256_add_ps(fjz1,tz);
1921
1922             }
1923
1924             /**************************
1925              * CALCULATE INTERACTIONS *
1926              **************************/
1927
1928             if (gmx_mm256_any_lt(rsq32,rcutoff2))
1929             {
1930
1931             r32              = _mm256_mul_ps(rsq32,rinv32);
1932
1933             /* EWALD ELECTROSTATICS */
1934             
1935             /* Analytical PME correction */
1936             zeta2            = _mm256_mul_ps(beta2,rsq32);
1937             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
1938             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1939             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1940             felec            = _mm256_mul_ps(qq32,felec);
1941             
1942             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
1943
1944             fscal            = felec;
1945
1946             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1947
1948             /* Calculate temporary vectorial force */
1949             tx               = _mm256_mul_ps(fscal,dx32);
1950             ty               = _mm256_mul_ps(fscal,dy32);
1951             tz               = _mm256_mul_ps(fscal,dz32);
1952
1953             /* Update vectorial force */
1954             fix3             = _mm256_add_ps(fix3,tx);
1955             fiy3             = _mm256_add_ps(fiy3,ty);
1956             fiz3             = _mm256_add_ps(fiz3,tz);
1957
1958             fjx2             = _mm256_add_ps(fjx2,tx);
1959             fjy2             = _mm256_add_ps(fjy2,ty);
1960             fjz2             = _mm256_add_ps(fjz2,tz);
1961
1962             }
1963
1964             /**************************
1965              * CALCULATE INTERACTIONS *
1966              **************************/
1967
1968             if (gmx_mm256_any_lt(rsq33,rcutoff2))
1969             {
1970
1971             r33              = _mm256_mul_ps(rsq33,rinv33);
1972
1973             /* EWALD ELECTROSTATICS */
1974             
1975             /* Analytical PME correction */
1976             zeta2            = _mm256_mul_ps(beta2,rsq33);
1977             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
1978             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1979             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1980             felec            = _mm256_mul_ps(qq33,felec);
1981             
1982             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
1983
1984             fscal            = felec;
1985
1986             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1987
1988             /* Calculate temporary vectorial force */
1989             tx               = _mm256_mul_ps(fscal,dx33);
1990             ty               = _mm256_mul_ps(fscal,dy33);
1991             tz               = _mm256_mul_ps(fscal,dz33);
1992
1993             /* Update vectorial force */
1994             fix3             = _mm256_add_ps(fix3,tx);
1995             fiy3             = _mm256_add_ps(fiy3,ty);
1996             fiz3             = _mm256_add_ps(fiz3,tz);
1997
1998             fjx3             = _mm256_add_ps(fjx3,tx);
1999             fjy3             = _mm256_add_ps(fjy3,ty);
2000             fjz3             = _mm256_add_ps(fjz3,tz);
2001
2002             }
2003
2004             fjptrA             = f+j_coord_offsetA;
2005             fjptrB             = f+j_coord_offsetB;
2006             fjptrC             = f+j_coord_offsetC;
2007             fjptrD             = f+j_coord_offsetD;
2008             fjptrE             = f+j_coord_offsetE;
2009             fjptrF             = f+j_coord_offsetF;
2010             fjptrG             = f+j_coord_offsetG;
2011             fjptrH             = f+j_coord_offsetH;
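            /* Every lane in this unmasked loop is a real atom, so the j forces are
             * scattered straight into f[] with no scratch redirection. */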
2012
2013             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
2014                                                       fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
2015                                                       fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2016
2017             /* Inner loop uses 531 flops */
2018         }
2019
2020         if(jidx<j_index_end)
2021         {
2022
2023             /* Get j neighbor index, and coordinate index */
2024             jnrlistA         = jjnr[jidx];
2025             jnrlistB         = jjnr[jidx+1];
2026             jnrlistC         = jjnr[jidx+2];
2027             jnrlistD         = jjnr[jidx+3];
2028             jnrlistE         = jjnr[jidx+4];
2029             jnrlistF         = jjnr[jidx+5];
2030             jnrlistG         = jjnr[jidx+6];
2031             jnrlistH         = jjnr[jidx+7];
2032             /* The sign of each element is negative for non-real (padding) atoms.
2033              * The resulting mask is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2034              * so it is used as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
2035              */
2036             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2037                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
2038                                             
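            /* A sketch of the construction above: the eight 32-bit j indices are
             * loaded as two 128-bit halves, each lane is compared as a signed
             * integer against zero (negative, i.e. padding, lanes become
             * 0xFFFFFFFF), and the two 4-lane masks are concatenated into a
             * single 256-bit mask. */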
2039             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2040             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2041             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2042             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2043             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2044             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2045             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2046             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
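            /* Padding entries are clamped to index 0 so the coordinate gather below
             * always reads a valid address; dummy_mask later discards the bogus
             * results computed in those lanes. */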
2047             j_coord_offsetA  = DIM*jnrA;
2048             j_coord_offsetB  = DIM*jnrB;
2049             j_coord_offsetC  = DIM*jnrC;
2050             j_coord_offsetD  = DIM*jnrD;
2051             j_coord_offsetE  = DIM*jnrE;
2052             j_coord_offsetF  = DIM*jnrF;
2053             j_coord_offsetG  = DIM*jnrG;
2054             j_coord_offsetH  = DIM*jnrH;
2055
2056             /* Load j atom coordinates */
2057             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
2058                                                  x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
2059                                                  x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
2060                                                  x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
2061                                                  &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
2062
2063             /* Calculate displacement vector */
2064             dx11             = _mm256_sub_ps(ix1,jx1);
2065             dy11             = _mm256_sub_ps(iy1,jy1);
2066             dz11             = _mm256_sub_ps(iz1,jz1);
2067             dx12             = _mm256_sub_ps(ix1,jx2);
2068             dy12             = _mm256_sub_ps(iy1,jy2);
2069             dz12             = _mm256_sub_ps(iz1,jz2);
2070             dx13             = _mm256_sub_ps(ix1,jx3);
2071             dy13             = _mm256_sub_ps(iy1,jy3);
2072             dz13             = _mm256_sub_ps(iz1,jz3);
2073             dx21             = _mm256_sub_ps(ix2,jx1);
2074             dy21             = _mm256_sub_ps(iy2,jy1);
2075             dz21             = _mm256_sub_ps(iz2,jz1);
2076             dx22             = _mm256_sub_ps(ix2,jx2);
2077             dy22             = _mm256_sub_ps(iy2,jy2);
2078             dz22             = _mm256_sub_ps(iz2,jz2);
2079             dx23             = _mm256_sub_ps(ix2,jx3);
2080             dy23             = _mm256_sub_ps(iy2,jy3);
2081             dz23             = _mm256_sub_ps(iz2,jz3);
2082             dx31             = _mm256_sub_ps(ix3,jx1);
2083             dy31             = _mm256_sub_ps(iy3,jy1);
2084             dz31             = _mm256_sub_ps(iz3,jz1);
2085             dx32             = _mm256_sub_ps(ix3,jx2);
2086             dy32             = _mm256_sub_ps(iy3,jy2);
2087             dz32             = _mm256_sub_ps(iz3,jz2);
2088             dx33             = _mm256_sub_ps(ix3,jx3);
2089             dy33             = _mm256_sub_ps(iy3,jy3);
2090             dz33             = _mm256_sub_ps(iz3,jz3);
2091
2092             /* Calculate squared distance and things based on it */
2093             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2094             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2095             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
2096             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2097             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2098             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
2099             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
2100             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
2101             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
2102
2103             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
2104             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
2105             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
2106             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
2107             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
2108             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
2109             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
2110             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
2111             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
2112
2113             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2114             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2115             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
2116             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2117             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2118             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
2119             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
2120             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
2121             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
2122
2123             fjx1             = _mm256_setzero_ps();
2124             fjy1             = _mm256_setzero_ps();
2125             fjz1             = _mm256_setzero_ps();
2126             fjx2             = _mm256_setzero_ps();
2127             fjy2             = _mm256_setzero_ps();
2128             fjz2             = _mm256_setzero_ps();
2129             fjx3             = _mm256_setzero_ps();
2130             fjy3             = _mm256_setzero_ps();
2131             fjz3             = _mm256_setzero_ps();
2132
2133             /**************************
2134              * CALCULATE INTERACTIONS *
2135              **************************/
2136
2137             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2138             {
2139
2140             r11              = _mm256_mul_ps(rsq11,rinv11);
2141             r11              = _mm256_andnot_ps(dummy_mask,r11);
2142
2143             /* EWALD ELECTROSTATICS */
2144             
2145             /* Analytical PME correction */
2146             zeta2            = _mm256_mul_ps(beta2,rsq11);
2147             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2148             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2149             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2150             felec            = _mm256_mul_ps(qq11,felec);
2151             
2152             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2153
2154             fscal            = felec;
2155
2156             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2157
2158             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2159
2160             /* Calculate temporary vectorial force */
2161             tx               = _mm256_mul_ps(fscal,dx11);
2162             ty               = _mm256_mul_ps(fscal,dy11);
2163             tz               = _mm256_mul_ps(fscal,dz11);
2164
2165             /* Update vectorial force */
2166             fix1             = _mm256_add_ps(fix1,tx);
2167             fiy1             = _mm256_add_ps(fiy1,ty);
2168             fiz1             = _mm256_add_ps(fiz1,tz);
2169
2170             fjx1             = _mm256_add_ps(fjx1,tx);
2171             fjy1             = _mm256_add_ps(fjy1,ty);
2172             fjz1             = _mm256_add_ps(fjz1,tz);
2173
2174             }
2175
2176             /**************************
2177              * CALCULATE INTERACTIONS *
2178              **************************/
2179
2180             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2181             {
2182
2183             r12              = _mm256_mul_ps(rsq12,rinv12);
2184             r12              = _mm256_andnot_ps(dummy_mask,r12);
2185
2186             /* EWALD ELECTROSTATICS */
2187             
2188             /* Analytical PME correction */
2189             zeta2            = _mm256_mul_ps(beta2,rsq12);
2190             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2191             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2192             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2193             felec            = _mm256_mul_ps(qq12,felec);
2194             
2195             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2196
2197             fscal            = felec;
2198
2199             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2200
2201             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2202
2203             /* Calculate temporary vectorial force */
2204             tx               = _mm256_mul_ps(fscal,dx12);
2205             ty               = _mm256_mul_ps(fscal,dy12);
2206             tz               = _mm256_mul_ps(fscal,dz12);
2207
2208             /* Update vectorial force */
2209             fix1             = _mm256_add_ps(fix1,tx);
2210             fiy1             = _mm256_add_ps(fiy1,ty);
2211             fiz1             = _mm256_add_ps(fiz1,tz);
2212
2213             fjx2             = _mm256_add_ps(fjx2,tx);
2214             fjy2             = _mm256_add_ps(fjy2,ty);
2215             fjz2             = _mm256_add_ps(fjz2,tz);
2216
2217             }
2218
2219             /**************************
2220              * CALCULATE INTERACTIONS *
2221              **************************/
2222
2223             if (gmx_mm256_any_lt(rsq13,rcutoff2))
2224             {
2225
2226             r13              = _mm256_mul_ps(rsq13,rinv13);
2227             r13              = _mm256_andnot_ps(dummy_mask,r13);
2228
2229             /* EWALD ELECTROSTATICS */
2230             
2231             /* Analytical PME correction */
2232             zeta2            = _mm256_mul_ps(beta2,rsq13);
2233             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
2234             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2235             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2236             felec            = _mm256_mul_ps(qq13,felec);
2237             
2238             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2239
2240             fscal            = felec;
2241
2242             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2243
2244             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2245
2246             /* Calculate temporary vectorial force */
2247             tx               = _mm256_mul_ps(fscal,dx13);
2248             ty               = _mm256_mul_ps(fscal,dy13);
2249             tz               = _mm256_mul_ps(fscal,dz13);
2250
2251             /* Update vectorial force */
2252             fix1             = _mm256_add_ps(fix1,tx);
2253             fiy1             = _mm256_add_ps(fiy1,ty);
2254             fiz1             = _mm256_add_ps(fiz1,tz);
2255
2256             fjx3             = _mm256_add_ps(fjx3,tx);
2257             fjy3             = _mm256_add_ps(fjy3,ty);
2258             fjz3             = _mm256_add_ps(fjz3,tz);
2259
2260             }
2261
2262             /**************************
2263              * CALCULATE INTERACTIONS *
2264              **************************/
2265
2266             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2267             {
2268
2269             r21              = _mm256_mul_ps(rsq21,rinv21);
2270             r21              = _mm256_andnot_ps(dummy_mask,r21);
2271
2272             /* EWALD ELECTROSTATICS */
2273             
2274             /* Analytical PME correction */
2275             zeta2            = _mm256_mul_ps(beta2,rsq21);
2276             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2277             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2278             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2279             felec            = _mm256_mul_ps(qq21,felec);
2280             
2281             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2282
2283             fscal            = felec;
2284
2285             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2286
2287             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2288
2289             /* Calculate temporary vectorial force */
2290             tx               = _mm256_mul_ps(fscal,dx21);
2291             ty               = _mm256_mul_ps(fscal,dy21);
2292             tz               = _mm256_mul_ps(fscal,dz21);
2293
2294             /* Update vectorial force */
2295             fix2             = _mm256_add_ps(fix2,tx);
2296             fiy2             = _mm256_add_ps(fiy2,ty);
2297             fiz2             = _mm256_add_ps(fiz2,tz);
2298
2299             fjx1             = _mm256_add_ps(fjx1,tx);
2300             fjy1             = _mm256_add_ps(fjy1,ty);
2301             fjz1             = _mm256_add_ps(fjz1,tz);
2302
2303             }
2304
2305             /**************************
2306              * CALCULATE INTERACTIONS *
2307              **************************/
2308
2309             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2310             {
2311
2312             r22              = _mm256_mul_ps(rsq22,rinv22);
2313             r22              = _mm256_andnot_ps(dummy_mask,r22);
2314
2315             /* EWALD ELECTROSTATICS */
2316             
2317             /* Analytical PME correction */
2318             zeta2            = _mm256_mul_ps(beta2,rsq22);
2319             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2320             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2321             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2322             felec            = _mm256_mul_ps(qq22,felec);
2323             
2324             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2325
2326             fscal            = felec;
2327
2328             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2329
2330             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2331
2332             /* Calculate temporary vectorial force */
2333             tx               = _mm256_mul_ps(fscal,dx22);
2334             ty               = _mm256_mul_ps(fscal,dy22);
2335             tz               = _mm256_mul_ps(fscal,dz22);
2336
2337             /* Update vectorial force */
2338             fix2             = _mm256_add_ps(fix2,tx);
2339             fiy2             = _mm256_add_ps(fiy2,ty);
2340             fiz2             = _mm256_add_ps(fiz2,tz);
2341
2342             fjx2             = _mm256_add_ps(fjx2,tx);
2343             fjy2             = _mm256_add_ps(fjy2,ty);
2344             fjz2             = _mm256_add_ps(fjz2,tz);
2345
2346             }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);
            r23              = _mm256_andnot_ps(dummy_mask,r23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);
            r31              = _mm256_andnot_ps(dummy_mask,r31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);
            r32              = _mm256_andnot_ps(dummy_mask,r32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);
            r33              = _mm256_andnot_ps(dummy_mask,r33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }
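
            /* All charged i-j site pairs for this chunk of eight j waters have
             * now been processed; the blocks differ only in their site indices,
             * and the accumulated j forces still sit in SIMD registers.
             */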

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
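
            /* The conditional pointers above send the force output of filler
             * lanes (jnrlist < 0, i.e. padding when the j list is not a
             * multiple of eight) to a scratch buffer, so the full-width SIMD
             * store below is always safe without per-lane branching.
             */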

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
                                                      fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
                                                      fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
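
            /* Editorial note (hedged): the swizzled decrement transposes the
             * three j-force accumulators back to per-atom x,y,z layout and
             * subtracts them from f[]. The +DIM offset skips atom 0 of each
             * 4-site water, which presumably interacts through neither charge
             * nor, with VdwNone, Lennard-Jones in this kernel flavor.
             */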

            /* Inner loop uses 540 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);
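
        /* Editorial note: the i-atom forces accumulated over the whole inner
         * loop are reduced across SIMD lanes and added to f[] here, again
         * skipping atom 0 via the +DIM offset. The sum also goes into
         * fshift[], which GROMACS uses later for the virial.
         */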

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*540);
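
    /* Bookkeeping for the performance summary: inc_nrnb() records flop counts
     * using the per-iteration totals noted above (18 flops per outer and 540
     * per inner iteration); the _F counter marks this as the force-only
     * variant of the water-water electrostatics kernel.
     */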
}