Added option to gmx nmeig to print ZPE.
[alexxy/gromacs.git] / src / gromacs / gmxlib / nonbonded / nb_kernel_avx_256_single / nb_kernel_ElecEw_VdwNone_GeomW4W4_avx_256_single.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "gmxpre.h"
39
40 #include "config.h"
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
46
47 #include "kernelutil_x86_avx_256_single.h"
48
49 /*
50  * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single
51  * Electrostatics interaction: Ewald
52  * VdW interaction:            None
53  * Geometry:                   Water4-Water4
54  * Calculate force/pot:        PotentialAndForce
55  */
56 void
57 nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single
58                     (t_nblist                    * gmx_restrict       nlist,
59                      rvec                        * gmx_restrict          xx,
60                      rvec                        * gmx_restrict          ff,
61                      struct t_forcerec           * gmx_restrict          fr,
62                      t_mdatoms                   * gmx_restrict     mdatoms,
63                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64                      t_nrnb                      * gmx_restrict        nrnb)
65 {
66     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
67      * just 0 for non-waters.
68      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
69      * jnr indices corresponding to data put in the four positions in the SIMD register.
70      */
71     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
72     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73     int              jnrA,jnrB,jnrC,jnrD;
74     int              jnrE,jnrF,jnrG,jnrH;
75     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
79     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
80     real             rcutoff_scalar;
81     real             *shiftvec,*fshift,*x,*f;
82     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
83     real             scratch[4*DIM];
84     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85     real *           vdwioffsetptr1;
86     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
87     real *           vdwioffsetptr2;
88     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
89     real *           vdwioffsetptr3;
90     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
91     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
92     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
93     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
94     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
95     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
96     __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
97     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
98     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
99     __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
100     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
101     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
102     __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
103     __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
104     __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
105     __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
106     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
107     real             *charge;
108     __m256i          ewitab;
109     __m128i          ewitab_lo,ewitab_hi;
110     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
111     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
112     real             *ewtab;
113     __m256           dummy_mask,cutoff_mask;
114     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
115     __m256           one     = _mm256_set1_ps(1.0);
116     __m256           two     = _mm256_set1_ps(2.0);
117     x                = xx[0];
118     f                = ff[0];
119
120     nri              = nlist->nri;
121     iinr             = nlist->iinr;
122     jindex           = nlist->jindex;
123     jjnr             = nlist->jjnr;
124     shiftidx         = nlist->shift;
125     gid              = nlist->gid;
126     shiftvec         = fr->shift_vec[0];
127     fshift           = fr->fshift[0];
128     facel            = _mm256_set1_ps(fr->ic->epsfac);
129     charge           = mdatoms->chargeA;
130
131     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
132     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
133     beta2            = _mm256_mul_ps(beta,beta);
134     beta3            = _mm256_mul_ps(beta,beta2);
135
136     ewtab            = fr->ic->tabq_coul_FDV0;
137     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
138     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
139
140     /* Setup water-specific parameters */
141     inr              = nlist->iinr[0];
142     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
143     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
144     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
145
146     jq1              = _mm256_set1_ps(charge[inr+1]);
147     jq2              = _mm256_set1_ps(charge[inr+2]);
148     jq3              = _mm256_set1_ps(charge[inr+3]);
149     qq11             = _mm256_mul_ps(iq1,jq1);
150     qq12             = _mm256_mul_ps(iq1,jq2);
151     qq13             = _mm256_mul_ps(iq1,jq3);
152     qq21             = _mm256_mul_ps(iq2,jq1);
153     qq22             = _mm256_mul_ps(iq2,jq2);
154     qq23             = _mm256_mul_ps(iq2,jq3);
155     qq31             = _mm256_mul_ps(iq3,jq1);
156     qq32             = _mm256_mul_ps(iq3,jq2);
157     qq33             = _mm256_mul_ps(iq3,jq3);
158
159     /* Avoid stupid compiler warnings */
160     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
161     j_coord_offsetA = 0;
162     j_coord_offsetB = 0;
163     j_coord_offsetC = 0;
164     j_coord_offsetD = 0;
165     j_coord_offsetE = 0;
166     j_coord_offsetF = 0;
167     j_coord_offsetG = 0;
168     j_coord_offsetH = 0;
169
170     outeriter        = 0;
171     inneriter        = 0;
172
173     for(iidx=0;iidx<4*DIM;iidx++)
174     {
175         scratch[iidx] = 0.0;
176     }
177
178     /* Start outer loop over neighborlists */
179     for(iidx=0; iidx<nri; iidx++)
180     {
181         /* Load shift vector for this list */
182         i_shift_offset   = DIM*shiftidx[iidx];
183
184         /* Load limits for loop over neighbors */
185         j_index_start    = jindex[iidx];
186         j_index_end      = jindex[iidx+1];
187
188         /* Get outer coordinate index */
189         inr              = iinr[iidx];
190         i_coord_offset   = DIM*inr;
191
192         /* Load i particle coords and add shift vector */
193         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
194                                                     &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
195
196         fix1             = _mm256_setzero_ps();
197         fiy1             = _mm256_setzero_ps();
198         fiz1             = _mm256_setzero_ps();
199         fix2             = _mm256_setzero_ps();
200         fiy2             = _mm256_setzero_ps();
201         fiz2             = _mm256_setzero_ps();
202         fix3             = _mm256_setzero_ps();
203         fiy3             = _mm256_setzero_ps();
204         fiz3             = _mm256_setzero_ps();
205
206         /* Reset potential sums */
207         velecsum         = _mm256_setzero_ps();
208
209         /* Start inner kernel loop */
210         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
211         {
212
213             /* Get j neighbor index, and coordinate index */
214             jnrA             = jjnr[jidx];
215             jnrB             = jjnr[jidx+1];
216             jnrC             = jjnr[jidx+2];
217             jnrD             = jjnr[jidx+3];
218             jnrE             = jjnr[jidx+4];
219             jnrF             = jjnr[jidx+5];
220             jnrG             = jjnr[jidx+6];
221             jnrH             = jjnr[jidx+7];
222             j_coord_offsetA  = DIM*jnrA;
223             j_coord_offsetB  = DIM*jnrB;
224             j_coord_offsetC  = DIM*jnrC;
225             j_coord_offsetD  = DIM*jnrD;
226             j_coord_offsetE  = DIM*jnrE;
227             j_coord_offsetF  = DIM*jnrF;
228             j_coord_offsetG  = DIM*jnrG;
229             j_coord_offsetH  = DIM*jnrH;
230
231             /* load j atom coordinates */
232             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
233                                                  x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
234                                                  x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
235                                                  x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
236                                                  &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
237
238             /* Calculate displacement vector */
239             dx11             = _mm256_sub_ps(ix1,jx1);
240             dy11             = _mm256_sub_ps(iy1,jy1);
241             dz11             = _mm256_sub_ps(iz1,jz1);
242             dx12             = _mm256_sub_ps(ix1,jx2);
243             dy12             = _mm256_sub_ps(iy1,jy2);
244             dz12             = _mm256_sub_ps(iz1,jz2);
245             dx13             = _mm256_sub_ps(ix1,jx3);
246             dy13             = _mm256_sub_ps(iy1,jy3);
247             dz13             = _mm256_sub_ps(iz1,jz3);
248             dx21             = _mm256_sub_ps(ix2,jx1);
249             dy21             = _mm256_sub_ps(iy2,jy1);
250             dz21             = _mm256_sub_ps(iz2,jz1);
251             dx22             = _mm256_sub_ps(ix2,jx2);
252             dy22             = _mm256_sub_ps(iy2,jy2);
253             dz22             = _mm256_sub_ps(iz2,jz2);
254             dx23             = _mm256_sub_ps(ix2,jx3);
255             dy23             = _mm256_sub_ps(iy2,jy3);
256             dz23             = _mm256_sub_ps(iz2,jz3);
257             dx31             = _mm256_sub_ps(ix3,jx1);
258             dy31             = _mm256_sub_ps(iy3,jy1);
259             dz31             = _mm256_sub_ps(iz3,jz1);
260             dx32             = _mm256_sub_ps(ix3,jx2);
261             dy32             = _mm256_sub_ps(iy3,jy2);
262             dz32             = _mm256_sub_ps(iz3,jz2);
263             dx33             = _mm256_sub_ps(ix3,jx3);
264             dy33             = _mm256_sub_ps(iy3,jy3);
265             dz33             = _mm256_sub_ps(iz3,jz3);
266
267             /* Calculate squared distance and things based on it */
268             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
269             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
270             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
271             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
272             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
273             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
274             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
275             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
276             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
277
278             rinv11           = avx256_invsqrt_f(rsq11);
279             rinv12           = avx256_invsqrt_f(rsq12);
280             rinv13           = avx256_invsqrt_f(rsq13);
281             rinv21           = avx256_invsqrt_f(rsq21);
282             rinv22           = avx256_invsqrt_f(rsq22);
283             rinv23           = avx256_invsqrt_f(rsq23);
284             rinv31           = avx256_invsqrt_f(rsq31);
285             rinv32           = avx256_invsqrt_f(rsq32);
286             rinv33           = avx256_invsqrt_f(rsq33);
287
288             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
289             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
290             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
291             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
292             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
293             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
294             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
295             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
296             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
297
298             fjx1             = _mm256_setzero_ps();
299             fjy1             = _mm256_setzero_ps();
300             fjz1             = _mm256_setzero_ps();
301             fjx2             = _mm256_setzero_ps();
302             fjy2             = _mm256_setzero_ps();
303             fjz2             = _mm256_setzero_ps();
304             fjx3             = _mm256_setzero_ps();
305             fjy3             = _mm256_setzero_ps();
306             fjz3             = _mm256_setzero_ps();
307
308             /**************************
309              * CALCULATE INTERACTIONS *
310              **************************/
311
312             r11              = _mm256_mul_ps(rsq11,rinv11);
313
314             /* EWALD ELECTROSTATICS */
315             
316             /* Analytical PME correction */
317             zeta2            = _mm256_mul_ps(beta2,rsq11);
318             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
319             pmecorrF         = avx256_pmecorrF_f(zeta2);
320             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
321             felec            = _mm256_mul_ps(qq11,felec);
322             pmecorrV         = avx256_pmecorrV_f(zeta2);
323             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
324             velec            = _mm256_sub_ps(rinv11,pmecorrV);
325             velec            = _mm256_mul_ps(qq11,velec);
326             
327             /* Update potential sum for this i atom from the interaction with this j atom. */
328             velecsum         = _mm256_add_ps(velecsum,velec);
329
330             fscal            = felec;
331
332             /* Calculate temporary vectorial force */
333             tx               = _mm256_mul_ps(fscal,dx11);
334             ty               = _mm256_mul_ps(fscal,dy11);
335             tz               = _mm256_mul_ps(fscal,dz11);
336
337             /* Update vectorial force */
338             fix1             = _mm256_add_ps(fix1,tx);
339             fiy1             = _mm256_add_ps(fiy1,ty);
340             fiz1             = _mm256_add_ps(fiz1,tz);
341
342             fjx1             = _mm256_add_ps(fjx1,tx);
343             fjy1             = _mm256_add_ps(fjy1,ty);
344             fjz1             = _mm256_add_ps(fjz1,tz);
345
346             /**************************
347              * CALCULATE INTERACTIONS *
348              **************************/
349
350             r12              = _mm256_mul_ps(rsq12,rinv12);
351
352             /* EWALD ELECTROSTATICS */
353             
354             /* Analytical PME correction */
355             zeta2            = _mm256_mul_ps(beta2,rsq12);
356             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
357             pmecorrF         = avx256_pmecorrF_f(zeta2);
358             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
359             felec            = _mm256_mul_ps(qq12,felec);
360             pmecorrV         = avx256_pmecorrV_f(zeta2);
361             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
362             velec            = _mm256_sub_ps(rinv12,pmecorrV);
363             velec            = _mm256_mul_ps(qq12,velec);
364             
365             /* Update potential sum for this i atom from the interaction with this j atom. */
366             velecsum         = _mm256_add_ps(velecsum,velec);
367
368             fscal            = felec;
369
370             /* Calculate temporary vectorial force */
371             tx               = _mm256_mul_ps(fscal,dx12);
372             ty               = _mm256_mul_ps(fscal,dy12);
373             tz               = _mm256_mul_ps(fscal,dz12);
374
375             /* Update vectorial force */
376             fix1             = _mm256_add_ps(fix1,tx);
377             fiy1             = _mm256_add_ps(fiy1,ty);
378             fiz1             = _mm256_add_ps(fiz1,tz);
379
380             fjx2             = _mm256_add_ps(fjx2,tx);
381             fjy2             = _mm256_add_ps(fjy2,ty);
382             fjz2             = _mm256_add_ps(fjz2,tz);
383
384             /**************************
385              * CALCULATE INTERACTIONS *
386              **************************/
387
388             r13              = _mm256_mul_ps(rsq13,rinv13);
389
390             /* EWALD ELECTROSTATICS */
391             
392             /* Analytical PME correction */
393             zeta2            = _mm256_mul_ps(beta2,rsq13);
394             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
395             pmecorrF         = avx256_pmecorrF_f(zeta2);
396             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
397             felec            = _mm256_mul_ps(qq13,felec);
398             pmecorrV         = avx256_pmecorrV_f(zeta2);
399             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
400             velec            = _mm256_sub_ps(rinv13,pmecorrV);
401             velec            = _mm256_mul_ps(qq13,velec);
402             
403             /* Update potential sum for this i atom from the interaction with this j atom. */
404             velecsum         = _mm256_add_ps(velecsum,velec);
405
406             fscal            = felec;
407
408             /* Calculate temporary vectorial force */
409             tx               = _mm256_mul_ps(fscal,dx13);
410             ty               = _mm256_mul_ps(fscal,dy13);
411             tz               = _mm256_mul_ps(fscal,dz13);
412
413             /* Update vectorial force */
414             fix1             = _mm256_add_ps(fix1,tx);
415             fiy1             = _mm256_add_ps(fiy1,ty);
416             fiz1             = _mm256_add_ps(fiz1,tz);
417
418             fjx3             = _mm256_add_ps(fjx3,tx);
419             fjy3             = _mm256_add_ps(fjy3,ty);
420             fjz3             = _mm256_add_ps(fjz3,tz);
421
422             /**************************
423              * CALCULATE INTERACTIONS *
424              **************************/
425
426             r21              = _mm256_mul_ps(rsq21,rinv21);
427
428             /* EWALD ELECTROSTATICS */
429             
430             /* Analytical PME correction */
431             zeta2            = _mm256_mul_ps(beta2,rsq21);
432             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
433             pmecorrF         = avx256_pmecorrF_f(zeta2);
434             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
435             felec            = _mm256_mul_ps(qq21,felec);
436             pmecorrV         = avx256_pmecorrV_f(zeta2);
437             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
438             velec            = _mm256_sub_ps(rinv21,pmecorrV);
439             velec            = _mm256_mul_ps(qq21,velec);
440             
441             /* Update potential sum for this i atom from the interaction with this j atom. */
442             velecsum         = _mm256_add_ps(velecsum,velec);
443
444             fscal            = felec;
445
446             /* Calculate temporary vectorial force */
447             tx               = _mm256_mul_ps(fscal,dx21);
448             ty               = _mm256_mul_ps(fscal,dy21);
449             tz               = _mm256_mul_ps(fscal,dz21);
450
451             /* Update vectorial force */
452             fix2             = _mm256_add_ps(fix2,tx);
453             fiy2             = _mm256_add_ps(fiy2,ty);
454             fiz2             = _mm256_add_ps(fiz2,tz);
455
456             fjx1             = _mm256_add_ps(fjx1,tx);
457             fjy1             = _mm256_add_ps(fjy1,ty);
458             fjz1             = _mm256_add_ps(fjz1,tz);
459
460             /**************************
461              * CALCULATE INTERACTIONS *
462              **************************/
463
464             r22              = _mm256_mul_ps(rsq22,rinv22);
465
466             /* EWALD ELECTROSTATICS */
467             
468             /* Analytical PME correction */
469             zeta2            = _mm256_mul_ps(beta2,rsq22);
470             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
471             pmecorrF         = avx256_pmecorrF_f(zeta2);
472             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
473             felec            = _mm256_mul_ps(qq22,felec);
474             pmecorrV         = avx256_pmecorrV_f(zeta2);
475             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
476             velec            = _mm256_sub_ps(rinv22,pmecorrV);
477             velec            = _mm256_mul_ps(qq22,velec);
478             
479             /* Update potential sum for this i atom from the interaction with this j atom. */
480             velecsum         = _mm256_add_ps(velecsum,velec);
481
482             fscal            = felec;
483
484             /* Calculate temporary vectorial force */
485             tx               = _mm256_mul_ps(fscal,dx22);
486             ty               = _mm256_mul_ps(fscal,dy22);
487             tz               = _mm256_mul_ps(fscal,dz22);
488
489             /* Update vectorial force */
490             fix2             = _mm256_add_ps(fix2,tx);
491             fiy2             = _mm256_add_ps(fiy2,ty);
492             fiz2             = _mm256_add_ps(fiz2,tz);
493
494             fjx2             = _mm256_add_ps(fjx2,tx);
495             fjy2             = _mm256_add_ps(fjy2,ty);
496             fjz2             = _mm256_add_ps(fjz2,tz);
497
498             /**************************
499              * CALCULATE INTERACTIONS *
500              **************************/
501
502             r23              = _mm256_mul_ps(rsq23,rinv23);
503
504             /* EWALD ELECTROSTATICS */
505             
506             /* Analytical PME correction */
507             zeta2            = _mm256_mul_ps(beta2,rsq23);
508             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
509             pmecorrF         = avx256_pmecorrF_f(zeta2);
510             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
511             felec            = _mm256_mul_ps(qq23,felec);
512             pmecorrV         = avx256_pmecorrV_f(zeta2);
513             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
514             velec            = _mm256_sub_ps(rinv23,pmecorrV);
515             velec            = _mm256_mul_ps(qq23,velec);
516             
517             /* Update potential sum for this i atom from the interaction with this j atom. */
518             velecsum         = _mm256_add_ps(velecsum,velec);
519
520             fscal            = felec;
521
522             /* Calculate temporary vectorial force */
523             tx               = _mm256_mul_ps(fscal,dx23);
524             ty               = _mm256_mul_ps(fscal,dy23);
525             tz               = _mm256_mul_ps(fscal,dz23);
526
527             /* Update vectorial force */
528             fix2             = _mm256_add_ps(fix2,tx);
529             fiy2             = _mm256_add_ps(fiy2,ty);
530             fiz2             = _mm256_add_ps(fiz2,tz);
531
532             fjx3             = _mm256_add_ps(fjx3,tx);
533             fjy3             = _mm256_add_ps(fjy3,ty);
534             fjz3             = _mm256_add_ps(fjz3,tz);
535
536             /**************************
537              * CALCULATE INTERACTIONS *
538              **************************/
539
540             r31              = _mm256_mul_ps(rsq31,rinv31);
541
542             /* EWALD ELECTROSTATICS */
543             
544             /* Analytical PME correction */
545             zeta2            = _mm256_mul_ps(beta2,rsq31);
546             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
547             pmecorrF         = avx256_pmecorrF_f(zeta2);
548             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
549             felec            = _mm256_mul_ps(qq31,felec);
550             pmecorrV         = avx256_pmecorrV_f(zeta2);
551             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
552             velec            = _mm256_sub_ps(rinv31,pmecorrV);
553             velec            = _mm256_mul_ps(qq31,velec);
554             
555             /* Update potential sum for this i atom from the interaction with this j atom. */
556             velecsum         = _mm256_add_ps(velecsum,velec);
557
558             fscal            = felec;
559
560             /* Calculate temporary vectorial force */
561             tx               = _mm256_mul_ps(fscal,dx31);
562             ty               = _mm256_mul_ps(fscal,dy31);
563             tz               = _mm256_mul_ps(fscal,dz31);
564
565             /* Update vectorial force */
566             fix3             = _mm256_add_ps(fix3,tx);
567             fiy3             = _mm256_add_ps(fiy3,ty);
568             fiz3             = _mm256_add_ps(fiz3,tz);
569
570             fjx1             = _mm256_add_ps(fjx1,tx);
571             fjy1             = _mm256_add_ps(fjy1,ty);
572             fjz1             = _mm256_add_ps(fjz1,tz);
573
574             /**************************
575              * CALCULATE INTERACTIONS *
576              **************************/
577
578             r32              = _mm256_mul_ps(rsq32,rinv32);
579
580             /* EWALD ELECTROSTATICS */
581             
582             /* Analytical PME correction */
583             zeta2            = _mm256_mul_ps(beta2,rsq32);
584             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
585             pmecorrF         = avx256_pmecorrF_f(zeta2);
586             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
587             felec            = _mm256_mul_ps(qq32,felec);
588             pmecorrV         = avx256_pmecorrV_f(zeta2);
589             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
590             velec            = _mm256_sub_ps(rinv32,pmecorrV);
591             velec            = _mm256_mul_ps(qq32,velec);
592             
593             /* Update potential sum for this i atom from the interaction with this j atom. */
594             velecsum         = _mm256_add_ps(velecsum,velec);
595
596             fscal            = felec;
597
598             /* Calculate temporary vectorial force */
599             tx               = _mm256_mul_ps(fscal,dx32);
600             ty               = _mm256_mul_ps(fscal,dy32);
601             tz               = _mm256_mul_ps(fscal,dz32);
602
603             /* Update vectorial force */
604             fix3             = _mm256_add_ps(fix3,tx);
605             fiy3             = _mm256_add_ps(fiy3,ty);
606             fiz3             = _mm256_add_ps(fiz3,tz);
607
608             fjx2             = _mm256_add_ps(fjx2,tx);
609             fjy2             = _mm256_add_ps(fjy2,ty);
610             fjz2             = _mm256_add_ps(fjz2,tz);
611
612             /**************************
613              * CALCULATE INTERACTIONS *
614              **************************/
615
616             r33              = _mm256_mul_ps(rsq33,rinv33);
617
618             /* EWALD ELECTROSTATICS */
619             
620             /* Analytical PME correction */
621             zeta2            = _mm256_mul_ps(beta2,rsq33);
622             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
623             pmecorrF         = avx256_pmecorrF_f(zeta2);
624             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
625             felec            = _mm256_mul_ps(qq33,felec);
626             pmecorrV         = avx256_pmecorrV_f(zeta2);
627             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
628             velec            = _mm256_sub_ps(rinv33,pmecorrV);
629             velec            = _mm256_mul_ps(qq33,velec);
630             
631             /* Update potential sum for this i atom from the interaction with this j atom. */
632             velecsum         = _mm256_add_ps(velecsum,velec);
633
634             fscal            = felec;
635
636             /* Calculate temporary vectorial force */
637             tx               = _mm256_mul_ps(fscal,dx33);
638             ty               = _mm256_mul_ps(fscal,dy33);
639             tz               = _mm256_mul_ps(fscal,dz33);
640
641             /* Update vectorial force */
642             fix3             = _mm256_add_ps(fix3,tx);
643             fiy3             = _mm256_add_ps(fiy3,ty);
644             fiz3             = _mm256_add_ps(fiz3,tz);
645
646             fjx3             = _mm256_add_ps(fjx3,tx);
647             fjy3             = _mm256_add_ps(fjy3,ty);
648             fjz3             = _mm256_add_ps(fjz3,tz);
649
650             fjptrA             = f+j_coord_offsetA;
651             fjptrB             = f+j_coord_offsetB;
652             fjptrC             = f+j_coord_offsetC;
653             fjptrD             = f+j_coord_offsetD;
654             fjptrE             = f+j_coord_offsetE;
655             fjptrF             = f+j_coord_offsetF;
656             fjptrG             = f+j_coord_offsetG;
657             fjptrH             = f+j_coord_offsetH;
658
659             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
660                                                       fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
661                                                       fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
662
663             /* Inner loop uses 756 flops */
664         }
665
666         if(jidx<j_index_end)
667         {
668
669             /* Get j neighbor index, and coordinate index */
670             jnrlistA         = jjnr[jidx];
671             jnrlistB         = jjnr[jidx+1];
672             jnrlistC         = jjnr[jidx+2];
673             jnrlistD         = jjnr[jidx+3];
674             jnrlistE         = jjnr[jidx+4];
675             jnrlistF         = jjnr[jidx+5];
676             jnrlistG         = jjnr[jidx+6];
677             jnrlistH         = jjnr[jidx+7];
678             /* Sign of each element will be negative for non-real atoms.
679              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
680              * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
681              */
682             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
683                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
684                                             
685             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
686             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
687             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
688             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
689             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
690             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
691             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
692             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
693             j_coord_offsetA  = DIM*jnrA;
694             j_coord_offsetB  = DIM*jnrB;
695             j_coord_offsetC  = DIM*jnrC;
696             j_coord_offsetD  = DIM*jnrD;
697             j_coord_offsetE  = DIM*jnrE;
698             j_coord_offsetF  = DIM*jnrF;
699             j_coord_offsetG  = DIM*jnrG;
700             j_coord_offsetH  = DIM*jnrH;
701
702             /* load j atom coordinates */
703             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
704                                                  x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
705                                                  x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
706                                                  x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
707                                                  &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
708
709             /* Calculate displacement vector */
710             dx11             = _mm256_sub_ps(ix1,jx1);
711             dy11             = _mm256_sub_ps(iy1,jy1);
712             dz11             = _mm256_sub_ps(iz1,jz1);
713             dx12             = _mm256_sub_ps(ix1,jx2);
714             dy12             = _mm256_sub_ps(iy1,jy2);
715             dz12             = _mm256_sub_ps(iz1,jz2);
716             dx13             = _mm256_sub_ps(ix1,jx3);
717             dy13             = _mm256_sub_ps(iy1,jy3);
718             dz13             = _mm256_sub_ps(iz1,jz3);
719             dx21             = _mm256_sub_ps(ix2,jx1);
720             dy21             = _mm256_sub_ps(iy2,jy1);
721             dz21             = _mm256_sub_ps(iz2,jz1);
722             dx22             = _mm256_sub_ps(ix2,jx2);
723             dy22             = _mm256_sub_ps(iy2,jy2);
724             dz22             = _mm256_sub_ps(iz2,jz2);
725             dx23             = _mm256_sub_ps(ix2,jx3);
726             dy23             = _mm256_sub_ps(iy2,jy3);
727             dz23             = _mm256_sub_ps(iz2,jz3);
728             dx31             = _mm256_sub_ps(ix3,jx1);
729             dy31             = _mm256_sub_ps(iy3,jy1);
730             dz31             = _mm256_sub_ps(iz3,jz1);
731             dx32             = _mm256_sub_ps(ix3,jx2);
732             dy32             = _mm256_sub_ps(iy3,jy2);
733             dz32             = _mm256_sub_ps(iz3,jz2);
734             dx33             = _mm256_sub_ps(ix3,jx3);
735             dy33             = _mm256_sub_ps(iy3,jy3);
736             dz33             = _mm256_sub_ps(iz3,jz3);
737
738             /* Calculate squared distance and things based on it */
739             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
740             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
741             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
742             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
743             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
744             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
745             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
746             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
747             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
748
749             rinv11           = avx256_invsqrt_f(rsq11);
750             rinv12           = avx256_invsqrt_f(rsq12);
751             rinv13           = avx256_invsqrt_f(rsq13);
752             rinv21           = avx256_invsqrt_f(rsq21);
753             rinv22           = avx256_invsqrt_f(rsq22);
754             rinv23           = avx256_invsqrt_f(rsq23);
755             rinv31           = avx256_invsqrt_f(rsq31);
756             rinv32           = avx256_invsqrt_f(rsq32);
757             rinv33           = avx256_invsqrt_f(rsq33);
758
759             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
760             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
761             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
762             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
763             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
764             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
765             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
766             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
767             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
768
769             fjx1             = _mm256_setzero_ps();
770             fjy1             = _mm256_setzero_ps();
771             fjz1             = _mm256_setzero_ps();
772             fjx2             = _mm256_setzero_ps();
773             fjy2             = _mm256_setzero_ps();
774             fjz2             = _mm256_setzero_ps();
775             fjx3             = _mm256_setzero_ps();
776             fjy3             = _mm256_setzero_ps();
777             fjz3             = _mm256_setzero_ps();
778
779             /**************************
780              * CALCULATE INTERACTIONS *
781              **************************/
782
783             r11              = _mm256_mul_ps(rsq11,rinv11);
784             r11              = _mm256_andnot_ps(dummy_mask,r11);
785
786             /* EWALD ELECTROSTATICS */
787             
788             /* Analytical PME correction */
789             zeta2            = _mm256_mul_ps(beta2,rsq11);
790             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
791             pmecorrF         = avx256_pmecorrF_f(zeta2);
792             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
793             felec            = _mm256_mul_ps(qq11,felec);
794             pmecorrV         = avx256_pmecorrV_f(zeta2);
795             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
796             velec            = _mm256_sub_ps(rinv11,pmecorrV);
797             velec            = _mm256_mul_ps(qq11,velec);
798             
799             /* Update potential sum for this i atom from the interaction with this j atom. */
800             velec            = _mm256_andnot_ps(dummy_mask,velec);
801             velecsum         = _mm256_add_ps(velecsum,velec);
802
803             fscal            = felec;
804
805             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
806
807             /* Calculate temporary vectorial force */
808             tx               = _mm256_mul_ps(fscal,dx11);
809             ty               = _mm256_mul_ps(fscal,dy11);
810             tz               = _mm256_mul_ps(fscal,dz11);
811
812             /* Update vectorial force */
813             fix1             = _mm256_add_ps(fix1,tx);
814             fiy1             = _mm256_add_ps(fiy1,ty);
815             fiz1             = _mm256_add_ps(fiz1,tz);
816
817             fjx1             = _mm256_add_ps(fjx1,tx);
818             fjy1             = _mm256_add_ps(fjy1,ty);
819             fjz1             = _mm256_add_ps(fjz1,tz);
820
821             /**************************
822              * CALCULATE INTERACTIONS *
823              **************************/
824
825             r12              = _mm256_mul_ps(rsq12,rinv12);
826             r12              = _mm256_andnot_ps(dummy_mask,r12);
827
828             /* EWALD ELECTROSTATICS */
829             
830             /* Analytical PME correction */
831             zeta2            = _mm256_mul_ps(beta2,rsq12);
832             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
833             pmecorrF         = avx256_pmecorrF_f(zeta2);
834             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
835             felec            = _mm256_mul_ps(qq12,felec);
836             pmecorrV         = avx256_pmecorrV_f(zeta2);
837             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
838             velec            = _mm256_sub_ps(rinv12,pmecorrV);
839             velec            = _mm256_mul_ps(qq12,velec);
840             
841             /* Update potential sum for this i atom from the interaction with this j atom. */
842             velec            = _mm256_andnot_ps(dummy_mask,velec);
843             velecsum         = _mm256_add_ps(velecsum,velec);
844
845             fscal            = felec;
846
847             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
848
849             /* Calculate temporary vectorial force */
850             tx               = _mm256_mul_ps(fscal,dx12);
851             ty               = _mm256_mul_ps(fscal,dy12);
852             tz               = _mm256_mul_ps(fscal,dz12);
853
854             /* Update vectorial force */
855             fix1             = _mm256_add_ps(fix1,tx);
856             fiy1             = _mm256_add_ps(fiy1,ty);
857             fiz1             = _mm256_add_ps(fiz1,tz);
858
859             fjx2             = _mm256_add_ps(fjx2,tx);
860             fjy2             = _mm256_add_ps(fjy2,ty);
861             fjz2             = _mm256_add_ps(fjz2,tz);
862
863             /**************************
864              * CALCULATE INTERACTIONS *
865              **************************/
866
867             r13              = _mm256_mul_ps(rsq13,rinv13);
868             r13              = _mm256_andnot_ps(dummy_mask,r13);
869
870             /* EWALD ELECTROSTATICS */
871             
872             /* Analytical PME correction */
873             zeta2            = _mm256_mul_ps(beta2,rsq13);
874             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
875             pmecorrF         = avx256_pmecorrF_f(zeta2);
876             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
877             felec            = _mm256_mul_ps(qq13,felec);
878             pmecorrV         = avx256_pmecorrV_f(zeta2);
879             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
880             velec            = _mm256_sub_ps(rinv13,pmecorrV);
881             velec            = _mm256_mul_ps(qq13,velec);
882             
883             /* Update potential sum for this i atom from the interaction with this j atom. */
884             velec            = _mm256_andnot_ps(dummy_mask,velec);
885             velecsum         = _mm256_add_ps(velecsum,velec);
886
887             fscal            = felec;
888
889             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
890
891             /* Calculate temporary vectorial force */
892             tx               = _mm256_mul_ps(fscal,dx13);
893             ty               = _mm256_mul_ps(fscal,dy13);
894             tz               = _mm256_mul_ps(fscal,dz13);
895
896             /* Update vectorial force */
897             fix1             = _mm256_add_ps(fix1,tx);
898             fiy1             = _mm256_add_ps(fiy1,ty);
899             fiz1             = _mm256_add_ps(fiz1,tz);
900
901             fjx3             = _mm256_add_ps(fjx3,tx);
902             fjy3             = _mm256_add_ps(fjy3,ty);
903             fjz3             = _mm256_add_ps(fjz3,tz);
904
905             /**************************
906              * CALCULATE INTERACTIONS *
907              **************************/
908
909             r21              = _mm256_mul_ps(rsq21,rinv21);
910             r21              = _mm256_andnot_ps(dummy_mask,r21);
911
912             /* EWALD ELECTROSTATICS */
913             
914             /* Analytical PME correction */
915             zeta2            = _mm256_mul_ps(beta2,rsq21);
916             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
917             pmecorrF         = avx256_pmecorrF_f(zeta2);
918             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
919             felec            = _mm256_mul_ps(qq21,felec);
920             pmecorrV         = avx256_pmecorrV_f(zeta2);
921             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
922             velec            = _mm256_sub_ps(rinv21,pmecorrV);
923             velec            = _mm256_mul_ps(qq21,velec);
924             
925             /* Update potential sum for this i atom from the interaction with this j atom. */
926             velec            = _mm256_andnot_ps(dummy_mask,velec);
927             velecsum         = _mm256_add_ps(velecsum,velec);
928
929             fscal            = felec;
930
931             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
932
933             /* Calculate temporary vectorial force */
934             tx               = _mm256_mul_ps(fscal,dx21);
935             ty               = _mm256_mul_ps(fscal,dy21);
936             tz               = _mm256_mul_ps(fscal,dz21);
937
938             /* Update vectorial force */
939             fix2             = _mm256_add_ps(fix2,tx);
940             fiy2             = _mm256_add_ps(fiy2,ty);
941             fiz2             = _mm256_add_ps(fiz2,tz);
942
943             fjx1             = _mm256_add_ps(fjx1,tx);
944             fjy1             = _mm256_add_ps(fjy1,ty);
945             fjz1             = _mm256_add_ps(fjz1,tz);
946
947             /**************************
948              * CALCULATE INTERACTIONS *
949              **************************/
950
951             r22              = _mm256_mul_ps(rsq22,rinv22);
952             r22              = _mm256_andnot_ps(dummy_mask,r22);
953
954             /* EWALD ELECTROSTATICS */
955             
956             /* Analytical PME correction */
957             zeta2            = _mm256_mul_ps(beta2,rsq22);
958             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
959             pmecorrF         = avx256_pmecorrF_f(zeta2);
960             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
961             felec            = _mm256_mul_ps(qq22,felec);
962             pmecorrV         = avx256_pmecorrV_f(zeta2);
963             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
964             velec            = _mm256_sub_ps(rinv22,pmecorrV);
965             velec            = _mm256_mul_ps(qq22,velec);
966             
967             /* Update potential sum for this i atom from the interaction with this j atom. */
968             velec            = _mm256_andnot_ps(dummy_mask,velec);
969             velecsum         = _mm256_add_ps(velecsum,velec);
970
971             fscal            = felec;
972
973             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
974
975             /* Calculate temporary vectorial force */
976             tx               = _mm256_mul_ps(fscal,dx22);
977             ty               = _mm256_mul_ps(fscal,dy22);
978             tz               = _mm256_mul_ps(fscal,dz22);
979
980             /* Update vectorial force */
981             fix2             = _mm256_add_ps(fix2,tx);
982             fiy2             = _mm256_add_ps(fiy2,ty);
983             fiz2             = _mm256_add_ps(fiz2,tz);
984
985             fjx2             = _mm256_add_ps(fjx2,tx);
986             fjy2             = _mm256_add_ps(fjy2,ty);
987             fjz2             = _mm256_add_ps(fjz2,tz);
988
989             /**************************
990              * CALCULATE INTERACTIONS *
991              **************************/
992
993             r23              = _mm256_mul_ps(rsq23,rinv23);
994             r23              = _mm256_andnot_ps(dummy_mask,r23);
995
996             /* EWALD ELECTROSTATICS */
997             
998             /* Analytical PME correction */
999             zeta2            = _mm256_mul_ps(beta2,rsq23);
1000             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
1001             pmecorrF         = avx256_pmecorrF_f(zeta2);
1002             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1003             felec            = _mm256_mul_ps(qq23,felec);
1004             pmecorrV         = avx256_pmecorrV_f(zeta2);
1005             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1006             velec            = _mm256_sub_ps(rinv23,pmecorrV);
1007             velec            = _mm256_mul_ps(qq23,velec);
1008             
1009             /* Update potential sum for this i atom from the interaction with this j atom. */
1010             velec            = _mm256_andnot_ps(dummy_mask,velec);
1011             velecsum         = _mm256_add_ps(velecsum,velec);
1012
1013             fscal            = felec;
1014
1015             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1016
1017             /* Calculate temporary vectorial force */
1018             tx               = _mm256_mul_ps(fscal,dx23);
1019             ty               = _mm256_mul_ps(fscal,dy23);
1020             tz               = _mm256_mul_ps(fscal,dz23);
1021
1022             /* Update vectorial force */
1023             fix2             = _mm256_add_ps(fix2,tx);
1024             fiy2             = _mm256_add_ps(fiy2,ty);
1025             fiz2             = _mm256_add_ps(fiz2,tz);
1026
1027             fjx3             = _mm256_add_ps(fjx3,tx);
1028             fjy3             = _mm256_add_ps(fjy3,ty);
1029             fjz3             = _mm256_add_ps(fjz3,tz);
1030
1031             /**************************
1032              * CALCULATE INTERACTIONS *
1033              **************************/
1034
1035             r31              = _mm256_mul_ps(rsq31,rinv31);
1036             r31              = _mm256_andnot_ps(dummy_mask,r31);
1037
1038             /* EWALD ELECTROSTATICS */
1039             
1040             /* Analytical PME correction */
1041             zeta2            = _mm256_mul_ps(beta2,rsq31);
1042             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
1043             pmecorrF         = avx256_pmecorrF_f(zeta2);
1044             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1045             felec            = _mm256_mul_ps(qq31,felec);
1046             pmecorrV         = avx256_pmecorrV_f(zeta2);
1047             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1048             velec            = _mm256_sub_ps(rinv31,pmecorrV);
1049             velec            = _mm256_mul_ps(qq31,velec);
1050             
1051             /* Update potential sum for this i atom from the interaction with this j atom. */
1052             velec            = _mm256_andnot_ps(dummy_mask,velec);
1053             velecsum         = _mm256_add_ps(velecsum,velec);
1054
1055             fscal            = felec;
1056
1057             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1058
1059             /* Calculate temporary vectorial force */
1060             tx               = _mm256_mul_ps(fscal,dx31);
1061             ty               = _mm256_mul_ps(fscal,dy31);
1062             tz               = _mm256_mul_ps(fscal,dz31);
1063
1064             /* Update vectorial force */
1065             fix3             = _mm256_add_ps(fix3,tx);
1066             fiy3             = _mm256_add_ps(fiy3,ty);
1067             fiz3             = _mm256_add_ps(fiz3,tz);
1068
1069             fjx1             = _mm256_add_ps(fjx1,tx);
1070             fjy1             = _mm256_add_ps(fjy1,ty);
1071             fjz1             = _mm256_add_ps(fjz1,tz);
1072
1073             /**************************
1074              * CALCULATE INTERACTIONS *
1075              **************************/
1076
1077             r32              = _mm256_mul_ps(rsq32,rinv32);
1078             r32              = _mm256_andnot_ps(dummy_mask,r32);
1079
1080             /* EWALD ELECTROSTATICS */
1081             
1082             /* Analytical PME correction */
1083             zeta2            = _mm256_mul_ps(beta2,rsq32);
1084             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
1085             pmecorrF         = avx256_pmecorrF_f(zeta2);
1086             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1087             felec            = _mm256_mul_ps(qq32,felec);
1088             pmecorrV         = avx256_pmecorrV_f(zeta2);
1089             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1090             velec            = _mm256_sub_ps(rinv32,pmecorrV);
1091             velec            = _mm256_mul_ps(qq32,velec);
1092             
1093             /* Update potential sum for this i atom from the interaction with this j atom. */
1094             velec            = _mm256_andnot_ps(dummy_mask,velec);
1095             velecsum         = _mm256_add_ps(velecsum,velec);
1096
1097             fscal            = felec;
1098
1099             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1100
1101             /* Calculate temporary vectorial force */
1102             tx               = _mm256_mul_ps(fscal,dx32);
1103             ty               = _mm256_mul_ps(fscal,dy32);
1104             tz               = _mm256_mul_ps(fscal,dz32);
1105
1106             /* Update vectorial force */
1107             fix3             = _mm256_add_ps(fix3,tx);
1108             fiy3             = _mm256_add_ps(fiy3,ty);
1109             fiz3             = _mm256_add_ps(fiz3,tz);
1110
1111             fjx2             = _mm256_add_ps(fjx2,tx);
1112             fjy2             = _mm256_add_ps(fjy2,ty);
1113             fjz2             = _mm256_add_ps(fjz2,tz);
1114
1115             /**************************
1116              * CALCULATE INTERACTIONS *
1117              **************************/
1118
1119             r33              = _mm256_mul_ps(rsq33,rinv33);
1120             r33              = _mm256_andnot_ps(dummy_mask,r33);
1121
1122             /* EWALD ELECTROSTATICS */
1123             
1124             /* Analytical PME correction */
1125             zeta2            = _mm256_mul_ps(beta2,rsq33);
1126             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
1127             pmecorrF         = avx256_pmecorrF_f(zeta2);
1128             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1129             felec            = _mm256_mul_ps(qq33,felec);
1130             pmecorrV         = avx256_pmecorrV_f(zeta2);
1131             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1132             velec            = _mm256_sub_ps(rinv33,pmecorrV);
1133             velec            = _mm256_mul_ps(qq33,velec);
1134             
1135             /* Update potential sum for this i atom from the interaction with this j atom. */
1136             velec            = _mm256_andnot_ps(dummy_mask,velec);
1137             velecsum         = _mm256_add_ps(velecsum,velec);
1138
1139             fscal            = felec;
1140
1141             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1142
1143             /* Calculate temporary vectorial force */
1144             tx               = _mm256_mul_ps(fscal,dx33);
1145             ty               = _mm256_mul_ps(fscal,dy33);
1146             tz               = _mm256_mul_ps(fscal,dz33);
1147
1148             /* Update vectorial force */
1149             fix3             = _mm256_add_ps(fix3,tx);
1150             fiy3             = _mm256_add_ps(fiy3,ty);
1151             fiz3             = _mm256_add_ps(fiz3,tz);
1152
1153             fjx3             = _mm256_add_ps(fjx3,tx);
1154             fjy3             = _mm256_add_ps(fjy3,ty);
1155             fjz3             = _mm256_add_ps(fjz3,tz);
1156
1157             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1158             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1159             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1160             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1161             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1162             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1163             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1164             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1165
1166             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1167                                                       fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
1168                                                       fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
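            /* Reader's note (assumption, not generated output): lanes that belong to padding
             * (dummy) entries have fjptrX redirected to the local scratch buffer above, so the
             * swizzled force decrement never touches a real atom for those lanes; their
             * contributions were already zeroed by the andnot on fscal and are simply discarded.
             */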
1169
1170             /* Inner loop uses 765 flops */
1171         }
1172
1173         /* End of innermost loop */
1174
1175         gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1176                                                  f+i_coord_offset+DIM,fshift+i_shift_offset);
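        /* Reader's note (assumption, not generated output): the per-lane i-force accumulators
         * for the three charged sites are reduced across the eight lanes and added to f[], and
         * the same sums are added to fshift[] for the current shift vector so that the virial
         * can be computed later.
         */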
1177
1178         ggid                        = gid[iidx];
1179         /* Update potential energies */
1180         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
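        /* Reader's note (assumption, not generated output): the eight lanes of velecsum are
         * reduced horizontally here and the scalar result is added to the Coulomb energy
         * accumulator of energy-group pair ggid.
         */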
1181
1182         /* Increment number of inner iterations */
1183         inneriter                  += j_index_end - j_index_start;
1184
1185         /* Outer loop uses 19 flops */
1186     }
1187
1188     /* Increment number of outer iterations */
1189     outeriter        += nri;
1190
1191     /* Update outer/inner flops */
1192
1193     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*765);
1194 }
1195 /*
1196  * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single
1197  * Electrostatics interaction: Ewald
1198  * VdW interaction:            None
1199  * Geometry:                   Water4-Water4
1200  * Calculate force/pot:        Force
1201  */
1202 void
1203 nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single
1204                     (t_nblist                    * gmx_restrict       nlist,
1205                      rvec                        * gmx_restrict          xx,
1206                      rvec                        * gmx_restrict          ff,
1207                      struct t_forcerec           * gmx_restrict          fr,
1208                      t_mdatoms                   * gmx_restrict     mdatoms,
1209                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1210                      t_nrnb                      * gmx_restrict        nrnb)
1211 {
1212     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1213      * just 0 for non-waters.
1214      * Suffixes A,B,C,D,E,F,G,H refer to the 8-way j-loop unrolling done with AVX, i.e. the eight
1215      * different jnr indices whose data occupy the eight single-precision lanes of the SIMD register.
1216      */
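    /* Reader's note (illustrative, not generated output): with this 8-way unrolling each
     * __m256 j variable holds one value per j water, e.g.
     *   jx1 = { x1(A), x1(B), x1(C), x1(D), x1(E), x1(F), x1(G), x1(H) }
     * so a single _mm256_sub_ps(ix1,jx1) yields dx11 for all eight i-j pairs at once.
     */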
1217     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1218     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1219     int              jnrA,jnrB,jnrC,jnrD;
1220     int              jnrE,jnrF,jnrG,jnrH;
1221     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1222     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1223     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1224     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1225     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1226     real             rcutoff_scalar;
1227     real             *shiftvec,*fshift,*x,*f;
1228     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1229     real             scratch[4*DIM];
1230     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1231     real *           vdwioffsetptr1;
1232     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1233     real *           vdwioffsetptr2;
1234     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1235     real *           vdwioffsetptr3;
1236     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1237     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1238     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1239     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1240     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1241     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
1242     __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1243     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1244     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1245     __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1246     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1247     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1248     __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1249     __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1250     __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1251     __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1252     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1253     real             *charge;
1254     __m256i          ewitab;
1255     __m128i          ewitab_lo,ewitab_hi;
1256     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1257     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1258     real             *ewtab;
1259     __m256           dummy_mask,cutoff_mask;
1260     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1261     __m256           one     = _mm256_set1_ps(1.0);
1262     __m256           two     = _mm256_set1_ps(2.0);
1263     x                = xx[0];
1264     f                = ff[0];
1265
1266     nri              = nlist->nri;
1267     iinr             = nlist->iinr;
1268     jindex           = nlist->jindex;
1269     jjnr             = nlist->jjnr;
1270     shiftidx         = nlist->shift;
1271     gid              = nlist->gid;
1272     shiftvec         = fr->shift_vec[0];
1273     fshift           = fr->fshift[0];
1274     facel            = _mm256_set1_ps(fr->ic->epsfac);
1275     charge           = mdatoms->chargeA;
1276
1277     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1278     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1279     beta2            = _mm256_mul_ps(beta,beta);
1280     beta3            = _mm256_mul_ps(beta,beta2);
1281
1282     ewtab            = fr->ic->tabq_coul_F;
1283     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1284     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1285
1286     /* Setup water-specific parameters */
1287     inr              = nlist->iinr[0];
1288     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1289     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1290     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1291
1292     jq1              = _mm256_set1_ps(charge[inr+1]);
1293     jq2              = _mm256_set1_ps(charge[inr+2]);
1294     jq3              = _mm256_set1_ps(charge[inr+3]);
1295     qq11             = _mm256_mul_ps(iq1,jq1);
1296     qq12             = _mm256_mul_ps(iq1,jq2);
1297     qq13             = _mm256_mul_ps(iq1,jq3);
1298     qq21             = _mm256_mul_ps(iq2,jq1);
1299     qq22             = _mm256_mul_ps(iq2,jq2);
1300     qq23             = _mm256_mul_ps(iq2,jq3);
1301     qq31             = _mm256_mul_ps(iq3,jq1);
1302     qq32             = _mm256_mul_ps(iq3,jq2);
1303     qq33             = _mm256_mul_ps(iq3,jq3);
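    /* Reader's note (assumption, not generated output): every entry in this list is the same
     * 4-site water, so all charges are read once from the first i water (inr). iq1..iq3 already
     * include the Coulomb prefactor epsfac, hence each qqXY equals epsfac*q_X*q_Y and is reused
     * unchanged for every pair in the inner loops.
     */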
1304
1305     /* Avoid uninitialized-variable compiler warnings */
1306     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1307     j_coord_offsetA = 0;
1308     j_coord_offsetB = 0;
1309     j_coord_offsetC = 0;
1310     j_coord_offsetD = 0;
1311     j_coord_offsetE = 0;
1312     j_coord_offsetF = 0;
1313     j_coord_offsetG = 0;
1314     j_coord_offsetH = 0;
1315
1316     outeriter        = 0;
1317     inneriter        = 0;
1318
1319     for(iidx=0;iidx<4*DIM;iidx++)
1320     {
1321         scratch[iidx] = 0.0;
1322     }
1323
1324     /* Start outer loop over neighborlists */
1325     for(iidx=0; iidx<nri; iidx++)
1326     {
1327         /* Load shift vector for this list */
1328         i_shift_offset   = DIM*shiftidx[iidx];
1329
1330         /* Load limits for loop over neighbors */
1331         j_index_start    = jindex[iidx];
1332         j_index_end      = jindex[iidx+1];
1333
1334         /* Get outer coordinate index */
1335         inr              = iinr[iidx];
1336         i_coord_offset   = DIM*inr;
1337
1338         /* Load i particle coords and add shift vector */
1339         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
1340                                                     &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
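        /* Reader's note (assumption, not generated output): the +DIM offset skips the first
         * site of the 4-site water; with VdwNone only the three charged sites (indices 1,2,3)
         * interact in this kernel, so just three rvecs per water are loaded.
         */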
1341
1342         fix1             = _mm256_setzero_ps();
1343         fiy1             = _mm256_setzero_ps();
1344         fiz1             = _mm256_setzero_ps();
1345         fix2             = _mm256_setzero_ps();
1346         fiy2             = _mm256_setzero_ps();
1347         fiz2             = _mm256_setzero_ps();
1348         fix3             = _mm256_setzero_ps();
1349         fiy3             = _mm256_setzero_ps();
1350         fiz3             = _mm256_setzero_ps();
1351
1352         /* Start inner kernel loop */
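        /* Reader's note (assumption, not generated output): the j list is padded with negative
         * indices up to a multiple of 8, so this unmasked loop only runs over full octets of
         * real j atoms (jjnr[jidx+7]>=0); a partially filled final octet is handled by the
         * masked remainder block below.
         */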
1353         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1354         {
1355
1356             /* Get j neighbor index, and coordinate index */
1357             jnrA             = jjnr[jidx];
1358             jnrB             = jjnr[jidx+1];
1359             jnrC             = jjnr[jidx+2];
1360             jnrD             = jjnr[jidx+3];
1361             jnrE             = jjnr[jidx+4];
1362             jnrF             = jjnr[jidx+5];
1363             jnrG             = jjnr[jidx+6];
1364             jnrH             = jjnr[jidx+7];
1365             j_coord_offsetA  = DIM*jnrA;
1366             j_coord_offsetB  = DIM*jnrB;
1367             j_coord_offsetC  = DIM*jnrC;
1368             j_coord_offsetD  = DIM*jnrD;
1369             j_coord_offsetE  = DIM*jnrE;
1370             j_coord_offsetF  = DIM*jnrF;
1371             j_coord_offsetG  = DIM*jnrG;
1372             j_coord_offsetH  = DIM*jnrH;
1373
1374             /* load j atom coordinates */
1375             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1376                                                  x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1377                                                  x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
1378                                                  x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
1379                                                  &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1380
1381             /* Calculate displacement vector */
1382             dx11             = _mm256_sub_ps(ix1,jx1);
1383             dy11             = _mm256_sub_ps(iy1,jy1);
1384             dz11             = _mm256_sub_ps(iz1,jz1);
1385             dx12             = _mm256_sub_ps(ix1,jx2);
1386             dy12             = _mm256_sub_ps(iy1,jy2);
1387             dz12             = _mm256_sub_ps(iz1,jz2);
1388             dx13             = _mm256_sub_ps(ix1,jx3);
1389             dy13             = _mm256_sub_ps(iy1,jy3);
1390             dz13             = _mm256_sub_ps(iz1,jz3);
1391             dx21             = _mm256_sub_ps(ix2,jx1);
1392             dy21             = _mm256_sub_ps(iy2,jy1);
1393             dz21             = _mm256_sub_ps(iz2,jz1);
1394             dx22             = _mm256_sub_ps(ix2,jx2);
1395             dy22             = _mm256_sub_ps(iy2,jy2);
1396             dz22             = _mm256_sub_ps(iz2,jz2);
1397             dx23             = _mm256_sub_ps(ix2,jx3);
1398             dy23             = _mm256_sub_ps(iy2,jy3);
1399             dz23             = _mm256_sub_ps(iz2,jz3);
1400             dx31             = _mm256_sub_ps(ix3,jx1);
1401             dy31             = _mm256_sub_ps(iy3,jy1);
1402             dz31             = _mm256_sub_ps(iz3,jz1);
1403             dx32             = _mm256_sub_ps(ix3,jx2);
1404             dy32             = _mm256_sub_ps(iy3,jy2);
1405             dz32             = _mm256_sub_ps(iz3,jz2);
1406             dx33             = _mm256_sub_ps(ix3,jx3);
1407             dy33             = _mm256_sub_ps(iy3,jy3);
1408             dz33             = _mm256_sub_ps(iz3,jz3);
1409
1410             /* Calculate squared distance and things based on it */
1411             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1412             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1413             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1414             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1415             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1416             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1417             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1418             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1419             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1420
1421             rinv11           = avx256_invsqrt_f(rsq11);
1422             rinv12           = avx256_invsqrt_f(rsq12);
1423             rinv13           = avx256_invsqrt_f(rsq13);
1424             rinv21           = avx256_invsqrt_f(rsq21);
1425             rinv22           = avx256_invsqrt_f(rsq22);
1426             rinv23           = avx256_invsqrt_f(rsq23);
1427             rinv31           = avx256_invsqrt_f(rsq31);
1428             rinv32           = avx256_invsqrt_f(rsq32);
1429             rinv33           = avx256_invsqrt_f(rsq33);
1430
1431             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1432             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1433             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
1434             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1435             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1436             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
1437             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
1438             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
1439             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
1440
1441             fjx1             = _mm256_setzero_ps();
1442             fjy1             = _mm256_setzero_ps();
1443             fjz1             = _mm256_setzero_ps();
1444             fjx2             = _mm256_setzero_ps();
1445             fjy2             = _mm256_setzero_ps();
1446             fjz2             = _mm256_setzero_ps();
1447             fjx3             = _mm256_setzero_ps();
1448             fjy3             = _mm256_setzero_ps();
1449             fjz3             = _mm256_setzero_ps();
1450
1451             /**************************
1452              * CALCULATE INTERACTIONS *
1453              **************************/
1454
1455             r11              = _mm256_mul_ps(rsq11,rinv11);
1456
1457             /* EWALD ELECTROSTATICS */
1458             
1459             /* Analytical PME correction */
1460             zeta2            = _mm256_mul_ps(beta2,rsq11);
1461             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1462             pmecorrF         = avx256_pmecorrF_f(zeta2);
1463             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1464             felec            = _mm256_mul_ps(qq11,felec);
1465             
1466             fscal            = felec;
1467
1468             /* Calculate temporary vectorial force */
1469             tx               = _mm256_mul_ps(fscal,dx11);
1470             ty               = _mm256_mul_ps(fscal,dy11);
1471             tz               = _mm256_mul_ps(fscal,dz11);
1472
1473             /* Update vectorial force */
1474             fix1             = _mm256_add_ps(fix1,tx);
1475             fiy1             = _mm256_add_ps(fiy1,ty);
1476             fiz1             = _mm256_add_ps(fiz1,tz);
1477
1478             fjx1             = _mm256_add_ps(fjx1,tx);
1479             fjy1             = _mm256_add_ps(fjy1,ty);
1480             fjz1             = _mm256_add_ps(fjz1,tz);
1481
1482             /**************************
1483              * CALCULATE INTERACTIONS *
1484              **************************/
1485
1486             r12              = _mm256_mul_ps(rsq12,rinv12);
1487
1488             /* EWALD ELECTROSTATICS */
1489             
1490             /* Analytical PME correction */
1491             zeta2            = _mm256_mul_ps(beta2,rsq12);
1492             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1493             pmecorrF         = avx256_pmecorrF_f(zeta2);
1494             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1495             felec            = _mm256_mul_ps(qq12,felec);
1496             
1497             fscal            = felec;
1498
1499             /* Calculate temporary vectorial force */
1500             tx               = _mm256_mul_ps(fscal,dx12);
1501             ty               = _mm256_mul_ps(fscal,dy12);
1502             tz               = _mm256_mul_ps(fscal,dz12);
1503
1504             /* Update vectorial force */
1505             fix1             = _mm256_add_ps(fix1,tx);
1506             fiy1             = _mm256_add_ps(fiy1,ty);
1507             fiz1             = _mm256_add_ps(fiz1,tz);
1508
1509             fjx2             = _mm256_add_ps(fjx2,tx);
1510             fjy2             = _mm256_add_ps(fjy2,ty);
1511             fjz2             = _mm256_add_ps(fjz2,tz);
1512
1513             /**************************
1514              * CALCULATE INTERACTIONS *
1515              **************************/
1516
1517             r13              = _mm256_mul_ps(rsq13,rinv13);
1518
1519             /* EWALD ELECTROSTATICS */
1520             
1521             /* Analytical PME correction */
1522             zeta2            = _mm256_mul_ps(beta2,rsq13);
1523             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
1524             pmecorrF         = avx256_pmecorrF_f(zeta2);
1525             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1526             felec            = _mm256_mul_ps(qq13,felec);
1527             
1528             fscal            = felec;
1529
1530             /* Calculate temporary vectorial force */
1531             tx               = _mm256_mul_ps(fscal,dx13);
1532             ty               = _mm256_mul_ps(fscal,dy13);
1533             tz               = _mm256_mul_ps(fscal,dz13);
1534
1535             /* Update vectorial force */
1536             fix1             = _mm256_add_ps(fix1,tx);
1537             fiy1             = _mm256_add_ps(fiy1,ty);
1538             fiz1             = _mm256_add_ps(fiz1,tz);
1539
1540             fjx3             = _mm256_add_ps(fjx3,tx);
1541             fjy3             = _mm256_add_ps(fjy3,ty);
1542             fjz3             = _mm256_add_ps(fjz3,tz);
1543
1544             /**************************
1545              * CALCULATE INTERACTIONS *
1546              **************************/
1547
1548             r21              = _mm256_mul_ps(rsq21,rinv21);
1549
1550             /* EWALD ELECTROSTATICS */
1551             
1552             /* Analytical PME correction */
1553             zeta2            = _mm256_mul_ps(beta2,rsq21);
1554             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1555             pmecorrF         = avx256_pmecorrF_f(zeta2);
1556             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1557             felec            = _mm256_mul_ps(qq21,felec);
1558             
1559             fscal            = felec;
1560
1561             /* Calculate temporary vectorial force */
1562             tx               = _mm256_mul_ps(fscal,dx21);
1563             ty               = _mm256_mul_ps(fscal,dy21);
1564             tz               = _mm256_mul_ps(fscal,dz21);
1565
1566             /* Update vectorial force */
1567             fix2             = _mm256_add_ps(fix2,tx);
1568             fiy2             = _mm256_add_ps(fiy2,ty);
1569             fiz2             = _mm256_add_ps(fiz2,tz);
1570
1571             fjx1             = _mm256_add_ps(fjx1,tx);
1572             fjy1             = _mm256_add_ps(fjy1,ty);
1573             fjz1             = _mm256_add_ps(fjz1,tz);
1574
1575             /**************************
1576              * CALCULATE INTERACTIONS *
1577              **************************/
1578
1579             r22              = _mm256_mul_ps(rsq22,rinv22);
1580
1581             /* EWALD ELECTROSTATICS */
1582             
1583             /* Analytical PME correction */
1584             zeta2            = _mm256_mul_ps(beta2,rsq22);
1585             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1586             pmecorrF         = avx256_pmecorrF_f(zeta2);
1587             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1588             felec            = _mm256_mul_ps(qq22,felec);
1589             
1590             fscal            = felec;
1591
1592             /* Calculate temporary vectorial force */
1593             tx               = _mm256_mul_ps(fscal,dx22);
1594             ty               = _mm256_mul_ps(fscal,dy22);
1595             tz               = _mm256_mul_ps(fscal,dz22);
1596
1597             /* Update vectorial force */
1598             fix2             = _mm256_add_ps(fix2,tx);
1599             fiy2             = _mm256_add_ps(fiy2,ty);
1600             fiz2             = _mm256_add_ps(fiz2,tz);
1601
1602             fjx2             = _mm256_add_ps(fjx2,tx);
1603             fjy2             = _mm256_add_ps(fjy2,ty);
1604             fjz2             = _mm256_add_ps(fjz2,tz);
1605
1606             /**************************
1607              * CALCULATE INTERACTIONS *
1608              **************************/
1609
1610             r23              = _mm256_mul_ps(rsq23,rinv23);
1611
1612             /* EWALD ELECTROSTATICS */
1613             
1614             /* Analytical PME correction */
1615             zeta2            = _mm256_mul_ps(beta2,rsq23);
1616             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
1617             pmecorrF         = avx256_pmecorrF_f(zeta2);
1618             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1619             felec            = _mm256_mul_ps(qq23,felec);
1620             
1621             fscal            = felec;
1622
1623             /* Calculate temporary vectorial force */
1624             tx               = _mm256_mul_ps(fscal,dx23);
1625             ty               = _mm256_mul_ps(fscal,dy23);
1626             tz               = _mm256_mul_ps(fscal,dz23);
1627
1628             /* Update vectorial force */
1629             fix2             = _mm256_add_ps(fix2,tx);
1630             fiy2             = _mm256_add_ps(fiy2,ty);
1631             fiz2             = _mm256_add_ps(fiz2,tz);
1632
1633             fjx3             = _mm256_add_ps(fjx3,tx);
1634             fjy3             = _mm256_add_ps(fjy3,ty);
1635             fjz3             = _mm256_add_ps(fjz3,tz);
1636
1637             /**************************
1638              * CALCULATE INTERACTIONS *
1639              **************************/
1640
1641             r31              = _mm256_mul_ps(rsq31,rinv31);
1642
1643             /* EWALD ELECTROSTATICS */
1644             
1645             /* Analytical PME correction */
1646             zeta2            = _mm256_mul_ps(beta2,rsq31);
1647             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
1648             pmecorrF         = avx256_pmecorrF_f(zeta2);
1649             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1650             felec            = _mm256_mul_ps(qq31,felec);
1651             
1652             fscal            = felec;
1653
1654             /* Calculate temporary vectorial force */
1655             tx               = _mm256_mul_ps(fscal,dx31);
1656             ty               = _mm256_mul_ps(fscal,dy31);
1657             tz               = _mm256_mul_ps(fscal,dz31);
1658
1659             /* Update vectorial force */
1660             fix3             = _mm256_add_ps(fix3,tx);
1661             fiy3             = _mm256_add_ps(fiy3,ty);
1662             fiz3             = _mm256_add_ps(fiz3,tz);
1663
1664             fjx1             = _mm256_add_ps(fjx1,tx);
1665             fjy1             = _mm256_add_ps(fjy1,ty);
1666             fjz1             = _mm256_add_ps(fjz1,tz);
1667
1668             /**************************
1669              * CALCULATE INTERACTIONS *
1670              **************************/
1671
1672             r32              = _mm256_mul_ps(rsq32,rinv32);
1673
1674             /* EWALD ELECTROSTATICS */
1675             
1676             /* Analytical PME correction */
1677             zeta2            = _mm256_mul_ps(beta2,rsq32);
1678             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
1679             pmecorrF         = avx256_pmecorrF_f(zeta2);
1680             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1681             felec            = _mm256_mul_ps(qq32,felec);
1682             
1683             fscal            = felec;
1684
1685             /* Calculate temporary vectorial force */
1686             tx               = _mm256_mul_ps(fscal,dx32);
1687             ty               = _mm256_mul_ps(fscal,dy32);
1688             tz               = _mm256_mul_ps(fscal,dz32);
1689
1690             /* Update vectorial force */
1691             fix3             = _mm256_add_ps(fix3,tx);
1692             fiy3             = _mm256_add_ps(fiy3,ty);
1693             fiz3             = _mm256_add_ps(fiz3,tz);
1694
1695             fjx2             = _mm256_add_ps(fjx2,tx);
1696             fjy2             = _mm256_add_ps(fjy2,ty);
1697             fjz2             = _mm256_add_ps(fjz2,tz);
1698
1699             /**************************
1700              * CALCULATE INTERACTIONS *
1701              **************************/
1702
1703             r33              = _mm256_mul_ps(rsq33,rinv33);
1704
1705             /* EWALD ELECTROSTATICS */
1706             
1707             /* Analytical PME correction */
1708             zeta2            = _mm256_mul_ps(beta2,rsq33);
1709             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
1710             pmecorrF         = avx256_pmecorrF_f(zeta2);
1711             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1712             felec            = _mm256_mul_ps(qq33,felec);
1713             
1714             fscal            = felec;
1715
1716             /* Calculate temporary vectorial force */
1717             tx               = _mm256_mul_ps(fscal,dx33);
1718             ty               = _mm256_mul_ps(fscal,dy33);
1719             tz               = _mm256_mul_ps(fscal,dz33);
1720
1721             /* Update vectorial force */
1722             fix3             = _mm256_add_ps(fix3,tx);
1723             fiy3             = _mm256_add_ps(fiy3,ty);
1724             fiz3             = _mm256_add_ps(fiz3,tz);
1725
1726             fjx3             = _mm256_add_ps(fjx3,tx);
1727             fjy3             = _mm256_add_ps(fjy3,ty);
1728             fjz3             = _mm256_add_ps(fjz3,tz);
1729
1730             fjptrA             = f+j_coord_offsetA;
1731             fjptrB             = f+j_coord_offsetB;
1732             fjptrC             = f+j_coord_offsetC;
1733             fjptrD             = f+j_coord_offsetD;
1734             fjptrE             = f+j_coord_offsetE;
1735             fjptrF             = f+j_coord_offsetF;
1736             fjptrG             = f+j_coord_offsetG;
1737             fjptrH             = f+j_coord_offsetH;
1738
1739             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1740                                                       fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
1741                                                       fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1742
1743             /* Inner loop uses 504 flops */
1744         }
1745
1746         if(jidx<j_index_end)
1747         {
1748
1749             /* Get j neighbor index, and coordinate index */
1750             jnrlistA         = jjnr[jidx];
1751             jnrlistB         = jjnr[jidx+1];
1752             jnrlistC         = jjnr[jidx+2];
1753             jnrlistD         = jjnr[jidx+3];
1754             jnrlistE         = jjnr[jidx+4];
1755             jnrlistF         = jjnr[jidx+5];
1756             jnrlistG         = jjnr[jidx+6];
1757             jnrlistH         = jjnr[jidx+7];
1758             /* The sign of each element is negative for non-real (padding) atoms.
1759              * The mask is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1760              * so it is used as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
1761              */
1762             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1763                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1764                                             
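            /* Reader's note (illustrative, not generated output): e.g. for a padded octet
             * jjnr = {12,7,5,-1,-1,-1,-1,-1} the two _mm_cmplt_epi32 comparisons set
             * dummy_mask to all-ones in exactly the lanes whose j index is negative, so the
             * andnot operations below zero the contributions of those padded lanes.
             */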
1765             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1766             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1767             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1768             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1769             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1770             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1771             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1772             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
1773             j_coord_offsetA  = DIM*jnrA;
1774             j_coord_offsetB  = DIM*jnrB;
1775             j_coord_offsetC  = DIM*jnrC;
1776             j_coord_offsetD  = DIM*jnrD;
1777             j_coord_offsetE  = DIM*jnrE;
1778             j_coord_offsetF  = DIM*jnrF;
1779             j_coord_offsetG  = DIM*jnrG;
1780             j_coord_offsetH  = DIM*jnrH;
1781
1782             /* load j atom coordinates */
1783             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1784                                                  x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1785                                                  x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
1786                                                  x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
1787                                                  &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1788
1789             /* Calculate displacement vector */
1790             dx11             = _mm256_sub_ps(ix1,jx1);
1791             dy11             = _mm256_sub_ps(iy1,jy1);
1792             dz11             = _mm256_sub_ps(iz1,jz1);
1793             dx12             = _mm256_sub_ps(ix1,jx2);
1794             dy12             = _mm256_sub_ps(iy1,jy2);
1795             dz12             = _mm256_sub_ps(iz1,jz2);
1796             dx13             = _mm256_sub_ps(ix1,jx3);
1797             dy13             = _mm256_sub_ps(iy1,jy3);
1798             dz13             = _mm256_sub_ps(iz1,jz3);
1799             dx21             = _mm256_sub_ps(ix2,jx1);
1800             dy21             = _mm256_sub_ps(iy2,jy1);
1801             dz21             = _mm256_sub_ps(iz2,jz1);
1802             dx22             = _mm256_sub_ps(ix2,jx2);
1803             dy22             = _mm256_sub_ps(iy2,jy2);
1804             dz22             = _mm256_sub_ps(iz2,jz2);
1805             dx23             = _mm256_sub_ps(ix2,jx3);
1806             dy23             = _mm256_sub_ps(iy2,jy3);
1807             dz23             = _mm256_sub_ps(iz2,jz3);
1808             dx31             = _mm256_sub_ps(ix3,jx1);
1809             dy31             = _mm256_sub_ps(iy3,jy1);
1810             dz31             = _mm256_sub_ps(iz3,jz1);
1811             dx32             = _mm256_sub_ps(ix3,jx2);
1812             dy32             = _mm256_sub_ps(iy3,jy2);
1813             dz32             = _mm256_sub_ps(iz3,jz2);
1814             dx33             = _mm256_sub_ps(ix3,jx3);
1815             dy33             = _mm256_sub_ps(iy3,jy3);
1816             dz33             = _mm256_sub_ps(iz3,jz3);
1817
1818             /* Calculate squared distance and things based on it */
1819             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1820             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1821             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1822             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1823             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1824             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1825             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1826             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1827             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1828
1829             rinv11           = avx256_invsqrt_f(rsq11);
1830             rinv12           = avx256_invsqrt_f(rsq12);
1831             rinv13           = avx256_invsqrt_f(rsq13);
1832             rinv21           = avx256_invsqrt_f(rsq21);
1833             rinv22           = avx256_invsqrt_f(rsq22);
1834             rinv23           = avx256_invsqrt_f(rsq23);
1835             rinv31           = avx256_invsqrt_f(rsq31);
1836             rinv32           = avx256_invsqrt_f(rsq32);
1837             rinv33           = avx256_invsqrt_f(rsq33);
1838
1839             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1840             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1841             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
1842             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1843             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1844             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
1845             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
1846             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
1847             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
1848
1849             fjx1             = _mm256_setzero_ps();
1850             fjy1             = _mm256_setzero_ps();
1851             fjz1             = _mm256_setzero_ps();
1852             fjx2             = _mm256_setzero_ps();
1853             fjy2             = _mm256_setzero_ps();
1854             fjz2             = _mm256_setzero_ps();
1855             fjx3             = _mm256_setzero_ps();
1856             fjy3             = _mm256_setzero_ps();
1857             fjz3             = _mm256_setzero_ps();
1858
1859             /**************************
1860              * CALCULATE INTERACTIONS *
1861              **************************/
1862
1863             r11              = _mm256_mul_ps(rsq11,rinv11);
1864             r11              = _mm256_andnot_ps(dummy_mask,r11);
1865
1866             /* EWALD ELECTROSTATICS */
1867             
1868             /* Analytical PME correction */
1869             zeta2            = _mm256_mul_ps(beta2,rsq11);
1870             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1871             pmecorrF         = avx256_pmecorrF_f(zeta2);
1872             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1873             felec            = _mm256_mul_ps(qq11,felec);
1874             
1875             fscal            = felec;
1876
1877             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1878
1879             /* Calculate temporary vectorial force */
1880             tx               = _mm256_mul_ps(fscal,dx11);
1881             ty               = _mm256_mul_ps(fscal,dy11);
1882             tz               = _mm256_mul_ps(fscal,dz11);
1883
1884             /* Update vectorial force */
1885             fix1             = _mm256_add_ps(fix1,tx);
1886             fiy1             = _mm256_add_ps(fiy1,ty);
1887             fiz1             = _mm256_add_ps(fiz1,tz);
1888
1889             fjx1             = _mm256_add_ps(fjx1,tx);
1890             fjy1             = _mm256_add_ps(fjy1,ty);
1891             fjz1             = _mm256_add_ps(fjz1,tz);
1892
1893             /**************************
1894              * CALCULATE INTERACTIONS *
1895              **************************/
1896
1897             r12              = _mm256_mul_ps(rsq12,rinv12);
1898             r12              = _mm256_andnot_ps(dummy_mask,r12);
1899
1900             /* EWALD ELECTROSTATICS */
1901             
1902             /* Analytical PME correction */
1903             zeta2            = _mm256_mul_ps(beta2,rsq12);
1904             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1905             pmecorrF         = avx256_pmecorrF_f(zeta2);
1906             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1907             felec            = _mm256_mul_ps(qq12,felec);
1908             
1909             fscal            = felec;
1910
1911             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1912
1913             /* Calculate temporary vectorial force */
1914             tx               = _mm256_mul_ps(fscal,dx12);
1915             ty               = _mm256_mul_ps(fscal,dy12);
1916             tz               = _mm256_mul_ps(fscal,dz12);
1917
1918             /* Update vectorial force */
1919             fix1             = _mm256_add_ps(fix1,tx);
1920             fiy1             = _mm256_add_ps(fiy1,ty);
1921             fiz1             = _mm256_add_ps(fiz1,tz);
1922
1923             fjx2             = _mm256_add_ps(fjx2,tx);
1924             fjy2             = _mm256_add_ps(fjy2,ty);
1925             fjz2             = _mm256_add_ps(fjz2,tz);
1926
1927             /**************************
1928              * CALCULATE INTERACTIONS *
1929              **************************/
1930
1931             r13              = _mm256_mul_ps(rsq13,rinv13);
1932             r13              = _mm256_andnot_ps(dummy_mask,r13);
1933
1934             /* EWALD ELECTROSTATICS */
1935             
1936             /* Analytical PME correction */
1937             zeta2            = _mm256_mul_ps(beta2,rsq13);
1938             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
1939             pmecorrF         = avx256_pmecorrF_f(zeta2);
1940             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1941             felec            = _mm256_mul_ps(qq13,felec);
1942             
1943             fscal            = felec;
1944
1945             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1946
1947             /* Calculate temporary vectorial force */
1948             tx               = _mm256_mul_ps(fscal,dx13);
1949             ty               = _mm256_mul_ps(fscal,dy13);
1950             tz               = _mm256_mul_ps(fscal,dz13);
1951
1952             /* Update vectorial force */
1953             fix1             = _mm256_add_ps(fix1,tx);
1954             fiy1             = _mm256_add_ps(fiy1,ty);
1955             fiz1             = _mm256_add_ps(fiz1,tz);
1956
1957             fjx3             = _mm256_add_ps(fjx3,tx);
1958             fjy3             = _mm256_add_ps(fjy3,ty);
1959             fjz3             = _mm256_add_ps(fjz3,tz);
1960
1961             /**************************
1962              * CALCULATE INTERACTIONS *
1963              **************************/
1964
1965             r21              = _mm256_mul_ps(rsq21,rinv21);
1966             r21              = _mm256_andnot_ps(dummy_mask,r21);
1967
1968             /* EWALD ELECTROSTATICS */
1969             
1970             /* Analytical PME correction */
1971             zeta2            = _mm256_mul_ps(beta2,rsq21);
1972             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1973             pmecorrF         = avx256_pmecorrF_f(zeta2);
1974             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1975             felec            = _mm256_mul_ps(qq21,felec);
1976             
1977             fscal            = felec;
1978
1979             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1980
1981             /* Calculate temporary vectorial force */
1982             tx               = _mm256_mul_ps(fscal,dx21);
1983             ty               = _mm256_mul_ps(fscal,dy21);
1984             tz               = _mm256_mul_ps(fscal,dz21);
1985
1986             /* Update vectorial force */
1987             fix2             = _mm256_add_ps(fix2,tx);
1988             fiy2             = _mm256_add_ps(fiy2,ty);
1989             fiz2             = _mm256_add_ps(fiz2,tz);
1990
1991             fjx1             = _mm256_add_ps(fjx1,tx);
1992             fjy1             = _mm256_add_ps(fjy1,ty);
1993             fjz1             = _mm256_add_ps(fjz1,tz);
1994
1995             /**************************
1996              * CALCULATE INTERACTIONS *
1997              **************************/
1998
1999             r22              = _mm256_mul_ps(rsq22,rinv22);
2000             r22              = _mm256_andnot_ps(dummy_mask,r22);
2001
2002             /* EWALD ELECTROSTATICS */
2003             
2004             /* Analytical PME correction */
2005             zeta2            = _mm256_mul_ps(beta2,rsq22);
2006             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2007             pmecorrF         = avx256_pmecorrF_f(zeta2);
2008             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2009             felec            = _mm256_mul_ps(qq22,felec);
2010             
2011             fscal            = felec;
2012
2013             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2014
2015             /* Calculate temporary vectorial force */
2016             tx               = _mm256_mul_ps(fscal,dx22);
2017             ty               = _mm256_mul_ps(fscal,dy22);
2018             tz               = _mm256_mul_ps(fscal,dz22);
2019
2020             /* Update vectorial force */
2021             fix2             = _mm256_add_ps(fix2,tx);
2022             fiy2             = _mm256_add_ps(fiy2,ty);
2023             fiz2             = _mm256_add_ps(fiz2,tz);
2024
2025             fjx2             = _mm256_add_ps(fjx2,tx);
2026             fjy2             = _mm256_add_ps(fjy2,ty);
2027             fjz2             = _mm256_add_ps(fjz2,tz);
2028
2029             /**************************
2030              * CALCULATE INTERACTIONS *
2031              **************************/
2032
2033             r23              = _mm256_mul_ps(rsq23,rinv23);
2034             r23              = _mm256_andnot_ps(dummy_mask,r23);
2035
2036             /* EWALD ELECTROSTATICS */
2037             
2038             /* Analytical PME correction */
2039             zeta2            = _mm256_mul_ps(beta2,rsq23);
2040             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
2041             pmecorrF         = avx256_pmecorrF_f(zeta2);
2042             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2043             felec            = _mm256_mul_ps(qq23,felec);
2044             
2045             fscal            = felec;
2046
2047             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2048
2049             /* Calculate temporary vectorial force */
2050             tx               = _mm256_mul_ps(fscal,dx23);
2051             ty               = _mm256_mul_ps(fscal,dy23);
2052             tz               = _mm256_mul_ps(fscal,dz23);
2053
2054             /* Update vectorial force */
2055             fix2             = _mm256_add_ps(fix2,tx);
2056             fiy2             = _mm256_add_ps(fiy2,ty);
2057             fiz2             = _mm256_add_ps(fiz2,tz);
2058
2059             fjx3             = _mm256_add_ps(fjx3,tx);
2060             fjy3             = _mm256_add_ps(fjy3,ty);
2061             fjz3             = _mm256_add_ps(fjz3,tz);
2062
2063             /**************************
2064              * CALCULATE INTERACTIONS *
2065              **************************/
2066
2067             r31              = _mm256_mul_ps(rsq31,rinv31);
2068             r31              = _mm256_andnot_ps(dummy_mask,r31);
2069
2070             /* EWALD ELECTROSTATICS */
2071             
2072             /* Analytical PME correction */
2073             zeta2            = _mm256_mul_ps(beta2,rsq31);
2074             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
2075             pmecorrF         = avx256_pmecorrF_f(zeta2);
2076             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2077             felec            = _mm256_mul_ps(qq31,felec);
2078             
2079             fscal            = felec;
2080
2081             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2082
2083             /* Calculate temporary vectorial force */
2084             tx               = _mm256_mul_ps(fscal,dx31);
2085             ty               = _mm256_mul_ps(fscal,dy31);
2086             tz               = _mm256_mul_ps(fscal,dz31);
2087
2088             /* Update vectorial force */
2089             fix3             = _mm256_add_ps(fix3,tx);
2090             fiy3             = _mm256_add_ps(fiy3,ty);
2091             fiz3             = _mm256_add_ps(fiz3,tz);
2092
2093             fjx1             = _mm256_add_ps(fjx1,tx);
2094             fjy1             = _mm256_add_ps(fjy1,ty);
2095             fjz1             = _mm256_add_ps(fjz1,tz);
2096
2097             /**************************
2098              * CALCULATE INTERACTIONS *
2099              **************************/
2100
2101             r32              = _mm256_mul_ps(rsq32,rinv32);
2102             r32              = _mm256_andnot_ps(dummy_mask,r32);
2103
2104             /* EWALD ELECTROSTATICS */
2105             
2106             /* Analytical PME correction */
2107             zeta2            = _mm256_mul_ps(beta2,rsq32);
2108             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
2109             pmecorrF         = avx256_pmecorrF_f(zeta2);
2110             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2111             felec            = _mm256_mul_ps(qq32,felec);
2112             
2113             fscal            = felec;
2114
2115             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2116
2117             /* Calculate temporary vectorial force */
2118             tx               = _mm256_mul_ps(fscal,dx32);
2119             ty               = _mm256_mul_ps(fscal,dy32);
2120             tz               = _mm256_mul_ps(fscal,dz32);
2121
2122             /* Update vectorial force */
2123             fix3             = _mm256_add_ps(fix3,tx);
2124             fiy3             = _mm256_add_ps(fiy3,ty);
2125             fiz3             = _mm256_add_ps(fiz3,tz);
2126
2127             fjx2             = _mm256_add_ps(fjx2,tx);
2128             fjy2             = _mm256_add_ps(fjy2,ty);
2129             fjz2             = _mm256_add_ps(fjz2,tz);
2130
2131             /**************************
2132              * CALCULATE INTERACTIONS *
2133              **************************/
2134
2135             r33              = _mm256_mul_ps(rsq33,rinv33);
2136             r33              = _mm256_andnot_ps(dummy_mask,r33);
2137
2138             /* EWALD ELECTROSTATICS */
2139             
2140             /* Analytical PME correction */
2141             zeta2            = _mm256_mul_ps(beta2,rsq33);
2142             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
2143             pmecorrF         = avx256_pmecorrF_f(zeta2);
2144             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2145             felec            = _mm256_mul_ps(qq33,felec);
2146             
2147             fscal            = felec;
2148
2149             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2150
2151             /* Calculate temporary vectorial force */
2152             tx               = _mm256_mul_ps(fscal,dx33);
2153             ty               = _mm256_mul_ps(fscal,dy33);
2154             tz               = _mm256_mul_ps(fscal,dz33);
2155
2156             /* Update vectorial force */
2157             fix3             = _mm256_add_ps(fix3,tx);
2158             fiy3             = _mm256_add_ps(fiy3,ty);
2159             fiz3             = _mm256_add_ps(fiz3,tz);
2160
2161             fjx3             = _mm256_add_ps(fjx3,tx);
2162             fjy3             = _mm256_add_ps(fjy3,ty);
2163             fjz3             = _mm256_add_ps(fjz3,tz);
2164
2165             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2166             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2167             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2168             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2169             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2170             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2171             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2172             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
2173
2174             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
2175                                                       fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
2176                                                       fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2177
2178             /* Inner loop uses 513 flops */
2179         }
2180
2181         /* End of innermost loop */
2182
2183         gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2184                                                  f+i_coord_offset+DIM,fshift+i_shift_offset);
2185
2186         /* Increment number of inner iterations */
2187         inneriter                  += j_index_end - j_index_start;
2188
2189         /* Outer loop uses 18 flops */
2190     }
2191
2192     /* Increment number of outer iterations */
2193     outeriter        += nri;
2194
2195     /* Update outer/inner flops */
2196
2197     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*513);
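    /* Reader's note (assumption, not generated output): inc_nrnb records the operation count
     * for this kernel flavour so mdrun can include it in its flop accounting; the factors 18
     * and 513 match the per-iteration flop comments above.
     */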
2198 }