/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "gmxpre.h"

#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/gmxlib/nrnb.h"

#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            None
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        PotentialAndForce
 */
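
/*
 * For orientation, a scalar sketch of the per-pair math this kernel
 * vectorizes (illustrative only, not part of the generated kernel;
 * erfc() comes from <math.h>, and qq, beta and sh_ewald correspond to
 * the variables set up inside the kernel below):
 */
#if 0
static real
ref_ewald_shifted_pair(real qq, real r, real beta, real sh_ewald,
                       real *fscal)
{
    /* Shifted real-space Ewald pair potential: qq*(erfc(beta*r)/r - shift) */
    real v = qq*(erfc(beta*r)/r - sh_ewald);

    /* Pair force divided by r, the quantity used to scale dx/dy/dz */
    *fscal = qq*(erfc(beta*r)/(r*r) +
                 2.0*beta*exp(-beta*beta*r*r)/(sqrt(M_PI)*r))/r;
    return v;
}
#endif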
void
nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     struct t_forcerec           * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
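    /* For example, dx12/dy12/dz12 below hold, in each of the eight lanes
     * A..H, the displacement between atom 1 of the i-water and atom 2 of
     * one j-water (for a 3-site water model the sites are typically O, H, H).
     */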
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
    __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->ic->epsfac);
    charge           = mdatoms->chargeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
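    /* Note: the table parameters above are emitted by the generator for all
     * Ewald kernels; this particular variant evaluates the Ewald terms with
     * the analytical PME correction in the inner loops, so the
     * tabq_coul_FDV0 data appears to go unused here. */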

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));

    jq0              = _mm256_set1_ps(charge[inr+0]);
    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    qq00             = _mm256_mul_ps(iq0,jq0);
    qq01             = _mm256_mul_ps(iq0,jq1);
    qq02             = _mm256_mul_ps(iq0,jq2);
    qq10             = _mm256_mul_ps(iq1,jq0);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq20             = _mm256_mul_ps(iq2,jq0);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
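    /* All waters are identical to the first one, so the nine charge
     * products qq_kl = (epsfac*q_k)*q_l are formed once here and reused for
     * every water pair in the loops below. */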

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->ic->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
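    /* Keeping the squared cutoff lets the inner loops test rsq directly
     * against rcutoff2, avoiding a square root per pair. */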

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
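        /* Each shifted i coordinate is broadcast to all eight SIMD lanes:
         * the same (periodically shifted) i-water is paired with eight
         * different j-waters in every inner-loop iteration. */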

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {
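            /* This loop body only runs while all eight j entries are real
             * atoms; the tail of the list, padded with negative indices, is
             * handled by the masked epilogue loop further down. */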

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = avx256_invsqrt_f(rsq00);
            rinv01           = avx256_invsqrt_f(rsq01);
            rinv02           = avx256_invsqrt_f(rsq02);
            rinv10           = avx256_invsqrt_f(rsq10);
            rinv11           = avx256_invsqrt_f(rsq11);
            rinv12           = avx256_invsqrt_f(rsq12);
            rinv20           = avx256_invsqrt_f(rsq20);
            rinv21           = avx256_invsqrt_f(rsq21);
            rinv22           = avx256_invsqrt_f(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);
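
            /* A hedged scalar reading of the expressions above, assuming the
             * usual convention for the analytical PME-correction helpers with
             * z = beta*r:
             *   pmecorrV(z^2) ~ erf(z)/z, so
             *     velec = qq00*(erfc(beta*r)/r - sh_ewald)
             *   pmecorrF(z^2) ~ ((2/sqrt(pi))*z*exp(-z^2) - erf(z))/z^3, so
             *     felec = qq00*(erfc(beta*r)/r^2 + (2*beta/sqrt(pi))*exp(-z^2)/r)/r
             * i.e. felec is the scalar pair force divided by r, ready to
             * scale the displacement components dx00/dy00/dz00. */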

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
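
            /* The accumulated j forces are transposed back from the
             * structure-of-arrays registers and subtracted from the eight
             * j-water force slots in memory; the matching additions were
             * already folded into the i-force accumulators above (Newton's
             * third law). */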

            /* Inner loop uses 981 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
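            /* Example: with three real j-waters left, this vector of indices
             * could be { a, b, c, -1, -1, -1, -1, -1 } (padding entries are
             * negative; -1 shown for illustration), so lanes D..H get an
             * all-ones dummy_mask and are excluded from every energy and
             * force contribution below. */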

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = avx256_invsqrt_f(rsq00);
            rinv01           = avx256_invsqrt_f(rsq01);
            rinv02           = avx256_invsqrt_f(rsq02);
            rinv10           = avx256_invsqrt_f(rsq10);
            rinv11           = avx256_invsqrt_f(rsq11);
            rinv12           = avx256_invsqrt_f(rsq12);
            rinv20           = avx256_invsqrt_f(rsq20);
            rinv21           = avx256_invsqrt_f(rsq21);
            rinv22           = avx256_invsqrt_f(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);
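            /* Clearing the dummy lanes of r00 keeps values derived from the
             * padded (clamped-to-atom-0) coordinates from propagating; in
             * this analytical-correction variant r00 is otherwise unused,
             * but the generator emits the masking uniformly. */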

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);
            r01              = _mm256_andnot_ps(dummy_mask,r01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);
            r02              = _mm256_andnot_ps(dummy_mask,r02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

1260             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1261
1262             /* Update potential sum for this i atom from the interaction with this j atom. */
1263             velec            = _mm256_and_ps(velec,cutoff_mask);
1264             velec            = _mm256_andnot_ps(dummy_mask,velec);
1265             velecsum         = _mm256_add_ps(velecsum,velec);
1266
1267             fscal            = felec;
1268
1269             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1270
1271             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1272
1273             /* Calculate temporary vectorial force */
1274             tx               = _mm256_mul_ps(fscal,dx21);
1275             ty               = _mm256_mul_ps(fscal,dy21);
1276             tz               = _mm256_mul_ps(fscal,dz21);
1277
1278             /* Update vectorial force */
1279             fix2             = _mm256_add_ps(fix2,tx);
1280             fiy2             = _mm256_add_ps(fiy2,ty);
1281             fiz2             = _mm256_add_ps(fiz2,tz);
1282
1283             fjx1             = _mm256_add_ps(fjx1,tx);
1284             fjy1             = _mm256_add_ps(fjy1,ty);
1285             fjz1             = _mm256_add_ps(fjz1,tz);
1286
1287             }
1288
1289             /**************************
1290              * CALCULATE INTERACTIONS *
1291              **************************/
1292
1293             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1294             {
1295
1296             r22              = _mm256_mul_ps(rsq22,rinv22);
1297             r22              = _mm256_andnot_ps(dummy_mask,r22);
1298
1299             /* EWALD ELECTROSTATICS */
1300             
1301             /* Analytical PME correction */
1302             zeta2            = _mm256_mul_ps(beta2,rsq22);
1303             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1304             pmecorrF         = avx256_pmecorrF_f(zeta2);
1305             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1306             felec            = _mm256_mul_ps(qq22,felec);
1307             pmecorrV         = avx256_pmecorrV_f(zeta2);
1308             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1309             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
1310             velec            = _mm256_mul_ps(qq22,velec);
1311             
1312             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1313
1314             /* Update potential sum for this i atom from the interaction with this j atom. */
1315             velec            = _mm256_and_ps(velec,cutoff_mask);
1316             velec            = _mm256_andnot_ps(dummy_mask,velec);
1317             velecsum         = _mm256_add_ps(velecsum,velec);
1318
1319             fscal            = felec;
1320
1321             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1322
1323             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1324
1325             /* Calculate temporary vectorial force */
1326             tx               = _mm256_mul_ps(fscal,dx22);
1327             ty               = _mm256_mul_ps(fscal,dy22);
1328             tz               = _mm256_mul_ps(fscal,dz22);
1329
1330             /* Update vectorial force */
1331             fix2             = _mm256_add_ps(fix2,tx);
1332             fiy2             = _mm256_add_ps(fiy2,ty);
1333             fiz2             = _mm256_add_ps(fiz2,tz);
1334
1335             fjx2             = _mm256_add_ps(fjx2,tx);
1336             fjy2             = _mm256_add_ps(fjy2,ty);
1337             fjz2             = _mm256_add_ps(fjz2,tz);
1338
1339             }
1340
1341             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1342             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1343             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1344             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1345             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1346             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1347             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1348             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
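            /* Padding entries (jnrlist<0) have their force output redirected to the
             * local scratch buffer, so the scatter below never touches a real atom.
             */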
1349
1350             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1351                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
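            /* The decrement transposes the nine SIMD force accumulators back to
             * per-atom x/y/z order and subtracts them from the eight j atoms
             * (Newton's third law: the same tx/ty/tz were added to the i atoms).
             */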
1352
1353             /* Inner loop uses 990 flops */
1354         }
1355
1356         /* End of innermost loop */
1357
1358         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1359                                                  f+i_coord_offset,fshift+i_shift_offset);
1360
1361         ggid                        = gid[iidx];
1362         /* Update potential energies */
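        /* velecsum holds eight partial sums; update_1pot_ps reduces them to one
         * value and adds it to the Coulomb energy of energy-group pair ggid.
         */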
1363         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1364
1365         /* Increment number of inner iterations */
1366         inneriter                  += j_index_end - j_index_start;
1367
1368         /* Outer loop uses 19 flops */
1369     }
1370
1371     /* Increment number of outer iterations */
1372     outeriter        += nri;
1373
1374     /* Update outer/inner flops */
1375
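    /* The per-iteration flop counts are fixed for this kernel flavor and are
     * booked with inc_nrnb() for the accounting mdrun reports at the end of a run.
     */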
1376     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*990);
1377 }
1378 /*
1379  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_single
1380  * Electrostatics interaction: Ewald
1381  * VdW interaction:            None
1382  * Geometry:                   Water3-Water3
1383  * Calculate force/pot:        Force
1384  */
1385 void
1386 nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_single
1387                     (t_nblist                    * gmx_restrict       nlist,
1388                      rvec                        * gmx_restrict          xx,
1389                      rvec                        * gmx_restrict          ff,
1390                      struct t_forcerec           * gmx_restrict          fr,
1391                      t_mdatoms                   * gmx_restrict     mdatoms,
1392                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1393                      t_nrnb                      * gmx_restrict        nrnb)
1394 {
1395     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1396      * just 0 for non-waters.
1397      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
1398      * jnr indices corresponding to data put in the eight positions in the SIMD register.
1399      */
1400     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1401     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1402     int              jnrA,jnrB,jnrC,jnrD;
1403     int              jnrE,jnrF,jnrG,jnrH;
1404     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1405     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1406     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1407     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1408     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1409     real             rcutoff_scalar;
1410     real             *shiftvec,*fshift,*x,*f;
1411     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1412     real             scratch[4*DIM];
1413     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1414     real *           vdwioffsetptr0;
1415     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1416     real *           vdwioffsetptr1;
1417     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1418     real *           vdwioffsetptr2;
1419     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1420     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1421     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1422     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1423     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1424     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1425     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1426     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1427     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1428     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1429     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1430     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1431     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1432     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1433     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1434     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1435     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1436     real             *charge;
1437     __m256i          ewitab;
1438     __m128i          ewitab_lo,ewitab_hi;
1439     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1440     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1441     real             *ewtab;
1442     __m256           dummy_mask,cutoff_mask;
1443     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1444     __m256           one     = _mm256_set1_ps(1.0);
1445     __m256           two     = _mm256_set1_ps(2.0);
1446     x                = xx[0];
1447     f                = ff[0];
1448
1449     nri              = nlist->nri;
1450     iinr             = nlist->iinr;
1451     jindex           = nlist->jindex;
1452     jjnr             = nlist->jjnr;
1453     shiftidx         = nlist->shift;
1454     gid              = nlist->gid;
1455     shiftvec         = fr->shift_vec[0];
1456     fshift           = fr->fshift[0];
1457     facel            = _mm256_set1_ps(fr->ic->epsfac);
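    /* epsfac is the Coulomb conversion factor (ONE_4PI_EPS0, possibly scaled by a
     * relative permittivity), broadcast once so the i charges can be premultiplied.
     */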
1458     charge           = mdatoms->chargeA;
1459
1460     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1461     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1462     beta2            = _mm256_mul_ps(beta,beta);
1463     beta3            = _mm256_mul_ps(beta,beta2);
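    /* "EwSh" means the Ewald potential is shifted by the constant sh_ewald
     * (effectively the erfc(beta*r)/r value at the cut-off) so that velec
     * vanishes exactly at r = rcoulomb.
     */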
1464
1465     ewtab            = fr->ic->tabq_coul_F;
1466     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1467     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1468
1469     /* Setup water-specific parameters */
1470     inr              = nlist->iinr[0];
1471     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1472     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1473     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1474
1475     jq0              = _mm256_set1_ps(charge[inr+0]);
1476     jq1              = _mm256_set1_ps(charge[inr+1]);
1477     jq2              = _mm256_set1_ps(charge[inr+2]);
1478     qq00             = _mm256_mul_ps(iq0,jq0);
1479     qq01             = _mm256_mul_ps(iq0,jq1);
1480     qq02             = _mm256_mul_ps(iq0,jq2);
1481     qq10             = _mm256_mul_ps(iq1,jq0);
1482     qq11             = _mm256_mul_ps(iq1,jq1);
1483     qq12             = _mm256_mul_ps(iq1,jq2);
1484     qq20             = _mm256_mul_ps(iq2,jq0);
1485     qq21             = _mm256_mul_ps(iq2,jq1);
1486     qq22             = _mm256_mul_ps(iq2,jq2);
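    /* All i and j particles handled by this kernel are water atoms with identical
     * topology, so the nine charge products can be formed once, outside both loops,
     * from the charges of the first listed water.
     */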
1487
1488     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1489     rcutoff_scalar   = fr->ic->rcoulomb;
1490     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1491     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1492
1493     /* Avoid stupid compiler warnings */
1494     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1495     j_coord_offsetA = 0;
1496     j_coord_offsetB = 0;
1497     j_coord_offsetC = 0;
1498     j_coord_offsetD = 0;
1499     j_coord_offsetE = 0;
1500     j_coord_offsetF = 0;
1501     j_coord_offsetG = 0;
1502     j_coord_offsetH = 0;
1503
1504     outeriter        = 0;
1505     inneriter        = 0;
1506
1507     for(iidx=0;iidx<4*DIM;iidx++)
1508     {
1509         scratch[iidx] = 0.0;
1510     }
1511
1512     /* Start outer loop over neighborlists */
1513     for(iidx=0; iidx<nri; iidx++)
1514     {
1515         /* Load shift vector for this list */
1516         i_shift_offset   = DIM*shiftidx[iidx];
1517
1518         /* Load limits for loop over neighbors */
1519         j_index_start    = jindex[iidx];
1520         j_index_end      = jindex[iidx+1];
1521
1522         /* Get outer coordinate index */
1523         inr              = iinr[iidx];
1524         i_coord_offset   = DIM*inr;
1525
1526         /* Load i particle coords and add shift vector */
1527         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1528                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1529
1530         fix0             = _mm256_setzero_ps();
1531         fiy0             = _mm256_setzero_ps();
1532         fiz0             = _mm256_setzero_ps();
1533         fix1             = _mm256_setzero_ps();
1534         fiy1             = _mm256_setzero_ps();
1535         fiz1             = _mm256_setzero_ps();
1536         fix2             = _mm256_setzero_ps();
1537         fiy2             = _mm256_setzero_ps();
1538         fiz2             = _mm256_setzero_ps();
1539
1540         /* Start inner kernel loop */
1541         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1542         {
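            /* This loop handles full octets of real j atoms; jjnr is padded with
             * negative indices, so the jjnr[jidx+7]>=0 test drops out early and the
             * remainder is processed in the masked epilogue below.
             */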
1543
1544             /* Get j neighbor index, and coordinate index */
1545             jnrA             = jjnr[jidx];
1546             jnrB             = jjnr[jidx+1];
1547             jnrC             = jjnr[jidx+2];
1548             jnrD             = jjnr[jidx+3];
1549             jnrE             = jjnr[jidx+4];
1550             jnrF             = jjnr[jidx+5];
1551             jnrG             = jjnr[jidx+6];
1552             jnrH             = jjnr[jidx+7];
1553             j_coord_offsetA  = DIM*jnrA;
1554             j_coord_offsetB  = DIM*jnrB;
1555             j_coord_offsetC  = DIM*jnrC;
1556             j_coord_offsetD  = DIM*jnrD;
1557             j_coord_offsetE  = DIM*jnrE;
1558             j_coord_offsetF  = DIM*jnrF;
1559             j_coord_offsetG  = DIM*jnrG;
1560             j_coord_offsetH  = DIM*jnrH;
1561
1562             /* load j atom coordinates */
1563             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1564                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1565                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1566                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1567                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1568
1569             /* Calculate displacement vector */
1570             dx00             = _mm256_sub_ps(ix0,jx0);
1571             dy00             = _mm256_sub_ps(iy0,jy0);
1572             dz00             = _mm256_sub_ps(iz0,jz0);
1573             dx01             = _mm256_sub_ps(ix0,jx1);
1574             dy01             = _mm256_sub_ps(iy0,jy1);
1575             dz01             = _mm256_sub_ps(iz0,jz1);
1576             dx02             = _mm256_sub_ps(ix0,jx2);
1577             dy02             = _mm256_sub_ps(iy0,jy2);
1578             dz02             = _mm256_sub_ps(iz0,jz2);
1579             dx10             = _mm256_sub_ps(ix1,jx0);
1580             dy10             = _mm256_sub_ps(iy1,jy0);
1581             dz10             = _mm256_sub_ps(iz1,jz0);
1582             dx11             = _mm256_sub_ps(ix1,jx1);
1583             dy11             = _mm256_sub_ps(iy1,jy1);
1584             dz11             = _mm256_sub_ps(iz1,jz1);
1585             dx12             = _mm256_sub_ps(ix1,jx2);
1586             dy12             = _mm256_sub_ps(iy1,jy2);
1587             dz12             = _mm256_sub_ps(iz1,jz2);
1588             dx20             = _mm256_sub_ps(ix2,jx0);
1589             dy20             = _mm256_sub_ps(iy2,jy0);
1590             dz20             = _mm256_sub_ps(iz2,jz0);
1591             dx21             = _mm256_sub_ps(ix2,jx1);
1592             dy21             = _mm256_sub_ps(iy2,jy1);
1593             dz21             = _mm256_sub_ps(iz2,jz1);
1594             dx22             = _mm256_sub_ps(ix2,jx2);
1595             dy22             = _mm256_sub_ps(iy2,jy2);
1596             dz22             = _mm256_sub_ps(iz2,jz2);
1597
1598             /* Calculate squared distance and things based on it */
1599             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1600             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1601             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1602             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1603             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1604             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1605             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1606             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1607             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1608
1609             rinv00           = avx256_invsqrt_f(rsq00);
1610             rinv01           = avx256_invsqrt_f(rsq01);
1611             rinv02           = avx256_invsqrt_f(rsq02);
1612             rinv10           = avx256_invsqrt_f(rsq10);
1613             rinv11           = avx256_invsqrt_f(rsq11);
1614             rinv12           = avx256_invsqrt_f(rsq12);
1615             rinv20           = avx256_invsqrt_f(rsq20);
1616             rinv21           = avx256_invsqrt_f(rsq21);
1617             rinv22           = avx256_invsqrt_f(rsq22);
1618
1619             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1620             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1621             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1622             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1623             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1624             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1625             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1626             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1627             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1628
1629             fjx0             = _mm256_setzero_ps();
1630             fjy0             = _mm256_setzero_ps();
1631             fjz0             = _mm256_setzero_ps();
1632             fjx1             = _mm256_setzero_ps();
1633             fjy1             = _mm256_setzero_ps();
1634             fjz1             = _mm256_setzero_ps();
1635             fjx2             = _mm256_setzero_ps();
1636             fjy2             = _mm256_setzero_ps();
1637             fjz2             = _mm256_setzero_ps();
1638
1639             /**************************
1640              * CALCULATE INTERACTIONS *
1641              **************************/
1642
1643             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1644             {
1645
1646             r00              = _mm256_mul_ps(rsq00,rinv00);
1647
1648             /* EWALD ELECTROSTATICS */
1649             
1650             /* Analytical PME correction */
1651             zeta2            = _mm256_mul_ps(beta2,rsq00);
1652             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1653             pmecorrF         = avx256_pmecorrF_f(zeta2);
1654             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1655             felec            = _mm256_mul_ps(qq00,felec);
1656             
1657             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1658
1659             fscal            = felec;
1660
1661             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1662
1663             /* Calculate temporary vectorial force */
1664             tx               = _mm256_mul_ps(fscal,dx00);
1665             ty               = _mm256_mul_ps(fscal,dy00);
1666             tz               = _mm256_mul_ps(fscal,dz00);
1667
1668             /* Update vectorial force */
1669             fix0             = _mm256_add_ps(fix0,tx);
1670             fiy0             = _mm256_add_ps(fiy0,ty);
1671             fiz0             = _mm256_add_ps(fiz0,tz);
1672
1673             fjx0             = _mm256_add_ps(fjx0,tx);
1674             fjy0             = _mm256_add_ps(fjy0,ty);
1675             fjz0             = _mm256_add_ps(fjz0,tz);
1676
1677             }
1678
1679             /**************************
1680              * CALCULATE INTERACTIONS *
1681              **************************/
1682
1683             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1684             {
1685
1686             r01              = _mm256_mul_ps(rsq01,rinv01);
1687
1688             /* EWALD ELECTROSTATICS */
1689             
1690             /* Analytical PME correction */
1691             zeta2            = _mm256_mul_ps(beta2,rsq01);
1692             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1693             pmecorrF         = avx256_pmecorrF_f(zeta2);
1694             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1695             felec            = _mm256_mul_ps(qq01,felec);
1696             
1697             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1698
1699             fscal            = felec;
1700
1701             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1702
1703             /* Calculate temporary vectorial force */
1704             tx               = _mm256_mul_ps(fscal,dx01);
1705             ty               = _mm256_mul_ps(fscal,dy01);
1706             tz               = _mm256_mul_ps(fscal,dz01);
1707
1708             /* Update vectorial force */
1709             fix0             = _mm256_add_ps(fix0,tx);
1710             fiy0             = _mm256_add_ps(fiy0,ty);
1711             fiz0             = _mm256_add_ps(fiz0,tz);
1712
1713             fjx1             = _mm256_add_ps(fjx1,tx);
1714             fjy1             = _mm256_add_ps(fjy1,ty);
1715             fjz1             = _mm256_add_ps(fjz1,tz);
1716
1717             }
1718
1719             /**************************
1720              * CALCULATE INTERACTIONS *
1721              **************************/
1722
1723             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1724             {
1725
1726             r02              = _mm256_mul_ps(rsq02,rinv02);
1727
1728             /* EWALD ELECTROSTATICS */
1729             
1730             /* Analytical PME correction */
1731             zeta2            = _mm256_mul_ps(beta2,rsq02);
1732             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1733             pmecorrF         = avx256_pmecorrF_f(zeta2);
1734             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1735             felec            = _mm256_mul_ps(qq02,felec);
1736             
1737             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1738
1739             fscal            = felec;
1740
1741             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1742
1743             /* Calculate temporary vectorial force */
1744             tx               = _mm256_mul_ps(fscal,dx02);
1745             ty               = _mm256_mul_ps(fscal,dy02);
1746             tz               = _mm256_mul_ps(fscal,dz02);
1747
1748             /* Update vectorial force */
1749             fix0             = _mm256_add_ps(fix0,tx);
1750             fiy0             = _mm256_add_ps(fiy0,ty);
1751             fiz0             = _mm256_add_ps(fiz0,tz);
1752
1753             fjx2             = _mm256_add_ps(fjx2,tx);
1754             fjy2             = _mm256_add_ps(fjy2,ty);
1755             fjz2             = _mm256_add_ps(fjz2,tz);
1756
1757             }
1758
1759             /**************************
1760              * CALCULATE INTERACTIONS *
1761              **************************/
1762
1763             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1764             {
1765
1766             r10              = _mm256_mul_ps(rsq10,rinv10);
1767
1768             /* EWALD ELECTROSTATICS */
1769             
1770             /* Analytical PME correction */
1771             zeta2            = _mm256_mul_ps(beta2,rsq10);
1772             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1773             pmecorrF         = avx256_pmecorrF_f(zeta2);
1774             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1775             felec            = _mm256_mul_ps(qq10,felec);
1776             
1777             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1778
1779             fscal            = felec;
1780
1781             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1782
1783             /* Calculate temporary vectorial force */
1784             tx               = _mm256_mul_ps(fscal,dx10);
1785             ty               = _mm256_mul_ps(fscal,dy10);
1786             tz               = _mm256_mul_ps(fscal,dz10);
1787
1788             /* Update vectorial force */
1789             fix1             = _mm256_add_ps(fix1,tx);
1790             fiy1             = _mm256_add_ps(fiy1,ty);
1791             fiz1             = _mm256_add_ps(fiz1,tz);
1792
1793             fjx0             = _mm256_add_ps(fjx0,tx);
1794             fjy0             = _mm256_add_ps(fjy0,ty);
1795             fjz0             = _mm256_add_ps(fjz0,tz);
1796
1797             }
1798
1799             /**************************
1800              * CALCULATE INTERACTIONS *
1801              **************************/
1802
1803             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1804             {
1805
1806             r11              = _mm256_mul_ps(rsq11,rinv11);
1807
1808             /* EWALD ELECTROSTATICS */
1809             
1810             /* Analytical PME correction */
1811             zeta2            = _mm256_mul_ps(beta2,rsq11);
1812             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1813             pmecorrF         = avx256_pmecorrF_f(zeta2);
1814             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1815             felec            = _mm256_mul_ps(qq11,felec);
1816             
1817             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1818
1819             fscal            = felec;
1820
1821             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1822
1823             /* Calculate temporary vectorial force */
1824             tx               = _mm256_mul_ps(fscal,dx11);
1825             ty               = _mm256_mul_ps(fscal,dy11);
1826             tz               = _mm256_mul_ps(fscal,dz11);
1827
1828             /* Update vectorial force */
1829             fix1             = _mm256_add_ps(fix1,tx);
1830             fiy1             = _mm256_add_ps(fiy1,ty);
1831             fiz1             = _mm256_add_ps(fiz1,tz);
1832
1833             fjx1             = _mm256_add_ps(fjx1,tx);
1834             fjy1             = _mm256_add_ps(fjy1,ty);
1835             fjz1             = _mm256_add_ps(fjz1,tz);
1836
1837             }
1838
1839             /**************************
1840              * CALCULATE INTERACTIONS *
1841              **************************/
1842
1843             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1844             {
1845
1846             r12              = _mm256_mul_ps(rsq12,rinv12);
1847
1848             /* EWALD ELECTROSTATICS */
1849             
1850             /* Analytical PME correction */
1851             zeta2            = _mm256_mul_ps(beta2,rsq12);
1852             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1853             pmecorrF         = avx256_pmecorrF_f(zeta2);
1854             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1855             felec            = _mm256_mul_ps(qq12,felec);
1856             
1857             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1858
1859             fscal            = felec;
1860
1861             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1862
1863             /* Calculate temporary vectorial force */
1864             tx               = _mm256_mul_ps(fscal,dx12);
1865             ty               = _mm256_mul_ps(fscal,dy12);
1866             tz               = _mm256_mul_ps(fscal,dz12);
1867
1868             /* Update vectorial force */
1869             fix1             = _mm256_add_ps(fix1,tx);
1870             fiy1             = _mm256_add_ps(fiy1,ty);
1871             fiz1             = _mm256_add_ps(fiz1,tz);
1872
1873             fjx2             = _mm256_add_ps(fjx2,tx);
1874             fjy2             = _mm256_add_ps(fjy2,ty);
1875             fjz2             = _mm256_add_ps(fjz2,tz);
1876
1877             }
1878
1879             /**************************
1880              * CALCULATE INTERACTIONS *
1881              **************************/
1882
1883             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1884             {
1885
1886             r20              = _mm256_mul_ps(rsq20,rinv20);
1887
1888             /* EWALD ELECTROSTATICS */
1889             
1890             /* Analytical PME correction */
1891             zeta2            = _mm256_mul_ps(beta2,rsq20);
1892             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1893             pmecorrF         = avx256_pmecorrF_f(zeta2);
1894             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1895             felec            = _mm256_mul_ps(qq20,felec);
1896             
1897             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1898
1899             fscal            = felec;
1900
1901             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1902
1903             /* Calculate temporary vectorial force */
1904             tx               = _mm256_mul_ps(fscal,dx20);
1905             ty               = _mm256_mul_ps(fscal,dy20);
1906             tz               = _mm256_mul_ps(fscal,dz20);
1907
1908             /* Update vectorial force */
1909             fix2             = _mm256_add_ps(fix2,tx);
1910             fiy2             = _mm256_add_ps(fiy2,ty);
1911             fiz2             = _mm256_add_ps(fiz2,tz);
1912
1913             fjx0             = _mm256_add_ps(fjx0,tx);
1914             fjy0             = _mm256_add_ps(fjy0,ty);
1915             fjz0             = _mm256_add_ps(fjz0,tz);
1916
1917             }
1918
1919             /**************************
1920              * CALCULATE INTERACTIONS *
1921              **************************/
1922
1923             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1924             {
1925
1926             r21              = _mm256_mul_ps(rsq21,rinv21);
1927
1928             /* EWALD ELECTROSTATICS */
1929             
1930             /* Analytical PME correction */
1931             zeta2            = _mm256_mul_ps(beta2,rsq21);
1932             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1933             pmecorrF         = avx256_pmecorrF_f(zeta2);
1934             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1935             felec            = _mm256_mul_ps(qq21,felec);
1936             
1937             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1938
1939             fscal            = felec;
1940
1941             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1942
1943             /* Calculate temporary vectorial force */
1944             tx               = _mm256_mul_ps(fscal,dx21);
1945             ty               = _mm256_mul_ps(fscal,dy21);
1946             tz               = _mm256_mul_ps(fscal,dz21);
1947
1948             /* Update vectorial force */
1949             fix2             = _mm256_add_ps(fix2,tx);
1950             fiy2             = _mm256_add_ps(fiy2,ty);
1951             fiz2             = _mm256_add_ps(fiz2,tz);
1952
1953             fjx1             = _mm256_add_ps(fjx1,tx);
1954             fjy1             = _mm256_add_ps(fjy1,ty);
1955             fjz1             = _mm256_add_ps(fjz1,tz);
1956
1957             }
1958
1959             /**************************
1960              * CALCULATE INTERACTIONS *
1961              **************************/
1962
1963             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1964             {
1965
1966             r22              = _mm256_mul_ps(rsq22,rinv22);
1967
1968             /* EWALD ELECTROSTATICS */
1969             
1970             /* Analytical PME correction */
1971             zeta2            = _mm256_mul_ps(beta2,rsq22);
1972             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1973             pmecorrF         = avx256_pmecorrF_f(zeta2);
1974             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1975             felec            = _mm256_mul_ps(qq22,felec);
1976             
1977             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1978
1979             fscal            = felec;
1980
1981             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1982
1983             /* Calculate temporary vectorial force */
1984             tx               = _mm256_mul_ps(fscal,dx22);
1985             ty               = _mm256_mul_ps(fscal,dy22);
1986             tz               = _mm256_mul_ps(fscal,dz22);
1987
1988             /* Update vectorial force */
1989             fix2             = _mm256_add_ps(fix2,tx);
1990             fiy2             = _mm256_add_ps(fiy2,ty);
1991             fiz2             = _mm256_add_ps(fiz2,tz);
1992
1993             fjx2             = _mm256_add_ps(fjx2,tx);
1994             fjy2             = _mm256_add_ps(fjy2,ty);
1995             fjz2             = _mm256_add_ps(fjz2,tz);
1996
1997             }
1998
1999             fjptrA             = f+j_coord_offsetA;
2000             fjptrB             = f+j_coord_offsetB;
2001             fjptrC             = f+j_coord_offsetC;
2002             fjptrD             = f+j_coord_offsetD;
2003             fjptrE             = f+j_coord_offsetE;
2004             fjptrF             = f+j_coord_offsetF;
2005             fjptrG             = f+j_coord_offsetG;
2006             fjptrH             = f+j_coord_offsetH;
2007
2008             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2009                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2010
2011             /* Inner loop uses 531 flops */
2012         }
2013
2014         if(jidx<j_index_end)
2015         {
2016
2017             /* Get j neighbor index, and coordinate index */
2018             jnrlistA         = jjnr[jidx];
2019             jnrlistB         = jjnr[jidx+1];
2020             jnrlistC         = jjnr[jidx+2];
2021             jnrlistD         = jjnr[jidx+3];
2022             jnrlistE         = jjnr[jidx+4];
2023             jnrlistF         = jjnr[jidx+5];
2024             jnrlistG         = jjnr[jidx+6];
2025             jnrlistH         = jjnr[jidx+7];
2026             /* Sign of each element will be negative for non-real atoms.
2027              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2028              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
2029              */
2030             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2031                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
2032                                             
2033             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2034             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2035             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2036             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2037             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2038             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2039             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2040             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
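            /* Clamp padded (negative) indices to atom 0 so the gathered loads stay
             * in bounds; the bogus contributions in those lanes are cleared later
             * with dummy_mask.
             */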
2041             j_coord_offsetA  = DIM*jnrA;
2042             j_coord_offsetB  = DIM*jnrB;
2043             j_coord_offsetC  = DIM*jnrC;
2044             j_coord_offsetD  = DIM*jnrD;
2045             j_coord_offsetE  = DIM*jnrE;
2046             j_coord_offsetF  = DIM*jnrF;
2047             j_coord_offsetG  = DIM*jnrG;
2048             j_coord_offsetH  = DIM*jnrH;
2049
2050             /* load j atom coordinates */
2051             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2052                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2053                                                  x+j_coord_offsetE,x+j_coord_offsetF,
2054                                                  x+j_coord_offsetG,x+j_coord_offsetH,
2055                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2056
2057             /* Calculate displacement vector */
2058             dx00             = _mm256_sub_ps(ix0,jx0);
2059             dy00             = _mm256_sub_ps(iy0,jy0);
2060             dz00             = _mm256_sub_ps(iz0,jz0);
2061             dx01             = _mm256_sub_ps(ix0,jx1);
2062             dy01             = _mm256_sub_ps(iy0,jy1);
2063             dz01             = _mm256_sub_ps(iz0,jz1);
2064             dx02             = _mm256_sub_ps(ix0,jx2);
2065             dy02             = _mm256_sub_ps(iy0,jy2);
2066             dz02             = _mm256_sub_ps(iz0,jz2);
2067             dx10             = _mm256_sub_ps(ix1,jx0);
2068             dy10             = _mm256_sub_ps(iy1,jy0);
2069             dz10             = _mm256_sub_ps(iz1,jz0);
2070             dx11             = _mm256_sub_ps(ix1,jx1);
2071             dy11             = _mm256_sub_ps(iy1,jy1);
2072             dz11             = _mm256_sub_ps(iz1,jz1);
2073             dx12             = _mm256_sub_ps(ix1,jx2);
2074             dy12             = _mm256_sub_ps(iy1,jy2);
2075             dz12             = _mm256_sub_ps(iz1,jz2);
2076             dx20             = _mm256_sub_ps(ix2,jx0);
2077             dy20             = _mm256_sub_ps(iy2,jy0);
2078             dz20             = _mm256_sub_ps(iz2,jz0);
2079             dx21             = _mm256_sub_ps(ix2,jx1);
2080             dy21             = _mm256_sub_ps(iy2,jy1);
2081             dz21             = _mm256_sub_ps(iz2,jz1);
2082             dx22             = _mm256_sub_ps(ix2,jx2);
2083             dy22             = _mm256_sub_ps(iy2,jy2);
2084             dz22             = _mm256_sub_ps(iz2,jz2);
2085
2086             /* Calculate squared distance and things based on it */
2087             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2088             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
2089             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
2090             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
2091             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2092             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2093             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
2094             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2095             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2096
2097             rinv00           = avx256_invsqrt_f(rsq00);
2098             rinv01           = avx256_invsqrt_f(rsq01);
2099             rinv02           = avx256_invsqrt_f(rsq02);
2100             rinv10           = avx256_invsqrt_f(rsq10);
2101             rinv11           = avx256_invsqrt_f(rsq11);
2102             rinv12           = avx256_invsqrt_f(rsq12);
2103             rinv20           = avx256_invsqrt_f(rsq20);
2104             rinv21           = avx256_invsqrt_f(rsq21);
2105             rinv22           = avx256_invsqrt_f(rsq22);
2106
2107             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
2108             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
2109             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
2110             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
2111             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2112             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2113             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
2114             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2115             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2116
2117             fjx0             = _mm256_setzero_ps();
2118             fjy0             = _mm256_setzero_ps();
2119             fjz0             = _mm256_setzero_ps();
2120             fjx1             = _mm256_setzero_ps();
2121             fjy1             = _mm256_setzero_ps();
2122             fjz1             = _mm256_setzero_ps();
2123             fjx2             = _mm256_setzero_ps();
2124             fjy2             = _mm256_setzero_ps();
2125             fjz2             = _mm256_setzero_ps();
2126
2127             /**************************
2128              * CALCULATE INTERACTIONS *
2129              **************************/
2130
2131             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2132             {
2133
2134             r00              = _mm256_mul_ps(rsq00,rinv00);
2135             r00              = _mm256_andnot_ps(dummy_mask,r00);
2136
2137             /* EWALD ELECTROSTATICS */
2138             
2139             /* Analytical PME correction */
2140             zeta2            = _mm256_mul_ps(beta2,rsq00);
2141             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
2142             pmecorrF         = avx256_pmecorrF_f(zeta2);
2143             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2144             felec            = _mm256_mul_ps(qq00,felec);
2145             
2146             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2147
2148             fscal            = felec;
2149
2150             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2151
2152             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2153
2154             /* Calculate temporary vectorial force */
2155             tx               = _mm256_mul_ps(fscal,dx00);
2156             ty               = _mm256_mul_ps(fscal,dy00);
2157             tz               = _mm256_mul_ps(fscal,dz00);
2158
2159             /* Update vectorial force */
2160             fix0             = _mm256_add_ps(fix0,tx);
2161             fiy0             = _mm256_add_ps(fiy0,ty);
2162             fiz0             = _mm256_add_ps(fiz0,tz);
2163
2164             fjx0             = _mm256_add_ps(fjx0,tx);
2165             fjy0             = _mm256_add_ps(fjy0,ty);
2166             fjz0             = _mm256_add_ps(fjz0,tz);
2167
2168             }
2169
2170             /**************************
2171              * CALCULATE INTERACTIONS *
2172              **************************/
2173
2174             if (gmx_mm256_any_lt(rsq01,rcutoff2))
2175             {
2176
2177             r01              = _mm256_mul_ps(rsq01,rinv01);
2178             r01              = _mm256_andnot_ps(dummy_mask,r01);
2179
2180             /* EWALD ELECTROSTATICS */
2181             
2182             /* Analytical PME correction */
2183             zeta2            = _mm256_mul_ps(beta2,rsq01);
2184             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
2185             pmecorrF         = avx256_pmecorrF_f(zeta2);
2186             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2187             felec            = _mm256_mul_ps(qq01,felec);
2188             
2189             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
2190
2191             fscal            = felec;
2192
2193             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2194
2195             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2196
2197             /* Calculate temporary vectorial force */
2198             tx               = _mm256_mul_ps(fscal,dx01);
2199             ty               = _mm256_mul_ps(fscal,dy01);
2200             tz               = _mm256_mul_ps(fscal,dz01);
2201
2202             /* Update vectorial force */
2203             fix0             = _mm256_add_ps(fix0,tx);
2204             fiy0             = _mm256_add_ps(fiy0,ty);
2205             fiz0             = _mm256_add_ps(fiz0,tz);
2206
2207             fjx1             = _mm256_add_ps(fjx1,tx);
2208             fjy1             = _mm256_add_ps(fjy1,ty);
2209             fjz1             = _mm256_add_ps(fjz1,tz);
2210
2211             }
2212
2213             /**************************
2214              * CALCULATE INTERACTIONS *
2215              **************************/
2216
2217             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2218             {
2219
2220             r02              = _mm256_mul_ps(rsq02,rinv02);
2221             r02              = _mm256_andnot_ps(dummy_mask,r02);
2222
2223             /* EWALD ELECTROSTATICS */
2224             
2225             /* Analytical PME correction */
2226             zeta2            = _mm256_mul_ps(beta2,rsq02);
2227             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
2228             pmecorrF         = avx256_pmecorrF_f(zeta2);
2229             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2230             felec            = _mm256_mul_ps(qq02,felec);
2231             
2232             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
2233
2234             fscal            = felec;
2235
2236             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2237
2238             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2239
2240             /* Calculate temporary vectorial force */
2241             tx               = _mm256_mul_ps(fscal,dx02);
2242             ty               = _mm256_mul_ps(fscal,dy02);
2243             tz               = _mm256_mul_ps(fscal,dz02);
2244
2245             /* Update vectorial force */
2246             fix0             = _mm256_add_ps(fix0,tx);
2247             fiy0             = _mm256_add_ps(fiy0,ty);
2248             fiz0             = _mm256_add_ps(fiz0,tz);
2249
2250             fjx2             = _mm256_add_ps(fjx2,tx);
2251             fjy2             = _mm256_add_ps(fjy2,ty);
2252             fjz2             = _mm256_add_ps(fjz2,tz);
2253
2254             }
2255
2256             /**************************
2257              * CALCULATE INTERACTIONS *
2258              **************************/
2259
2260             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2261             {
2262
2263             r10              = _mm256_mul_ps(rsq10,rinv10);
2264             r10              = _mm256_andnot_ps(dummy_mask,r10);
2265
2266             /* EWALD ELECTROSTATICS */
2267             
2268             /* Analytical PME correction */
2269             zeta2            = _mm256_mul_ps(beta2,rsq10);
2270             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
2271             pmecorrF         = avx256_pmecorrF_f(zeta2);
2272             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2273             felec            = _mm256_mul_ps(qq10,felec);
2274             
2275             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
2276
2277             fscal            = felec;
2278
2279             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2280
2281             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2282
2283             /* Calculate temporary vectorial force */
2284             tx               = _mm256_mul_ps(fscal,dx10);
2285             ty               = _mm256_mul_ps(fscal,dy10);
2286             tz               = _mm256_mul_ps(fscal,dz10);
2287
2288             /* Update vectorial force */
2289             fix1             = _mm256_add_ps(fix1,tx);
2290             fiy1             = _mm256_add_ps(fiy1,ty);
2291             fiz1             = _mm256_add_ps(fiz1,tz);
2292
2293             fjx0             = _mm256_add_ps(fjx0,tx);
2294             fjy0             = _mm256_add_ps(fjy0,ty);
2295             fjz0             = _mm256_add_ps(fjz0,tz);
2296
2297             }
2298
2299             /**************************
2300              * CALCULATE INTERACTIONS *
2301              **************************/
2302
2303             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2304             {
2305
2306             r11              = _mm256_mul_ps(rsq11,rinv11);
2307             r11              = _mm256_andnot_ps(dummy_mask,r11);
2308
2309             /* EWALD ELECTROSTATICS */
2310             
2311             /* Analytical PME correction */
2312             zeta2            = _mm256_mul_ps(beta2,rsq11);
2313             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2314             pmecorrF         = avx256_pmecorrF_f(zeta2);
2315             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2316             felec            = _mm256_mul_ps(qq11,felec);
2317             
2318             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2319
2320             fscal            = felec;
2321
2322             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2323
2324             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2325
2326             /* Calculate temporary vectorial force */
2327             tx               = _mm256_mul_ps(fscal,dx11);
2328             ty               = _mm256_mul_ps(fscal,dy11);
2329             tz               = _mm256_mul_ps(fscal,dz11);
2330
2331             /* Update vectorial force */
2332             fix1             = _mm256_add_ps(fix1,tx);
2333             fiy1             = _mm256_add_ps(fiy1,ty);
2334             fiz1             = _mm256_add_ps(fiz1,tz);
2335
2336             fjx1             = _mm256_add_ps(fjx1,tx);
2337             fjy1             = _mm256_add_ps(fjy1,ty);
2338             fjz1             = _mm256_add_ps(fjz1,tz);
2339
2340             }
2341
2342             /**************************
2343              * CALCULATE INTERACTIONS *
2344              **************************/
2345
2346             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2347             {
2348
2349             r12              = _mm256_mul_ps(rsq12,rinv12);
2350             r12              = _mm256_andnot_ps(dummy_mask,r12);
2351
2352             /* EWALD ELECTROSTATICS */
2353             
2354             /* Analytical PME correction */
2355             zeta2            = _mm256_mul_ps(beta2,rsq12);
2356             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2357             pmecorrF         = avx256_pmecorrF_f(zeta2);
2358             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2359             felec            = _mm256_mul_ps(qq12,felec);
2360             
2361             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2362
2363             fscal            = felec;
2364
2365             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2366
2367             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2368
2369             /* Calculate temporary vectorial force */
2370             tx               = _mm256_mul_ps(fscal,dx12);
2371             ty               = _mm256_mul_ps(fscal,dy12);
2372             tz               = _mm256_mul_ps(fscal,dz12);
2373
2374             /* Update vectorial force */
2375             fix1             = _mm256_add_ps(fix1,tx);
2376             fiy1             = _mm256_add_ps(fiy1,ty);
2377             fiz1             = _mm256_add_ps(fiz1,tz);
2378
2379             fjx2             = _mm256_add_ps(fjx2,tx);
2380             fjy2             = _mm256_add_ps(fjy2,ty);
2381             fjz2             = _mm256_add_ps(fjz2,tz);
2382
2383             }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

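            /* Redirect the force pointers of padding lanes (jnr < 0) to a scratch
             * buffer, so the eight-pointer swizzled store below stays valid without
             * per-lane branching; whatever lands in scratch is simply discarded.
             */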
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

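            /* Transpose the nine j-force accumulators from SIMD layout back to
             * per-atom xyz order and subtract them from the eight j entries of
             * the force array (action equals reaction).
             */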
            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 540 flops */
        }

        /* End of innermost loop */

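        /* Reduce the SIMD lanes of the i-particle forces, add the result to the
         * force array, and accumulate the corresponding shift force, which is
         * later used for the virial.
         */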
        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

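    /* The operation counts registered here feed the flop accounting summary
     * that GROMACS prints at the end of the run.
     */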
    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*540);
}