Merge release-4-6 into master
[alexxy/gromacs.git] / src / gromacs / gmxlib / nonbonded / nb_kernel_avx_256_double / nb_kernel_ElecEwSw_VdwNone_GeomW3W3_avx_256_double.c
1 /*
2  * Note: this file was generated by the Gromacs avx_256_double kernel generator.
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  * Copyright (c) 2001-2012, The GROMACS Development Team
9  *
10  * Gromacs is a library for molecular simulation and trajectory analysis,
11  * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
12  * a full list of developers and information, check out http://www.gromacs.org
13  *
14  * This program is free software; you can redistribute it and/or modify it under
15  * the terms of the GNU Lesser General Public License as published by the Free
16  * Software Foundation; either version 2 of the License, or (at your option) any
17  * later version.
18  *
19  * To help fund GROMACS development, we humbly ask that you cite
20  * the papers people have written on it - you can find them on the website.
21  */
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <math.h>
27
28 #include "../nb_kernel.h"
29 #include "types/simple.h"
30 #include "vec.h"
31 #include "nrnb.h"
32
33 #include "gmx_math_x86_avx_256_double.h"
34 #include "kernelutil_x86_avx_256_double.h"
35
36 /*
37  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_double
38  * Electrostatics interaction: Ewald
39  * VdW interaction:            None
40  * Geometry:                   Water3-Water3
41  * Calculate force/pot:        PotentialAndForce
42  */
43 void
44 nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_double
45                     (t_nblist * gmx_restrict                nlist,
46                      rvec * gmx_restrict                    xx,
47                      rvec * gmx_restrict                    ff,
48                      t_forcerec * gmx_restrict              fr,
49                      t_mdatoms * gmx_restrict               mdatoms,
50                      nb_kernel_data_t * gmx_restrict        kernel_data,
51                      t_nrnb * gmx_restrict                  nrnb)
52 {
53     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
54      * just 0 for non-waters.
55      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
56      * jnr indices corresponding to data put in the four positions in the SIMD register.
57      */
58     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
59     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
60     int              jnrA,jnrB,jnrC,jnrD;
61     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
62     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
63     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
64     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
65     real             rcutoff_scalar;
66     real             *shiftvec,*fshift,*x,*f;
67     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
68     real             scratch[4*DIM];
69     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
70     real *           vdwioffsetptr0;
71     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
72     real *           vdwioffsetptr1;
73     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
74     real *           vdwioffsetptr2;
75     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
76     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
77     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
78     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
79     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
80     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
81     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
82     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
83     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
84     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
85     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
86     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
87     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
88     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
89     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
90     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
91     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
92     real             *charge;
93     __m128i          ewitab;
94     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
95     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
96     real             *ewtab;
97     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
98     real             rswitch_scalar,d_scalar;
99     __m256d          dummy_mask,cutoff_mask;
100     __m128           tmpmask0,tmpmask1;
101     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
102     __m256d          one     = _mm256_set1_pd(1.0);
103     __m256d          two     = _mm256_set1_pd(2.0);
104     x                = xx[0];
105     f                = ff[0];
106
107     nri              = nlist->nri;
108     iinr             = nlist->iinr;
109     jindex           = nlist->jindex;
110     jjnr             = nlist->jjnr;
111     shiftidx         = nlist->shift;
112     gid              = nlist->gid;
113     shiftvec         = fr->shift_vec[0];
114     fshift           = fr->fshift[0];
115     facel            = _mm256_set1_pd(fr->epsfac);
116     charge           = mdatoms->chargeA;
117
118     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
119     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
120     beta2            = _mm256_mul_pd(beta,beta);
121     beta3            = _mm256_mul_pd(beta,beta2);
122
123     ewtab            = fr->ic->tabq_coul_FDV0;
124     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
125     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
126
127     /* Setup water-specific parameters */
128     inr              = nlist->iinr[0];
129     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
130     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
131     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
132
133     jq0              = _mm256_set1_pd(charge[inr+0]);
134     jq1              = _mm256_set1_pd(charge[inr+1]);
135     jq2              = _mm256_set1_pd(charge[inr+2]);
136     qq00             = _mm256_mul_pd(iq0,jq0);
137     qq01             = _mm256_mul_pd(iq0,jq1);
138     qq02             = _mm256_mul_pd(iq0,jq2);
139     qq10             = _mm256_mul_pd(iq1,jq0);
140     qq11             = _mm256_mul_pd(iq1,jq1);
141     qq12             = _mm256_mul_pd(iq1,jq2);
142     qq20             = _mm256_mul_pd(iq2,jq0);
143     qq21             = _mm256_mul_pd(iq2,jq1);
144     qq22             = _mm256_mul_pd(iq2,jq2);
145
146     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
147     rcutoff_scalar   = fr->rcoulomb;
148     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
149     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
150
151     rswitch_scalar   = fr->rcoulomb_switch;
152     rswitch          = _mm256_set1_pd(rswitch_scalar);
153     /* Setup switch parameters */
154     d_scalar         = rcutoff_scalar-rswitch_scalar;
155     d                = _mm256_set1_pd(d_scalar);
156     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
157     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
158     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
159     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
160     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
161     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
162
163     /* Avoid stupid compiler warnings */
164     jnrA = jnrB = jnrC = jnrD = 0;
165     j_coord_offsetA = 0;
166     j_coord_offsetB = 0;
167     j_coord_offsetC = 0;
168     j_coord_offsetD = 0;
169
170     outeriter        = 0;
171     inneriter        = 0;
172
173     for(iidx=0;iidx<4*DIM;iidx++)
174     {
175         scratch[iidx] = 0.0;
176     }
177
178     /* Start outer loop over neighborlists */
179     for(iidx=0; iidx<nri; iidx++)
180     {
181         /* Load shift vector for this list */
182         i_shift_offset   = DIM*shiftidx[iidx];
183
184         /* Load limits for loop over neighbors */
185         j_index_start    = jindex[iidx];
186         j_index_end      = jindex[iidx+1];
187
188         /* Get outer coordinate index */
189         inr              = iinr[iidx];
190         i_coord_offset   = DIM*inr;
191
192         /* Load i particle coords and add shift vector */
193         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
194                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
195
196         fix0             = _mm256_setzero_pd();
197         fiy0             = _mm256_setzero_pd();
198         fiz0             = _mm256_setzero_pd();
199         fix1             = _mm256_setzero_pd();
200         fiy1             = _mm256_setzero_pd();
201         fiz1             = _mm256_setzero_pd();
202         fix2             = _mm256_setzero_pd();
203         fiy2             = _mm256_setzero_pd();
204         fiz2             = _mm256_setzero_pd();
205
206         /* Reset potential sums */
207         velecsum         = _mm256_setzero_pd();
208
209         /* Start inner kernel loop */
210         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
211         {
212
213             /* Get j neighbor index, and coordinate index */
214             jnrA             = jjnr[jidx];
215             jnrB             = jjnr[jidx+1];
216             jnrC             = jjnr[jidx+2];
217             jnrD             = jjnr[jidx+3];
218             j_coord_offsetA  = DIM*jnrA;
219             j_coord_offsetB  = DIM*jnrB;
220             j_coord_offsetC  = DIM*jnrC;
221             j_coord_offsetD  = DIM*jnrD;
222
223             /* load j atom coordinates */
224             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
225                                                  x+j_coord_offsetC,x+j_coord_offsetD,
226                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
227
228             /* Calculate displacement vector */
229             dx00             = _mm256_sub_pd(ix0,jx0);
230             dy00             = _mm256_sub_pd(iy0,jy0);
231             dz00             = _mm256_sub_pd(iz0,jz0);
232             dx01             = _mm256_sub_pd(ix0,jx1);
233             dy01             = _mm256_sub_pd(iy0,jy1);
234             dz01             = _mm256_sub_pd(iz0,jz1);
235             dx02             = _mm256_sub_pd(ix0,jx2);
236             dy02             = _mm256_sub_pd(iy0,jy2);
237             dz02             = _mm256_sub_pd(iz0,jz2);
238             dx10             = _mm256_sub_pd(ix1,jx0);
239             dy10             = _mm256_sub_pd(iy1,jy0);
240             dz10             = _mm256_sub_pd(iz1,jz0);
241             dx11             = _mm256_sub_pd(ix1,jx1);
242             dy11             = _mm256_sub_pd(iy1,jy1);
243             dz11             = _mm256_sub_pd(iz1,jz1);
244             dx12             = _mm256_sub_pd(ix1,jx2);
245             dy12             = _mm256_sub_pd(iy1,jy2);
246             dz12             = _mm256_sub_pd(iz1,jz2);
247             dx20             = _mm256_sub_pd(ix2,jx0);
248             dy20             = _mm256_sub_pd(iy2,jy0);
249             dz20             = _mm256_sub_pd(iz2,jz0);
250             dx21             = _mm256_sub_pd(ix2,jx1);
251             dy21             = _mm256_sub_pd(iy2,jy1);
252             dz21             = _mm256_sub_pd(iz2,jz1);
253             dx22             = _mm256_sub_pd(ix2,jx2);
254             dy22             = _mm256_sub_pd(iy2,jy2);
255             dz22             = _mm256_sub_pd(iz2,jz2);
256
257             /* Calculate squared distance and things based on it */
258             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
259             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
260             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
261             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
262             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
263             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
264             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
265             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
266             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
267
268             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
269             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
270             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
271             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
272             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
273             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
274             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
275             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
276             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
277
278             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
279             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
280             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
281             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
282             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
283             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
284             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
285             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
286             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
287
288             fjx0             = _mm256_setzero_pd();
289             fjy0             = _mm256_setzero_pd();
290             fjz0             = _mm256_setzero_pd();
291             fjx1             = _mm256_setzero_pd();
292             fjy1             = _mm256_setzero_pd();
293             fjz1             = _mm256_setzero_pd();
294             fjx2             = _mm256_setzero_pd();
295             fjy2             = _mm256_setzero_pd();
296             fjz2             = _mm256_setzero_pd();
297
298             /**************************
299              * CALCULATE INTERACTIONS *
300              **************************/
301
302             if (gmx_mm256_any_lt(rsq00,rcutoff2))
303             {
304
305             r00              = _mm256_mul_pd(rsq00,rinv00);
306
307             /* EWALD ELECTROSTATICS */
308
309             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
310             ewrt             = _mm256_mul_pd(r00,ewtabscale);
311             ewitab           = _mm256_cvttpd_epi32(ewrt);
312             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
313             ewitab           = _mm_slli_epi32(ewitab,2);
314             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
315             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
316             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
317             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
318             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
319             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
320             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
321             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
322             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
323
324             d                = _mm256_sub_pd(r00,rswitch);
325             d                = _mm256_max_pd(d,_mm256_setzero_pd());
326             d2               = _mm256_mul_pd(d,d);
327             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
328
329             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
330
331             /* Evaluate switch function */
332             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
333             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
334             velec            = _mm256_mul_pd(velec,sw);
335             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
336
337             /* Update potential sum for this i atom from the interaction with this j atom. */
338             velec            = _mm256_and_pd(velec,cutoff_mask);
339             velecsum         = _mm256_add_pd(velecsum,velec);
340
341             fscal            = felec;
342
343             fscal            = _mm256_and_pd(fscal,cutoff_mask);
344
345             /* Calculate temporary vectorial force */
346             tx               = _mm256_mul_pd(fscal,dx00);
347             ty               = _mm256_mul_pd(fscal,dy00);
348             tz               = _mm256_mul_pd(fscal,dz00);
349
350             /* Update vectorial force */
351             fix0             = _mm256_add_pd(fix0,tx);
352             fiy0             = _mm256_add_pd(fiy0,ty);
353             fiz0             = _mm256_add_pd(fiz0,tz);
354
355             fjx0             = _mm256_add_pd(fjx0,tx);
356             fjy0             = _mm256_add_pd(fjy0,ty);
357             fjz0             = _mm256_add_pd(fjz0,tz);
358
359             }
360
361             /**************************
362              * CALCULATE INTERACTIONS *
363              **************************/
364
365             if (gmx_mm256_any_lt(rsq01,rcutoff2))
366             {
367
368             r01              = _mm256_mul_pd(rsq01,rinv01);
369
370             /* EWALD ELECTROSTATICS */
371
372             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
373             ewrt             = _mm256_mul_pd(r01,ewtabscale);
374             ewitab           = _mm256_cvttpd_epi32(ewrt);
375             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
376             ewitab           = _mm_slli_epi32(ewitab,2);
377             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
378             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
379             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
380             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
381             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
382             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
383             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
384             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
385             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
386
387             d                = _mm256_sub_pd(r01,rswitch);
388             d                = _mm256_max_pd(d,_mm256_setzero_pd());
389             d2               = _mm256_mul_pd(d,d);
390             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
391
392             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
393
394             /* Evaluate switch function */
395             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
396             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
397             velec            = _mm256_mul_pd(velec,sw);
398             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
399
400             /* Update potential sum for this i atom from the interaction with this j atom. */
401             velec            = _mm256_and_pd(velec,cutoff_mask);
402             velecsum         = _mm256_add_pd(velecsum,velec);
403
404             fscal            = felec;
405
406             fscal            = _mm256_and_pd(fscal,cutoff_mask);
407
408             /* Calculate temporary vectorial force */
409             tx               = _mm256_mul_pd(fscal,dx01);
410             ty               = _mm256_mul_pd(fscal,dy01);
411             tz               = _mm256_mul_pd(fscal,dz01);
412
413             /* Update vectorial force */
414             fix0             = _mm256_add_pd(fix0,tx);
415             fiy0             = _mm256_add_pd(fiy0,ty);
416             fiz0             = _mm256_add_pd(fiz0,tz);
417
418             fjx1             = _mm256_add_pd(fjx1,tx);
419             fjy1             = _mm256_add_pd(fjy1,ty);
420             fjz1             = _mm256_add_pd(fjz1,tz);
421
422             }
423
424             /**************************
425              * CALCULATE INTERACTIONS *
426              **************************/
427
428             if (gmx_mm256_any_lt(rsq02,rcutoff2))
429             {
430
431             r02              = _mm256_mul_pd(rsq02,rinv02);
432
433             /* EWALD ELECTROSTATICS */
434
435             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
436             ewrt             = _mm256_mul_pd(r02,ewtabscale);
437             ewitab           = _mm256_cvttpd_epi32(ewrt);
438             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
439             ewitab           = _mm_slli_epi32(ewitab,2);
440             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
441             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
442             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
443             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
444             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
445             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
446             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
447             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
448             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
449
450             d                = _mm256_sub_pd(r02,rswitch);
451             d                = _mm256_max_pd(d,_mm256_setzero_pd());
452             d2               = _mm256_mul_pd(d,d);
453             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
454
455             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
456
457             /* Evaluate switch function */
458             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
459             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
460             velec            = _mm256_mul_pd(velec,sw);
461             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
462
463             /* Update potential sum for this i atom from the interaction with this j atom. */
464             velec            = _mm256_and_pd(velec,cutoff_mask);
465             velecsum         = _mm256_add_pd(velecsum,velec);
466
467             fscal            = felec;
468
469             fscal            = _mm256_and_pd(fscal,cutoff_mask);
470
471             /* Calculate temporary vectorial force */
472             tx               = _mm256_mul_pd(fscal,dx02);
473             ty               = _mm256_mul_pd(fscal,dy02);
474             tz               = _mm256_mul_pd(fscal,dz02);
475
476             /* Update vectorial force */
477             fix0             = _mm256_add_pd(fix0,tx);
478             fiy0             = _mm256_add_pd(fiy0,ty);
479             fiz0             = _mm256_add_pd(fiz0,tz);
480
481             fjx2             = _mm256_add_pd(fjx2,tx);
482             fjy2             = _mm256_add_pd(fjy2,ty);
483             fjz2             = _mm256_add_pd(fjz2,tz);
484
485             }
486
487             /**************************
488              * CALCULATE INTERACTIONS *
489              **************************/
490
491             if (gmx_mm256_any_lt(rsq10,rcutoff2))
492             {
493
494             r10              = _mm256_mul_pd(rsq10,rinv10);
495
496             /* EWALD ELECTROSTATICS */
497
498             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
499             ewrt             = _mm256_mul_pd(r10,ewtabscale);
500             ewitab           = _mm256_cvttpd_epi32(ewrt);
501             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
502             ewitab           = _mm_slli_epi32(ewitab,2);
503             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
504             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
505             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
506             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
507             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
508             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
509             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
510             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
511             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
512
513             d                = _mm256_sub_pd(r10,rswitch);
514             d                = _mm256_max_pd(d,_mm256_setzero_pd());
515             d2               = _mm256_mul_pd(d,d);
516             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
517
518             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
519
520             /* Evaluate switch function */
521             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
522             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
523             velec            = _mm256_mul_pd(velec,sw);
524             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
525
526             /* Update potential sum for this i atom from the interaction with this j atom. */
527             velec            = _mm256_and_pd(velec,cutoff_mask);
528             velecsum         = _mm256_add_pd(velecsum,velec);
529
530             fscal            = felec;
531
532             fscal            = _mm256_and_pd(fscal,cutoff_mask);
533
534             /* Calculate temporary vectorial force */
535             tx               = _mm256_mul_pd(fscal,dx10);
536             ty               = _mm256_mul_pd(fscal,dy10);
537             tz               = _mm256_mul_pd(fscal,dz10);
538
539             /* Update vectorial force */
540             fix1             = _mm256_add_pd(fix1,tx);
541             fiy1             = _mm256_add_pd(fiy1,ty);
542             fiz1             = _mm256_add_pd(fiz1,tz);
543
544             fjx0             = _mm256_add_pd(fjx0,tx);
545             fjy0             = _mm256_add_pd(fjy0,ty);
546             fjz0             = _mm256_add_pd(fjz0,tz);
547
548             }
549
550             /**************************
551              * CALCULATE INTERACTIONS *
552              **************************/
553
554             if (gmx_mm256_any_lt(rsq11,rcutoff2))
555             {
556
557             r11              = _mm256_mul_pd(rsq11,rinv11);
558
559             /* EWALD ELECTROSTATICS */
560
561             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
562             ewrt             = _mm256_mul_pd(r11,ewtabscale);
563             ewitab           = _mm256_cvttpd_epi32(ewrt);
564             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
565             ewitab           = _mm_slli_epi32(ewitab,2);
566             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
567             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
568             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
569             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
570             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
571             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
572             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
573             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
574             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
575
576             d                = _mm256_sub_pd(r11,rswitch);
577             d                = _mm256_max_pd(d,_mm256_setzero_pd());
578             d2               = _mm256_mul_pd(d,d);
579             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
580
581             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
582
583             /* Evaluate switch function */
584             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
585             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
586             velec            = _mm256_mul_pd(velec,sw);
587             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
588
589             /* Update potential sum for this i atom from the interaction with this j atom. */
590             velec            = _mm256_and_pd(velec,cutoff_mask);
591             velecsum         = _mm256_add_pd(velecsum,velec);
592
593             fscal            = felec;
594
595             fscal            = _mm256_and_pd(fscal,cutoff_mask);
596
597             /* Calculate temporary vectorial force */
598             tx               = _mm256_mul_pd(fscal,dx11);
599             ty               = _mm256_mul_pd(fscal,dy11);
600             tz               = _mm256_mul_pd(fscal,dz11);
601
602             /* Update vectorial force */
603             fix1             = _mm256_add_pd(fix1,tx);
604             fiy1             = _mm256_add_pd(fiy1,ty);
605             fiz1             = _mm256_add_pd(fiz1,tz);
606
607             fjx1             = _mm256_add_pd(fjx1,tx);
608             fjy1             = _mm256_add_pd(fjy1,ty);
609             fjz1             = _mm256_add_pd(fjz1,tz);
610
611             }
612
613             /**************************
614              * CALCULATE INTERACTIONS *
615              **************************/
616
617             if (gmx_mm256_any_lt(rsq12,rcutoff2))
618             {
619
620             r12              = _mm256_mul_pd(rsq12,rinv12);
621
622             /* EWALD ELECTROSTATICS */
623
624             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
625             ewrt             = _mm256_mul_pd(r12,ewtabscale);
626             ewitab           = _mm256_cvttpd_epi32(ewrt);
627             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
628             ewitab           = _mm_slli_epi32(ewitab,2);
629             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
630             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
631             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
632             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
633             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
634             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
635             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
636             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
637             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
638
639             d                = _mm256_sub_pd(r12,rswitch);
640             d                = _mm256_max_pd(d,_mm256_setzero_pd());
641             d2               = _mm256_mul_pd(d,d);
642             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
643
644             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
645
646             /* Evaluate switch function */
647             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
648             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
649             velec            = _mm256_mul_pd(velec,sw);
650             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
651
652             /* Update potential sum for this i atom from the interaction with this j atom. */
653             velec            = _mm256_and_pd(velec,cutoff_mask);
654             velecsum         = _mm256_add_pd(velecsum,velec);
655
656             fscal            = felec;
657
658             fscal            = _mm256_and_pd(fscal,cutoff_mask);
659
660             /* Calculate temporary vectorial force */
661             tx               = _mm256_mul_pd(fscal,dx12);
662             ty               = _mm256_mul_pd(fscal,dy12);
663             tz               = _mm256_mul_pd(fscal,dz12);
664
665             /* Update vectorial force */
666             fix1             = _mm256_add_pd(fix1,tx);
667             fiy1             = _mm256_add_pd(fiy1,ty);
668             fiz1             = _mm256_add_pd(fiz1,tz);
669
670             fjx2             = _mm256_add_pd(fjx2,tx);
671             fjy2             = _mm256_add_pd(fjy2,ty);
672             fjz2             = _mm256_add_pd(fjz2,tz);
673
674             }
675
676             /**************************
677              * CALCULATE INTERACTIONS *
678              **************************/
679
680             if (gmx_mm256_any_lt(rsq20,rcutoff2))
681             {
682
683             r20              = _mm256_mul_pd(rsq20,rinv20);
684
685             /* EWALD ELECTROSTATICS */
686
687             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
688             ewrt             = _mm256_mul_pd(r20,ewtabscale);
689             ewitab           = _mm256_cvttpd_epi32(ewrt);
690             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
691             ewitab           = _mm_slli_epi32(ewitab,2);
692             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
693             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
694             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
695             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
696             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
697             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
698             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
699             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
700             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
701
702             d                = _mm256_sub_pd(r20,rswitch);
703             d                = _mm256_max_pd(d,_mm256_setzero_pd());
704             d2               = _mm256_mul_pd(d,d);
705             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
706
707             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
708
709             /* Evaluate switch function */
710             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
711             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
712             velec            = _mm256_mul_pd(velec,sw);
713             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
714
715             /* Update potential sum for this i atom from the interaction with this j atom. */
716             velec            = _mm256_and_pd(velec,cutoff_mask);
717             velecsum         = _mm256_add_pd(velecsum,velec);
718
719             fscal            = felec;
720
721             fscal            = _mm256_and_pd(fscal,cutoff_mask);
722
723             /* Calculate temporary vectorial force */
724             tx               = _mm256_mul_pd(fscal,dx20);
725             ty               = _mm256_mul_pd(fscal,dy20);
726             tz               = _mm256_mul_pd(fscal,dz20);
727
728             /* Update vectorial force */
729             fix2             = _mm256_add_pd(fix2,tx);
730             fiy2             = _mm256_add_pd(fiy2,ty);
731             fiz2             = _mm256_add_pd(fiz2,tz);
732
733             fjx0             = _mm256_add_pd(fjx0,tx);
734             fjy0             = _mm256_add_pd(fjy0,ty);
735             fjz0             = _mm256_add_pd(fjz0,tz);
736
737             }
738
739             /**************************
740              * CALCULATE INTERACTIONS *
741              **************************/
742
743             if (gmx_mm256_any_lt(rsq21,rcutoff2))
744             {
745
746             r21              = _mm256_mul_pd(rsq21,rinv21);
747
748             /* EWALD ELECTROSTATICS */
749
750             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
751             ewrt             = _mm256_mul_pd(r21,ewtabscale);
752             ewitab           = _mm256_cvttpd_epi32(ewrt);
753             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
754             ewitab           = _mm_slli_epi32(ewitab,2);
755             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
756             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
757             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
758             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
759             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
760             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
761             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
762             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
763             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
764
765             d                = _mm256_sub_pd(r21,rswitch);
766             d                = _mm256_max_pd(d,_mm256_setzero_pd());
767             d2               = _mm256_mul_pd(d,d);
768             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
769
770             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
771
772             /* Evaluate switch function */
773             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
774             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
775             velec            = _mm256_mul_pd(velec,sw);
776             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
777
778             /* Update potential sum for this i atom from the interaction with this j atom. */
779             velec            = _mm256_and_pd(velec,cutoff_mask);
780             velecsum         = _mm256_add_pd(velecsum,velec);
781
782             fscal            = felec;
783
784             fscal            = _mm256_and_pd(fscal,cutoff_mask);
785
786             /* Calculate temporary vectorial force */
787             tx               = _mm256_mul_pd(fscal,dx21);
788             ty               = _mm256_mul_pd(fscal,dy21);
789             tz               = _mm256_mul_pd(fscal,dz21);
790
791             /* Update vectorial force */
792             fix2             = _mm256_add_pd(fix2,tx);
793             fiy2             = _mm256_add_pd(fiy2,ty);
794             fiz2             = _mm256_add_pd(fiz2,tz);
795
796             fjx1             = _mm256_add_pd(fjx1,tx);
797             fjy1             = _mm256_add_pd(fjy1,ty);
798             fjz1             = _mm256_add_pd(fjz1,tz);
799
800             }
801
802             /**************************
803              * CALCULATE INTERACTIONS *
804              **************************/
805
806             if (gmx_mm256_any_lt(rsq22,rcutoff2))
807             {
808
809             r22              = _mm256_mul_pd(rsq22,rinv22);
810
811             /* EWALD ELECTROSTATICS */
812
813             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
814             ewrt             = _mm256_mul_pd(r22,ewtabscale);
815             ewitab           = _mm256_cvttpd_epi32(ewrt);
816             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
817             ewitab           = _mm_slli_epi32(ewitab,2);
818             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
819             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
820             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
821             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
822             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
823             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
824             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
825             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
826             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
827
828             d                = _mm256_sub_pd(r22,rswitch);
829             d                = _mm256_max_pd(d,_mm256_setzero_pd());
830             d2               = _mm256_mul_pd(d,d);
831             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
832
833             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
834
835             /* Evaluate switch function */
836             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
837             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
838             velec            = _mm256_mul_pd(velec,sw);
839             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
840
841             /* Update potential sum for this i atom from the interaction with this j atom. */
842             velec            = _mm256_and_pd(velec,cutoff_mask);
843             velecsum         = _mm256_add_pd(velecsum,velec);
844
845             fscal            = felec;
846
847             fscal            = _mm256_and_pd(fscal,cutoff_mask);
848
849             /* Calculate temporary vectorial force */
850             tx               = _mm256_mul_pd(fscal,dx22);
851             ty               = _mm256_mul_pd(fscal,dy22);
852             tz               = _mm256_mul_pd(fscal,dz22);
853
854             /* Update vectorial force */
855             fix2             = _mm256_add_pd(fix2,tx);
856             fiy2             = _mm256_add_pd(fiy2,ty);
857             fiz2             = _mm256_add_pd(fiz2,tz);
858
859             fjx2             = _mm256_add_pd(fjx2,tx);
860             fjy2             = _mm256_add_pd(fjy2,ty);
861             fjz2             = _mm256_add_pd(fjz2,tz);
862
863             }
864
865             fjptrA             = f+j_coord_offsetA;
866             fjptrB             = f+j_coord_offsetB;
867             fjptrC             = f+j_coord_offsetC;
868             fjptrD             = f+j_coord_offsetD;
869
870             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
871                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
872
873             /* Inner loop uses 585 flops */
874         }
875
876         if(jidx<j_index_end)
877         {
878
879             /* Get j neighbor index, and coordinate index */
880             jnrlistA         = jjnr[jidx];
881             jnrlistB         = jjnr[jidx+1];
882             jnrlistC         = jjnr[jidx+2];
883             jnrlistD         = jjnr[jidx+3];
884             /* Sign of each element will be negative for non-real atoms.
885              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
886              * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
887              */
888             tmpmask0 = gmx_mm_castsi128_pd(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
889
890             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
891             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
892             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
893
894             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
895             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
896             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
897             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
898             j_coord_offsetA  = DIM*jnrA;
899             j_coord_offsetB  = DIM*jnrB;
900             j_coord_offsetC  = DIM*jnrC;
901             j_coord_offsetD  = DIM*jnrD;
902
903             /* load j atom coordinates */
904             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
905                                                  x+j_coord_offsetC,x+j_coord_offsetD,
906                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
907
908             /* Calculate displacement vector */
909             dx00             = _mm256_sub_pd(ix0,jx0);
910             dy00             = _mm256_sub_pd(iy0,jy0);
911             dz00             = _mm256_sub_pd(iz0,jz0);
912             dx01             = _mm256_sub_pd(ix0,jx1);
913             dy01             = _mm256_sub_pd(iy0,jy1);
914             dz01             = _mm256_sub_pd(iz0,jz1);
915             dx02             = _mm256_sub_pd(ix0,jx2);
916             dy02             = _mm256_sub_pd(iy0,jy2);
917             dz02             = _mm256_sub_pd(iz0,jz2);
918             dx10             = _mm256_sub_pd(ix1,jx0);
919             dy10             = _mm256_sub_pd(iy1,jy0);
920             dz10             = _mm256_sub_pd(iz1,jz0);
921             dx11             = _mm256_sub_pd(ix1,jx1);
922             dy11             = _mm256_sub_pd(iy1,jy1);
923             dz11             = _mm256_sub_pd(iz1,jz1);
924             dx12             = _mm256_sub_pd(ix1,jx2);
925             dy12             = _mm256_sub_pd(iy1,jy2);
926             dz12             = _mm256_sub_pd(iz1,jz2);
927             dx20             = _mm256_sub_pd(ix2,jx0);
928             dy20             = _mm256_sub_pd(iy2,jy0);
929             dz20             = _mm256_sub_pd(iz2,jz0);
930             dx21             = _mm256_sub_pd(ix2,jx1);
931             dy21             = _mm256_sub_pd(iy2,jy1);
932             dz21             = _mm256_sub_pd(iz2,jz1);
933             dx22             = _mm256_sub_pd(ix2,jx2);
934             dy22             = _mm256_sub_pd(iy2,jy2);
935             dz22             = _mm256_sub_pd(iz2,jz2);
936
937             /* Calculate squared distance and things based on it */
938             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
939             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
940             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
941             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
942             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
943             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
944             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
945             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
946             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
947
948             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
949             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
950             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
951             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
952             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
953             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
954             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
955             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
956             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
957
958             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
959             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
960             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
961             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
962             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
963             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
964             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
965             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
966             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
967
968             fjx0             = _mm256_setzero_pd();
969             fjy0             = _mm256_setzero_pd();
970             fjz0             = _mm256_setzero_pd();
971             fjx1             = _mm256_setzero_pd();
972             fjy1             = _mm256_setzero_pd();
973             fjz1             = _mm256_setzero_pd();
974             fjx2             = _mm256_setzero_pd();
975             fjy2             = _mm256_setzero_pd();
976             fjz2             = _mm256_setzero_pd();
977
978             /**************************
979              * CALCULATE INTERACTIONS *
980              **************************/
981
982             if (gmx_mm256_any_lt(rsq00,rcutoff2))
983             {
984
985             r00              = _mm256_mul_pd(rsq00,rinv00);
986             r00              = _mm256_andnot_pd(dummy_mask,r00);
987
988             /* EWALD ELECTROSTATICS */
989
990             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
991             ewrt             = _mm256_mul_pd(r00,ewtabscale);
992             ewitab           = _mm256_cvttpd_epi32(ewrt);
993             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
994             ewitab           = _mm_slli_epi32(ewitab,2);
995             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
996             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
997             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
998             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
999             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1000             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1001             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1002             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
1003             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1004
1005             d                = _mm256_sub_pd(r00,rswitch);
1006             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1007             d2               = _mm256_mul_pd(d,d);
1008             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1009
1010             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1011
1012             /* Evaluate switch function */
1013             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1014             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
1015             velec            = _mm256_mul_pd(velec,sw);
1016             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1017
1018             /* Update potential sum for this i atom from the interaction with this j atom. */
1019             velec            = _mm256_and_pd(velec,cutoff_mask);
1020             velec            = _mm256_andnot_pd(dummy_mask,velec);
1021             velecsum         = _mm256_add_pd(velecsum,velec);
1022
1023             fscal            = felec;
1024
1025             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1026
1027             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1028
1029             /* Calculate temporary vectorial force */
1030             tx               = _mm256_mul_pd(fscal,dx00);
1031             ty               = _mm256_mul_pd(fscal,dy00);
1032             tz               = _mm256_mul_pd(fscal,dz00);
1033
1034             /* Update vectorial force */
1035             fix0             = _mm256_add_pd(fix0,tx);
1036             fiy0             = _mm256_add_pd(fiy0,ty);
1037             fiz0             = _mm256_add_pd(fiz0,tz);
1038
1039             fjx0             = _mm256_add_pd(fjx0,tx);
1040             fjy0             = _mm256_add_pd(fjy0,ty);
1041             fjz0             = _mm256_add_pd(fjz0,tz);
1042
1043             }
1044
1045             /**************************
1046              * CALCULATE INTERACTIONS *
1047              **************************/
1048
1049             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1050             {
1051
1052             r01              = _mm256_mul_pd(rsq01,rinv01);
1053             r01              = _mm256_andnot_pd(dummy_mask,r01);
1054
1055             /* EWALD ELECTROSTATICS */
1056
1057             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1058             ewrt             = _mm256_mul_pd(r01,ewtabscale);
1059             ewitab           = _mm256_cvttpd_epi32(ewrt);
1060             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1061             ewitab           = _mm_slli_epi32(ewitab,2);
1062             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1063             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1064             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1065             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1066             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1067             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1068             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1069             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
1070             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
1071
1072             d                = _mm256_sub_pd(r01,rswitch);
1073             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1074             d2               = _mm256_mul_pd(d,d);
1075             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1076
1077             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1078
1079             /* Evaluate switch function */
1080             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1081             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
1082             velec            = _mm256_mul_pd(velec,sw);
1083             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1084
1085             /* Update potential sum for this i atom from the interaction with this j atom. */
1086             velec            = _mm256_and_pd(velec,cutoff_mask);
1087             velec            = _mm256_andnot_pd(dummy_mask,velec);
1088             velecsum         = _mm256_add_pd(velecsum,velec);
1089
1090             fscal            = felec;
1091
1092             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1093
1094             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1095
1096             /* Calculate temporary vectorial force */
1097             tx               = _mm256_mul_pd(fscal,dx01);
1098             ty               = _mm256_mul_pd(fscal,dy01);
1099             tz               = _mm256_mul_pd(fscal,dz01);
1100
1101             /* Update vectorial force */
1102             fix0             = _mm256_add_pd(fix0,tx);
1103             fiy0             = _mm256_add_pd(fiy0,ty);
1104             fiz0             = _mm256_add_pd(fiz0,tz);
1105
1106             fjx1             = _mm256_add_pd(fjx1,tx);
1107             fjy1             = _mm256_add_pd(fjy1,ty);
1108             fjz1             = _mm256_add_pd(fjz1,tz);
1109
1110             }
1111
1112             /**************************
1113              * CALCULATE INTERACTIONS *
1114              **************************/
1115
1116             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1117             {
1118
1119             r02              = _mm256_mul_pd(rsq02,rinv02);
1120             r02              = _mm256_andnot_pd(dummy_mask,r02);
1121
1122             /* EWALD ELECTROSTATICS */
1123
1124             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1125             ewrt             = _mm256_mul_pd(r02,ewtabscale);
1126             ewitab           = _mm256_cvttpd_epi32(ewrt);
1127             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1128             ewitab           = _mm_slli_epi32(ewitab,2);
1129             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1130             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1131             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1132             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1133             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1134             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1135             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1136             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
1137             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
1138
1139             d                = _mm256_sub_pd(r02,rswitch);
1140             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1141             d2               = _mm256_mul_pd(d,d);
1142             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1143
1144             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1145
1146             /* Evaluate switch function */
1147             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1148             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
1149             velec            = _mm256_mul_pd(velec,sw);
1150             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1151
1152             /* Update potential sum for this i atom from the interaction with this j atom. */
1153             velec            = _mm256_and_pd(velec,cutoff_mask);
1154             velec            = _mm256_andnot_pd(dummy_mask,velec);
1155             velecsum         = _mm256_add_pd(velecsum,velec);
1156
1157             fscal            = felec;
1158
1159             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1160
1161             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1162
1163             /* Calculate temporary vectorial force */
1164             tx               = _mm256_mul_pd(fscal,dx02);
1165             ty               = _mm256_mul_pd(fscal,dy02);
1166             tz               = _mm256_mul_pd(fscal,dz02);
1167
1168             /* Update vectorial force */
1169             fix0             = _mm256_add_pd(fix0,tx);
1170             fiy0             = _mm256_add_pd(fiy0,ty);
1171             fiz0             = _mm256_add_pd(fiz0,tz);
1172
1173             fjx2             = _mm256_add_pd(fjx2,tx);
1174             fjy2             = _mm256_add_pd(fjy2,ty);
1175             fjz2             = _mm256_add_pd(fjz2,tz);
1176
1177             }
1178
1179             /**************************
1180              * CALCULATE INTERACTIONS *
1181              **************************/
1182
1183             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1184             {
1185
1186             r10              = _mm256_mul_pd(rsq10,rinv10);
1187             r10              = _mm256_andnot_pd(dummy_mask,r10);
1188
1189             /* EWALD ELECTROSTATICS */
1190
1191             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1192             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1193             ewitab           = _mm256_cvttpd_epi32(ewrt);
1194             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1195             ewitab           = _mm_slli_epi32(ewitab,2);
1196             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1197             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1198             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1199             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1200             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1201             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1202             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1203             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
1204             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1205
1206             d                = _mm256_sub_pd(r10,rswitch);
1207             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1208             d2               = _mm256_mul_pd(d,d);
1209             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1210
1211             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1212
1213             /* Evaluate switch function */
1214             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1215             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1216             velec            = _mm256_mul_pd(velec,sw);
1217             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1218
1219             /* Update potential sum for this i atom from the interaction with this j atom. */
1220             velec            = _mm256_and_pd(velec,cutoff_mask);
1221             velec            = _mm256_andnot_pd(dummy_mask,velec);
1222             velecsum         = _mm256_add_pd(velecsum,velec);
1223
1224             fscal            = felec;
1225
1226             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1227
1228             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1229
1230             /* Calculate temporary vectorial force */
1231             tx               = _mm256_mul_pd(fscal,dx10);
1232             ty               = _mm256_mul_pd(fscal,dy10);
1233             tz               = _mm256_mul_pd(fscal,dz10);
1234
1235             /* Update vectorial force */
1236             fix1             = _mm256_add_pd(fix1,tx);
1237             fiy1             = _mm256_add_pd(fiy1,ty);
1238             fiz1             = _mm256_add_pd(fiz1,tz);
1239
1240             fjx0             = _mm256_add_pd(fjx0,tx);
1241             fjy0             = _mm256_add_pd(fjy0,ty);
1242             fjz0             = _mm256_add_pd(fjz0,tz);
1243
1244             }
1245
1246             /**************************
1247              * CALCULATE INTERACTIONS *
1248              **************************/
1249
1250             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1251             {
1252
1253             r11              = _mm256_mul_pd(rsq11,rinv11);
1254             r11              = _mm256_andnot_pd(dummy_mask,r11);
1255
1256             /* EWALD ELECTROSTATICS */
1257
1258             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1259             ewrt             = _mm256_mul_pd(r11,ewtabscale);
1260             ewitab           = _mm256_cvttpd_epi32(ewrt);
1261             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1262             ewitab           = _mm_slli_epi32(ewitab,2);
1263             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1264             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1265             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1266             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1267             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1268             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1269             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1270             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
1271             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1272
1273             d                = _mm256_sub_pd(r11,rswitch);
1274             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1275             d2               = _mm256_mul_pd(d,d);
1276             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1277
1278             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1279
1280             /* Evaluate switch function */
1281             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1282             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
1283             velec            = _mm256_mul_pd(velec,sw);
1284             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1285
1286             /* Update potential sum for this i atom from the interaction with this j atom. */
1287             velec            = _mm256_and_pd(velec,cutoff_mask);
1288             velec            = _mm256_andnot_pd(dummy_mask,velec);
1289             velecsum         = _mm256_add_pd(velecsum,velec);
1290
1291             fscal            = felec;
1292
1293             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1294
1295             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1296
1297             /* Calculate temporary vectorial force */
1298             tx               = _mm256_mul_pd(fscal,dx11);
1299             ty               = _mm256_mul_pd(fscal,dy11);
1300             tz               = _mm256_mul_pd(fscal,dz11);
1301
1302             /* Update vectorial force */
1303             fix1             = _mm256_add_pd(fix1,tx);
1304             fiy1             = _mm256_add_pd(fiy1,ty);
1305             fiz1             = _mm256_add_pd(fiz1,tz);
1306
1307             fjx1             = _mm256_add_pd(fjx1,tx);
1308             fjy1             = _mm256_add_pd(fjy1,ty);
1309             fjz1             = _mm256_add_pd(fjz1,tz);
1310
1311             }
1312
1313             /**************************
1314              * CALCULATE INTERACTIONS *
1315              **************************/
1316
1317             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1318             {
1319
1320             r12              = _mm256_mul_pd(rsq12,rinv12);
1321             r12              = _mm256_andnot_pd(dummy_mask,r12);
1322
1323             /* EWALD ELECTROSTATICS */
1324
1325             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1326             ewrt             = _mm256_mul_pd(r12,ewtabscale);
1327             ewitab           = _mm256_cvttpd_epi32(ewrt);
1328             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1329             ewitab           = _mm_slli_epi32(ewitab,2);
1330             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1331             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1332             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1333             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1334             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1335             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1336             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1337             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
1338             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
1339
1340             d                = _mm256_sub_pd(r12,rswitch);
1341             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1342             d2               = _mm256_mul_pd(d,d);
1343             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1344
1345             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1346
1347             /* Evaluate switch function */
1348             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1349             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
1350             velec            = _mm256_mul_pd(velec,sw);
1351             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1352
1353             /* Update potential sum for this i atom from the interaction with this j atom. */
1354             velec            = _mm256_and_pd(velec,cutoff_mask);
1355             velec            = _mm256_andnot_pd(dummy_mask,velec);
1356             velecsum         = _mm256_add_pd(velecsum,velec);
1357
1358             fscal            = felec;
1359
1360             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1361
1362             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1363
1364             /* Calculate temporary vectorial force */
1365             tx               = _mm256_mul_pd(fscal,dx12);
1366             ty               = _mm256_mul_pd(fscal,dy12);
1367             tz               = _mm256_mul_pd(fscal,dz12);
1368
1369             /* Update vectorial force */
1370             fix1             = _mm256_add_pd(fix1,tx);
1371             fiy1             = _mm256_add_pd(fiy1,ty);
1372             fiz1             = _mm256_add_pd(fiz1,tz);
1373
1374             fjx2             = _mm256_add_pd(fjx2,tx);
1375             fjy2             = _mm256_add_pd(fjy2,ty);
1376             fjz2             = _mm256_add_pd(fjz2,tz);
1377
1378             }
1379
1380             /**************************
1381              * CALCULATE INTERACTIONS *
1382              **************************/
1383
1384             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1385             {
1386
1387             r20              = _mm256_mul_pd(rsq20,rinv20);
1388             r20              = _mm256_andnot_pd(dummy_mask,r20);
1389
1390             /* EWALD ELECTROSTATICS */
1391
1392             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1393             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1394             ewitab           = _mm256_cvttpd_epi32(ewrt);
1395             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1396             ewitab           = _mm_slli_epi32(ewitab,2);
1397             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1398             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1399             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1400             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1401             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1402             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1403             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1404             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1405             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1406
1407             d                = _mm256_sub_pd(r20,rswitch);
1408             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1409             d2               = _mm256_mul_pd(d,d);
1410             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1411
1412             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1413
1414             /* Evaluate switch function */
1415             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1416             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
1417             velec            = _mm256_mul_pd(velec,sw);
1418             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1419
1420             /* Update potential sum for this i atom from the interaction with this j atom. */
1421             velec            = _mm256_and_pd(velec,cutoff_mask);
1422             velec            = _mm256_andnot_pd(dummy_mask,velec);
1423             velecsum         = _mm256_add_pd(velecsum,velec);
1424
1425             fscal            = felec;
1426
1427             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1428
1429             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1430
1431             /* Calculate temporary vectorial force */
1432             tx               = _mm256_mul_pd(fscal,dx20);
1433             ty               = _mm256_mul_pd(fscal,dy20);
1434             tz               = _mm256_mul_pd(fscal,dz20);
1435
1436             /* Update vectorial force */
1437             fix2             = _mm256_add_pd(fix2,tx);
1438             fiy2             = _mm256_add_pd(fiy2,ty);
1439             fiz2             = _mm256_add_pd(fiz2,tz);
1440
1441             fjx0             = _mm256_add_pd(fjx0,tx);
1442             fjy0             = _mm256_add_pd(fjy0,ty);
1443             fjz0             = _mm256_add_pd(fjz0,tz);
1444
1445             }
1446
1447             /**************************
1448              * CALCULATE INTERACTIONS *
1449              **************************/
1450
1451             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1452             {
1453
1454             r21              = _mm256_mul_pd(rsq21,rinv21);
1455             r21              = _mm256_andnot_pd(dummy_mask,r21);
1456
1457             /* EWALD ELECTROSTATICS */
1458
1459             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1460             ewrt             = _mm256_mul_pd(r21,ewtabscale);
1461             ewitab           = _mm256_cvttpd_epi32(ewrt);
1462             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1463             ewitab           = _mm_slli_epi32(ewitab,2);
1464             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1465             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1466             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1467             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1468             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1469             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1470             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1471             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
1472             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
1473
1474             d                = _mm256_sub_pd(r21,rswitch);
1475             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1476             d2               = _mm256_mul_pd(d,d);
1477             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1478
1479             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1480
1481             /* Evaluate switch function */
1482             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1483             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
1484             velec            = _mm256_mul_pd(velec,sw);
1485             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1486
1487             /* Update potential sum for this i atom from the interaction with this j atom. */
1488             velec            = _mm256_and_pd(velec,cutoff_mask);
1489             velec            = _mm256_andnot_pd(dummy_mask,velec);
1490             velecsum         = _mm256_add_pd(velecsum,velec);
1491
1492             fscal            = felec;
1493
1494             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1495
1496             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1497
1498             /* Calculate temporary vectorial force */
1499             tx               = _mm256_mul_pd(fscal,dx21);
1500             ty               = _mm256_mul_pd(fscal,dy21);
1501             tz               = _mm256_mul_pd(fscal,dz21);
1502
1503             /* Update vectorial force */
1504             fix2             = _mm256_add_pd(fix2,tx);
1505             fiy2             = _mm256_add_pd(fiy2,ty);
1506             fiz2             = _mm256_add_pd(fiz2,tz);
1507
1508             fjx1             = _mm256_add_pd(fjx1,tx);
1509             fjy1             = _mm256_add_pd(fjy1,ty);
1510             fjz1             = _mm256_add_pd(fjz1,tz);
1511
1512             }
1513
1514             /**************************
1515              * CALCULATE INTERACTIONS *
1516              **************************/
1517
1518             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1519             {
1520
1521             r22              = _mm256_mul_pd(rsq22,rinv22);
1522             r22              = _mm256_andnot_pd(dummy_mask,r22);
1523
1524             /* EWALD ELECTROSTATICS */
1525
1526             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1527             ewrt             = _mm256_mul_pd(r22,ewtabscale);
1528             ewitab           = _mm256_cvttpd_epi32(ewrt);
1529             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1530             ewitab           = _mm_slli_epi32(ewitab,2);
1531             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1532             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1533             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1534             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1535             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1536             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1537             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1538             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
1539             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
1540
1541             d                = _mm256_sub_pd(r22,rswitch);
1542             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1543             d2               = _mm256_mul_pd(d,d);
1544             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1545
1546             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1547
1548             /* Evaluate switch function */
1549             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1550             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
1551             velec            = _mm256_mul_pd(velec,sw);
1552             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1553
1554             /* Update potential sum for this i atom from the interaction with this j atom. */
1555             velec            = _mm256_and_pd(velec,cutoff_mask);
1556             velec            = _mm256_andnot_pd(dummy_mask,velec);
1557             velecsum         = _mm256_add_pd(velecsum,velec);
1558
1559             fscal            = felec;
1560
1561             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1562
1563             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1564
1565             /* Calculate temporary vectorial force */
1566             tx               = _mm256_mul_pd(fscal,dx22);
1567             ty               = _mm256_mul_pd(fscal,dy22);
1568             tz               = _mm256_mul_pd(fscal,dz22);
1569
1570             /* Update vectorial force */
1571             fix2             = _mm256_add_pd(fix2,tx);
1572             fiy2             = _mm256_add_pd(fiy2,ty);
1573             fiz2             = _mm256_add_pd(fiz2,tz);
1574
1575             fjx2             = _mm256_add_pd(fjx2,tx);
1576             fjy2             = _mm256_add_pd(fjy2,ty);
1577             fjz2             = _mm256_add_pd(fjz2,tz);
1578
1579             }
1580
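            /* Scatter the accumulated j forces.  Entries with jnrlist < 0 are padding at
             * the end of the neighbor list; their contributions (already zeroed through
             * dummy_mask above) are dumped into the local scratch buffer instead of the
             * force array.
             */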
1581             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1582             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1583             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1584             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1585
1586             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1587                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1588
1589             /* Inner loop uses 594 flops */
1590         }
1591
1592         /* End of innermost loop */
1593
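        /* Reduce the SIMD i-atom force accumulators over the four lanes and add the
         * result to the per-atom forces and to the shift-force array (used for the virial).
         */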
1594         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1595                                                  f+i_coord_offset,fshift+i_shift_offset);
1596
1597         ggid                        = gid[iidx];
1598         /* Update potential energies */
1599         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1600
1601         /* Increment number of inner iterations */
1602         inneriter                  += j_index_end - j_index_start;
1603
1604         /* Outer loop uses 19 flops */
1605     }
1606
1607     /* Increment number of outer iterations */
1608     outeriter        += nri;
1609
1610     /* Update outer/inner flops */
1611
1612     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*594);
1613 }
1614 /*
1615  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_double
1616  * Electrostatics interaction: Ewald
1617  * VdW interaction:            None
1618  * Geometry:                   Water3-Water3
1619  * Calculate force/pot:        Force
1620  */
1621 void
1622 nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_double
1623                     (t_nblist * gmx_restrict                nlist,
1624                      rvec * gmx_restrict                    xx,
1625                      rvec * gmx_restrict                    ff,
1626                      t_forcerec * gmx_restrict              fr,
1627                      t_mdatoms * gmx_restrict               mdatoms,
1628                      nb_kernel_data_t * gmx_restrict        kernel_data,
1629                      t_nrnb * gmx_restrict                  nrnb)
1630 {
1631     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1632      * just 0 for non-waters.
1633      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1634      * jnr indices corresponding to data put in the four positions in the SIMD register.
1635      */
1636     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1637     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1638     int              jnrA,jnrB,jnrC,jnrD;
1639     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1640     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1641     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1642     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1643     real             rcutoff_scalar;
1644     real             *shiftvec,*fshift,*x,*f;
1645     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1646     real             scratch[4*DIM];
1647     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1648     real *           vdwioffsetptr0;
1649     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1650     real *           vdwioffsetptr1;
1651     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1652     real *           vdwioffsetptr2;
1653     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1654     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1655     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1656     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1657     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1658     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1659     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1660     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1661     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1662     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1663     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1664     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1665     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1666     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1667     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1668     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1669     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1670     real             *charge;
1671     __m128i          ewitab;
1672     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1673     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1674     real             *ewtab;
1675     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1676     real             rswitch_scalar,d_scalar;
1677     __m256d          dummy_mask,cutoff_mask;
1678     __m128           tmpmask0,tmpmask1;
1679     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1680     __m256d          one     = _mm256_set1_pd(1.0);
1681     __m256d          two     = _mm256_set1_pd(2.0);
1682     x                = xx[0];
1683     f                = ff[0];
1684
1685     nri              = nlist->nri;
1686     iinr             = nlist->iinr;
1687     jindex           = nlist->jindex;
1688     jjnr             = nlist->jjnr;
1689     shiftidx         = nlist->shift;
1690     gid              = nlist->gid;
1691     shiftvec         = fr->shift_vec[0];
1692     fshift           = fr->fshift[0];
1693     facel            = _mm256_set1_pd(fr->epsfac);
1694     charge           = mdatoms->chargeA;
1695
1696     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
1697     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
1698     beta2            = _mm256_mul_pd(beta,beta);
1699     beta3            = _mm256_mul_pd(beta,beta2);
1700
1701     ewtab            = fr->ic->tabq_coul_FDV0;
1702     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
1703     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1704
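    /* tabq_coul_FDV0 stores four doubles per table point (F, D, V and a zero pad), which
     * is why the inner loop shifts the table index left by two bits and loads/transposes
     * four consecutive values per lane.  ewtabhalfspace = 0.5/tabq_scale is half the table
     * spacing, used for the quadratic potential correction.
     */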
1705     /* Setup water-specific parameters */
1706     inr              = nlist->iinr[0];
1707     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1708     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1709     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1710
1711     jq0              = _mm256_set1_pd(charge[inr+0]);
1712     jq1              = _mm256_set1_pd(charge[inr+1]);
1713     jq2              = _mm256_set1_pd(charge[inr+2]);
1714     qq00             = _mm256_mul_pd(iq0,jq0);
1715     qq01             = _mm256_mul_pd(iq0,jq1);
1716     qq02             = _mm256_mul_pd(iq0,jq2);
1717     qq10             = _mm256_mul_pd(iq1,jq0);
1718     qq11             = _mm256_mul_pd(iq1,jq1);
1719     qq12             = _mm256_mul_pd(iq1,jq2);
1720     qq20             = _mm256_mul_pd(iq2,jq0);
1721     qq21             = _mm256_mul_pd(iq2,jq1);
1722     qq22             = _mm256_mul_pd(iq2,jq2);
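    /* The water kernel assumes every water carries the same three charges, so the nine
     * charge products are formed once here, with the Coulomb prefactor facel already
     * folded into iq0..iq2.
     */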
1723
1724     /* With explicit cutoffs the value must be identical for electrostatics and VdW, so we arbitrarily use the electrostatics cutoff */
1725     rcutoff_scalar   = fr->rcoulomb;
1726     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1727     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
1728
1729     rswitch_scalar   = fr->rcoulomb_switch;
1730     rswitch          = _mm256_set1_pd(rswitch_scalar);
1731     /* Setup switch parameters */
1732     d_scalar         = rcutoff_scalar-rswitch_scalar;
1733     d                = _mm256_set1_pd(d_scalar);
1734     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
1735     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1736     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1737     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
1738     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1739     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
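    /* With t = (r - rswitch)/(rcutoff - rswitch), these coefficients implement the
     * standard fifth-order switch S = 1 - 10*t^3 + 15*t^4 - 6*t^5 and its derivative,
     * expressed in powers of (r - rswitch) so the inner loop can evaluate them directly.
     */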
1740
1741     /* Initialize to avoid spurious compiler warnings */
1742     jnrA = jnrB = jnrC = jnrD = 0;
1743     j_coord_offsetA = 0;
1744     j_coord_offsetB = 0;
1745     j_coord_offsetC = 0;
1746     j_coord_offsetD = 0;
1747
1748     outeriter        = 0;
1749     inneriter        = 0;
1750
1751     for(iidx=0;iidx<4*DIM;iidx++)
1752     {
1753         scratch[iidx] = 0.0;
1754     }
1755
1756     /* Start outer loop over neighborlists */
1757     for(iidx=0; iidx<nri; iidx++)
1758     {
1759         /* Load shift vector for this list */
1760         i_shift_offset   = DIM*shiftidx[iidx];
1761
1762         /* Load limits for loop over neighbors */
1763         j_index_start    = jindex[iidx];
1764         j_index_end      = jindex[iidx+1];
1765
1766         /* Get outer coordinate index */
1767         inr              = iinr[iidx];
1768         i_coord_offset   = DIM*inr;
1769
1770         /* Load i particle coords and add shift vector */
1771         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1772                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1773
1774         fix0             = _mm256_setzero_pd();
1775         fiy0             = _mm256_setzero_pd();
1776         fiz0             = _mm256_setzero_pd();
1777         fix1             = _mm256_setzero_pd();
1778         fiy1             = _mm256_setzero_pd();
1779         fiz1             = _mm256_setzero_pd();
1780         fix2             = _mm256_setzero_pd();
1781         fiy2             = _mm256_setzero_pd();
1782         fiz2             = _mm256_setzero_pd();
1783
1784         /* Start inner kernel loop */
1785         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1786         {
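            /* This stride-4 loop requires a full quadruplet of valid j indices (padding
             * entries in jjnr are negative); a masked epilogue loop, like the one in the
             * VF kernel above, handles any remaining entries.
             */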
1787
1788             /* Get j neighbor index, and coordinate index */
1789             jnrA             = jjnr[jidx];
1790             jnrB             = jjnr[jidx+1];
1791             jnrC             = jjnr[jidx+2];
1792             jnrD             = jjnr[jidx+3];
1793             j_coord_offsetA  = DIM*jnrA;
1794             j_coord_offsetB  = DIM*jnrB;
1795             j_coord_offsetC  = DIM*jnrC;
1796             j_coord_offsetD  = DIM*jnrD;
1797
1798             /* load j atom coordinates */
1799             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1800                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1801                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1802
1803             /* Calculate displacement vector */
1804             dx00             = _mm256_sub_pd(ix0,jx0);
1805             dy00             = _mm256_sub_pd(iy0,jy0);
1806             dz00             = _mm256_sub_pd(iz0,jz0);
1807             dx01             = _mm256_sub_pd(ix0,jx1);
1808             dy01             = _mm256_sub_pd(iy0,jy1);
1809             dz01             = _mm256_sub_pd(iz0,jz1);
1810             dx02             = _mm256_sub_pd(ix0,jx2);
1811             dy02             = _mm256_sub_pd(iy0,jy2);
1812             dz02             = _mm256_sub_pd(iz0,jz2);
1813             dx10             = _mm256_sub_pd(ix1,jx0);
1814             dy10             = _mm256_sub_pd(iy1,jy0);
1815             dz10             = _mm256_sub_pd(iz1,jz0);
1816             dx11             = _mm256_sub_pd(ix1,jx1);
1817             dy11             = _mm256_sub_pd(iy1,jy1);
1818             dz11             = _mm256_sub_pd(iz1,jz1);
1819             dx12             = _mm256_sub_pd(ix1,jx2);
1820             dy12             = _mm256_sub_pd(iy1,jy2);
1821             dz12             = _mm256_sub_pd(iz1,jz2);
1822             dx20             = _mm256_sub_pd(ix2,jx0);
1823             dy20             = _mm256_sub_pd(iy2,jy0);
1824             dz20             = _mm256_sub_pd(iz2,jz0);
1825             dx21             = _mm256_sub_pd(ix2,jx1);
1826             dy21             = _mm256_sub_pd(iy2,jy1);
1827             dz21             = _mm256_sub_pd(iz2,jz1);
1828             dx22             = _mm256_sub_pd(ix2,jx2);
1829             dy22             = _mm256_sub_pd(iy2,jy2);
1830             dz22             = _mm256_sub_pd(iz2,jz2);
1831
1832             /* Calculate squared distance and things based on it */
1833             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1834             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1835             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1836             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1837             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1838             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1839             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1840             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1841             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1842
1843             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1844             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1845             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1846             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1847             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1848             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1849             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1850             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1851             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1852
1853             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1854             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1855             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1856             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1857             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1858             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1859             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1860             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1861             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1862
1863             fjx0             = _mm256_setzero_pd();
1864             fjy0             = _mm256_setzero_pd();
1865             fjz0             = _mm256_setzero_pd();
1866             fjx1             = _mm256_setzero_pd();
1867             fjy1             = _mm256_setzero_pd();
1868             fjz1             = _mm256_setzero_pd();
1869             fjx2             = _mm256_setzero_pd();
1870             fjy2             = _mm256_setzero_pd();
1871             fjz2             = _mm256_setzero_pd();
1872
1873             /**************************
1874              * CALCULATE INTERACTIONS *
1875              **************************/
1876
1877             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1878             {
1879
1880             r00              = _mm256_mul_pd(rsq00,rinv00);
1881
1882             /* EWALD ELECTROSTATICS */
1883
1884             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1885             ewrt             = _mm256_mul_pd(r00,ewtabscale);
1886             ewitab           = _mm256_cvttpd_epi32(ewrt);
1887             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1888             ewitab           = _mm_slli_epi32(ewitab,2);
1889             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1890             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1891             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1892             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1893             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1894             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1895             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1896             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
1897             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1898
1899             d                = _mm256_sub_pd(r00,rswitch);
1900             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1901             d2               = _mm256_mul_pd(d,d);
1902             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1903
1904             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1905
1906             /* Evaluate switch function */
1907             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1908             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
1909             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1910
1911             fscal            = felec;
1912
1913             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1914
1915             /* Calculate temporary vectorial force */
1916             tx               = _mm256_mul_pd(fscal,dx00);
1917             ty               = _mm256_mul_pd(fscal,dy00);
1918             tz               = _mm256_mul_pd(fscal,dz00);
1919
1920             /* Update vectorial force */
1921             fix0             = _mm256_add_pd(fix0,tx);
1922             fiy0             = _mm256_add_pd(fiy0,ty);
1923             fiz0             = _mm256_add_pd(fiz0,tz);
1924
1925             fjx0             = _mm256_add_pd(fjx0,tx);
1926             fjy0             = _mm256_add_pd(fjy0,ty);
1927             fjz0             = _mm256_add_pd(fjz0,tz);
1928
1929             }
1930
1931             /**************************
1932              * CALCULATE INTERACTIONS *
1933              **************************/
1934
1935             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1936             {
1937
1938             r01              = _mm256_mul_pd(rsq01,rinv01);
1939
1940             /* EWALD ELECTROSTATICS */
1941
1942             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
1943             ewrt             = _mm256_mul_pd(r01,ewtabscale);
1944             ewitab           = _mm256_cvttpd_epi32(ewrt);
1945             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1946             ewitab           = _mm_slli_epi32(ewitab,2);
1947             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1948             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1949             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1950             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1951             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1952             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1953             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1954             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
1955             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
1956
1957             d                = _mm256_sub_pd(r01,rswitch);
1958             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1959             d2               = _mm256_mul_pd(d,d);
1960             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1961
1962             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1963
1964             /* Evaluate switch function */
1965             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1966             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
1967             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1968
1969             fscal            = felec;
1970
1971             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1972
1973             /* Calculate temporary vectorial force */
1974             tx               = _mm256_mul_pd(fscal,dx01);
1975             ty               = _mm256_mul_pd(fscal,dy01);
1976             tz               = _mm256_mul_pd(fscal,dz01);
1977
1978             /* Update vectorial force */
1979             fix0             = _mm256_add_pd(fix0,tx);
1980             fiy0             = _mm256_add_pd(fiy0,ty);
1981             fiz0             = _mm256_add_pd(fiz0,tz);
1982
1983             fjx1             = _mm256_add_pd(fjx1,tx);
1984             fjy1             = _mm256_add_pd(fjy1,ty);
1985             fjz1             = _mm256_add_pd(fjz1,tz);
1986
1987             }
1988
1989             /**************************
1990              * CALCULATE INTERACTIONS *
1991              **************************/
1992
1993             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1994             {
1995
1996             r02              = _mm256_mul_pd(rsq02,rinv02);
1997
1998             /* EWALD ELECTROSTATICS */
1999
2000             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
2001             ewrt             = _mm256_mul_pd(r02,ewtabscale);
2002             ewitab           = _mm256_cvttpd_epi32(ewrt);
2003             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2004             ewitab           = _mm_slli_epi32(ewitab,2);
2005             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2006             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2007             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2008             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2009             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2010             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2011             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2012             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
2013             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
2014
2015             d                = _mm256_sub_pd(r02,rswitch);
2016             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2017             d2               = _mm256_mul_pd(d,d);
2018             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2019
2020             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2021
2022             /* Evaluate switch function */
2023             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2024             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
2025             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2026
2027             fscal            = felec;
2028
2029             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2030
2031             /* Calculate temporary vectorial force */
2032             tx               = _mm256_mul_pd(fscal,dx02);
2033             ty               = _mm256_mul_pd(fscal,dy02);
2034             tz               = _mm256_mul_pd(fscal,dz02);
2035
2036             /* Update vectorial force */
2037             fix0             = _mm256_add_pd(fix0,tx);
2038             fiy0             = _mm256_add_pd(fiy0,ty);
2039             fiz0             = _mm256_add_pd(fiz0,tz);
2040
2041             fjx2             = _mm256_add_pd(fjx2,tx);
2042             fjy2             = _mm256_add_pd(fjy2,ty);
2043             fjz2             = _mm256_add_pd(fjz2,tz);
2044
2045             }
2046
2047             /**************************
2048              * CALCULATE INTERACTIONS *
2049              **************************/
2050
2051             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2052             {
2053
2054             r10              = _mm256_mul_pd(rsq10,rinv10);
2055
2056             /* EWALD ELECTROSTATICS */
2057
2058             /* Calculate Ewald table index by multiplying r by the table scale and truncating to an integer */
2059             ewrt             = _mm256_mul_pd(r10,ewtabscale);
2060             ewitab           = _mm256_cvttpd_epi32(ewrt);
2061             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2062             ewitab           = _mm_slli_epi32(ewitab,2);
2063             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2064             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2065             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2066             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2067             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2068             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2069             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2070             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
2071             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
2072
2073             d                = _mm256_sub_pd(r10,rswitch);
2074             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2075             d2               = _mm256_mul_pd(d,d);
2076             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2077
2078             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
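            /* Editorial note: with d = max(r - rswitch, 0), sw is the quintic switch
             * 1 + swV3*d^3 + swV4*d^4 + swV5*d^5 in Horner form and dsw is its
             * derivative swF2*d^2 + swF3*d^3 + swF4*d^4 (so swF2 = 3*swV3 and so on);
             * both feed the product-rule correction of felec two statements below.
             */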
2079
2080             /* Evaluate switch function */
2081             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2082             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
2083             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2084
2085             fscal            = felec;
2086
2087             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2088
2089             /* Calculate temporary vectorial force */
2090             tx               = _mm256_mul_pd(fscal,dx10);
2091             ty               = _mm256_mul_pd(fscal,dy10);
2092             tz               = _mm256_mul_pd(fscal,dz10);
2093
2094             /* Update vectorial force */
2095             fix1             = _mm256_add_pd(fix1,tx);
2096             fiy1             = _mm256_add_pd(fiy1,ty);
2097             fiz1             = _mm256_add_pd(fiz1,tz);
2098
2099             fjx0             = _mm256_add_pd(fjx0,tx);
2100             fjy0             = _mm256_add_pd(fjy0,ty);
2101             fjz0             = _mm256_add_pd(fjz0,tz);
2102
2103             }
2104
2105             /**************************
2106              * CALCULATE INTERACTIONS *
2107              **************************/
2108
2109             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2110             {
2111
2112             r11              = _mm256_mul_pd(rsq11,rinv11);
2113
2114             /* EWALD ELECTROSTATICS */
2115
2116             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2117             ewrt             = _mm256_mul_pd(r11,ewtabscale);
2118             ewitab           = _mm256_cvttpd_epi32(ewrt);
2119             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2120             ewitab           = _mm_slli_epi32(ewitab,2);
2121             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2122             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2123             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2124             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2125             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2126             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2127             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2128             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2129             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2130
2131             d                = _mm256_sub_pd(r11,rswitch);
2132             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2133             d2               = _mm256_mul_pd(d,d);
2134             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2135
2136             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2137
2138             /* Evaluate switch function */
2139             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2140             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2141             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2142
2143             fscal            = felec;
2144
2145             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2146
2147             /* Calculate temporary vectorial force */
2148             tx               = _mm256_mul_pd(fscal,dx11);
2149             ty               = _mm256_mul_pd(fscal,dy11);
2150             tz               = _mm256_mul_pd(fscal,dz11);
2151
2152             /* Update vectorial force */
2153             fix1             = _mm256_add_pd(fix1,tx);
2154             fiy1             = _mm256_add_pd(fiy1,ty);
2155             fiz1             = _mm256_add_pd(fiz1,tz);
2156
2157             fjx1             = _mm256_add_pd(fjx1,tx);
2158             fjy1             = _mm256_add_pd(fjy1,ty);
2159             fjz1             = _mm256_add_pd(fjz1,tz);
2160
2161             }
2162
2163             /**************************
2164              * CALCULATE INTERACTIONS *
2165              **************************/
2166
2167             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2168             {
2169
2170             r12              = _mm256_mul_pd(rsq12,rinv12);
2171
2172             /* EWALD ELECTROSTATICS */
2173
2174             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2175             ewrt             = _mm256_mul_pd(r12,ewtabscale);
2176             ewitab           = _mm256_cvttpd_epi32(ewrt);
2177             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2178             ewitab           = _mm_slli_epi32(ewitab,2);
2179             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2180             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2181             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2182             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2183             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2184             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2185             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2186             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2187             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2188
2189             d                = _mm256_sub_pd(r12,rswitch);
2190             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2191             d2               = _mm256_mul_pd(d,d);
2192             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2193
2194             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2195
2196             /* Evaluate switch function */
2197             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2198             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2199             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2200
2201             fscal            = felec;
2202
2203             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2204
2205             /* Calculate temporary vectorial force */
2206             tx               = _mm256_mul_pd(fscal,dx12);
2207             ty               = _mm256_mul_pd(fscal,dy12);
2208             tz               = _mm256_mul_pd(fscal,dz12);
2209
2210             /* Update vectorial force */
2211             fix1             = _mm256_add_pd(fix1,tx);
2212             fiy1             = _mm256_add_pd(fiy1,ty);
2213             fiz1             = _mm256_add_pd(fiz1,tz);
2214
2215             fjx2             = _mm256_add_pd(fjx2,tx);
2216             fjy2             = _mm256_add_pd(fjy2,ty);
2217             fjz2             = _mm256_add_pd(fjz2,tz);
2218
2219             }
2220
2221             /**************************
2222              * CALCULATE INTERACTIONS *
2223              **************************/
2224
2225             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2226             {
2227
2228             r20              = _mm256_mul_pd(rsq20,rinv20);
2229
2230             /* EWALD ELECTROSTATICS */
2231
2232             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2233             ewrt             = _mm256_mul_pd(r20,ewtabscale);
2234             ewitab           = _mm256_cvttpd_epi32(ewrt);
2235             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2236             ewitab           = _mm_slli_epi32(ewitab,2);
2237             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2238             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2239             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2240             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2241             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2242             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2243             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2244             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
2245             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2246
2247             d                = _mm256_sub_pd(r20,rswitch);
2248             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2249             d2               = _mm256_mul_pd(d,d);
2250             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2251
2252             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2253
2254             /* Evaluate switch function */
2255             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2256             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
2257             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2258
2259             fscal            = felec;
2260
2261             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2262
2263             /* Calculate temporary vectorial force */
2264             tx               = _mm256_mul_pd(fscal,dx20);
2265             ty               = _mm256_mul_pd(fscal,dy20);
2266             tz               = _mm256_mul_pd(fscal,dz20);
2267
2268             /* Update vectorial force */
2269             fix2             = _mm256_add_pd(fix2,tx);
2270             fiy2             = _mm256_add_pd(fiy2,ty);
2271             fiz2             = _mm256_add_pd(fiz2,tz);
2272
2273             fjx0             = _mm256_add_pd(fjx0,tx);
2274             fjy0             = _mm256_add_pd(fjy0,ty);
2275             fjz0             = _mm256_add_pd(fjz0,tz);
2276
2277             }
2278
2279             /**************************
2280              * CALCULATE INTERACTIONS *
2281              **************************/
2282
2283             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2284             {
2285
2286             r21              = _mm256_mul_pd(rsq21,rinv21);
2287
2288             /* EWALD ELECTROSTATICS */
2289
2290             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2291             ewrt             = _mm256_mul_pd(r21,ewtabscale);
2292             ewitab           = _mm256_cvttpd_epi32(ewrt);
2293             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2294             ewitab           = _mm_slli_epi32(ewitab,2);
2295             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2296             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2297             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2298             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2299             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2300             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2301             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2302             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
2303             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2304
2305             d                = _mm256_sub_pd(r21,rswitch);
2306             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2307             d2               = _mm256_mul_pd(d,d);
2308             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2309
2310             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2311
2312             /* Evaluate switch function */
2313             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2314             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
2315             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2316
2317             fscal            = felec;
2318
2319             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2320
2321             /* Calculate temporary vectorial force */
2322             tx               = _mm256_mul_pd(fscal,dx21);
2323             ty               = _mm256_mul_pd(fscal,dy21);
2324             tz               = _mm256_mul_pd(fscal,dz21);
2325
2326             /* Update vectorial force */
2327             fix2             = _mm256_add_pd(fix2,tx);
2328             fiy2             = _mm256_add_pd(fiy2,ty);
2329             fiz2             = _mm256_add_pd(fiz2,tz);
2330
2331             fjx1             = _mm256_add_pd(fjx1,tx);
2332             fjy1             = _mm256_add_pd(fjy1,ty);
2333             fjz1             = _mm256_add_pd(fjz1,tz);
2334
2335             }
2336
2337             /**************************
2338              * CALCULATE INTERACTIONS *
2339              **************************/
2340
2341             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2342             {
2343
2344             r22              = _mm256_mul_pd(rsq22,rinv22);
2345
2346             /* EWALD ELECTROSTATICS */
2347
2348             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2349             ewrt             = _mm256_mul_pd(r22,ewtabscale);
2350             ewitab           = _mm256_cvttpd_epi32(ewrt);
2351             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2352             ewitab           = _mm_slli_epi32(ewitab,2);
2353             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2354             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2355             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2356             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2357             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2358             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2359             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2360             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
2361             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
2362
2363             d                = _mm256_sub_pd(r22,rswitch);
2364             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2365             d2               = _mm256_mul_pd(d,d);
2366             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2367
2368             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2369
2370             /* Evaluate switch function */
2371             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2372             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
2373             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2374
2375             fscal            = felec;
2376
2377             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2378
2379             /* Calculate temporary vectorial force */
2380             tx               = _mm256_mul_pd(fscal,dx22);
2381             ty               = _mm256_mul_pd(fscal,dy22);
2382             tz               = _mm256_mul_pd(fscal,dz22);
2383
2384             /* Update vectorial force */
2385             fix2             = _mm256_add_pd(fix2,tx);
2386             fiy2             = _mm256_add_pd(fiy2,ty);
2387             fiz2             = _mm256_add_pd(fiz2,tz);
2388
2389             fjx2             = _mm256_add_pd(fjx2,tx);
2390             fjy2             = _mm256_add_pd(fjy2,ty);
2391             fjz2             = _mm256_add_pd(fjz2,tz);
2392
2393             }
2394
2395             fjptrA             = f+j_coord_offsetA;
2396             fjptrB             = f+j_coord_offsetB;
2397             fjptrC             = f+j_coord_offsetC;
2398             fjptrD             = f+j_coord_offsetD;
2399
2400             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2401                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2402
2403             /* Inner loop uses 558 flops */
2404         }
2405
2406         if(jidx<j_index_end)
2407         {
2408
2409             /* Get j neighbor index, and coordinate index */
2410             jnrlistA         = jjnr[jidx];
2411             jnrlistB         = jjnr[jidx+1];
2412             jnrlistC         = jjnr[jidx+2];
2413             jnrlistD         = jjnr[jidx+3];
2414             /* Sign of each element will be negative for non-real atoms.
2415              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2416              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
2417              */
2418             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
2419
2420             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
2421             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
2422             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
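            /* Editorial note: the two permutes duplicate each 32-bit comparison result
             * into a pair of 32-bit slots (elements 0,0,1,1 in the low half, 2,2,3,3 in
             * the high half), so after the casts dummy_mask has one all-ones or all-zeros
             * 64-bit lane per j atom and can be applied directly to __m256d values with
             * _mm256_andnot_pd.
             */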
2423
2424             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2425             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2426             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2427             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2428             j_coord_offsetA  = DIM*jnrA;
2429             j_coord_offsetB  = DIM*jnrB;
2430             j_coord_offsetC  = DIM*jnrC;
2431             j_coord_offsetD  = DIM*jnrD;
2432
2433             /* load j atom coordinates */
2434             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
2435                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2436                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
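            /* Editorial note: this helper presumably gathers the three atom positions
             * (9 doubles) of each of the four j waters and transposes them so that
             * jx0..jz2 each hold one coordinate for all four j atoms.
             */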
2437
2438             /* Calculate displacement vector */
2439             dx00             = _mm256_sub_pd(ix0,jx0);
2440             dy00             = _mm256_sub_pd(iy0,jy0);
2441             dz00             = _mm256_sub_pd(iz0,jz0);
2442             dx01             = _mm256_sub_pd(ix0,jx1);
2443             dy01             = _mm256_sub_pd(iy0,jy1);
2444             dz01             = _mm256_sub_pd(iz0,jz1);
2445             dx02             = _mm256_sub_pd(ix0,jx2);
2446             dy02             = _mm256_sub_pd(iy0,jy2);
2447             dz02             = _mm256_sub_pd(iz0,jz2);
2448             dx10             = _mm256_sub_pd(ix1,jx0);
2449             dy10             = _mm256_sub_pd(iy1,jy0);
2450             dz10             = _mm256_sub_pd(iz1,jz0);
2451             dx11             = _mm256_sub_pd(ix1,jx1);
2452             dy11             = _mm256_sub_pd(iy1,jy1);
2453             dz11             = _mm256_sub_pd(iz1,jz1);
2454             dx12             = _mm256_sub_pd(ix1,jx2);
2455             dy12             = _mm256_sub_pd(iy1,jy2);
2456             dz12             = _mm256_sub_pd(iz1,jz2);
2457             dx20             = _mm256_sub_pd(ix2,jx0);
2458             dy20             = _mm256_sub_pd(iy2,jy0);
2459             dz20             = _mm256_sub_pd(iz2,jz0);
2460             dx21             = _mm256_sub_pd(ix2,jx1);
2461             dy21             = _mm256_sub_pd(iy2,jy1);
2462             dz21             = _mm256_sub_pd(iz2,jz1);
2463             dx22             = _mm256_sub_pd(ix2,jx2);
2464             dy22             = _mm256_sub_pd(iy2,jy2);
2465             dz22             = _mm256_sub_pd(iz2,jz2);
2466
2467             /* Calculate squared distance and things based on it */
2468             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
2469             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
2470             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
2471             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
2472             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
2473             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
2474             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
2475             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
2476             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
2477
2478             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
2479             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
2480             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
2481             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
2482             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
2483             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
2484             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
2485             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
2486             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
2487
2488             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
2489             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
2490             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
2491             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
2492             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
2493             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
2494             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
2495             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
2496             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
2497
2498             fjx0             = _mm256_setzero_pd();
2499             fjy0             = _mm256_setzero_pd();
2500             fjz0             = _mm256_setzero_pd();
2501             fjx1             = _mm256_setzero_pd();
2502             fjy1             = _mm256_setzero_pd();
2503             fjz1             = _mm256_setzero_pd();
2504             fjx2             = _mm256_setzero_pd();
2505             fjy2             = _mm256_setzero_pd();
2506             fjz2             = _mm256_setzero_pd();
2507
2508             /**************************
2509              * CALCULATE INTERACTIONS *
2510              **************************/
2511
2512             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2513             {
2514
2515             r00              = _mm256_mul_pd(rsq00,rinv00);
2516             r00              = _mm256_andnot_pd(dummy_mask,r00);
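            /* Editorial note: zeroing r for padding lanes keeps a possibly undefined
             * value (e.g. 0*inf when a dummy j entry aliases the i atom) out of the
             * Ewald table index below, so the gathers stay in range; the force for
             * those lanes is cleared separately via dummy_mask.
             */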
2517
2518             /* EWALD ELECTROSTATICS */
2519
2520             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2521             ewrt             = _mm256_mul_pd(r00,ewtabscale);
2522             ewitab           = _mm256_cvttpd_epi32(ewrt);
2523             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2524             ewitab           = _mm_slli_epi32(ewitab,2);
2525             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2526             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2527             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2528             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2529             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2530             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2531             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2532             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
2533             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
2534
2535             d                = _mm256_sub_pd(r00,rswitch);
2536             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2537             d2               = _mm256_mul_pd(d,d);
2538             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2539
2540             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2541
2542             /* Evaluate switch function */
2543             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2544             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
2545             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
2546
2547             fscal            = felec;
2548
2549             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2550
2551             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2552
2553             /* Calculate temporary vectorial force */
2554             tx               = _mm256_mul_pd(fscal,dx00);
2555             ty               = _mm256_mul_pd(fscal,dy00);
2556             tz               = _mm256_mul_pd(fscal,dz00);
2557
2558             /* Update vectorial force */
2559             fix0             = _mm256_add_pd(fix0,tx);
2560             fiy0             = _mm256_add_pd(fiy0,ty);
2561             fiz0             = _mm256_add_pd(fiz0,tz);
2562
2563             fjx0             = _mm256_add_pd(fjx0,tx);
2564             fjy0             = _mm256_add_pd(fjy0,ty);
2565             fjz0             = _mm256_add_pd(fjz0,tz);
2566
2567             }
2568
2569             /**************************
2570              * CALCULATE INTERACTIONS *
2571              **************************/
2572
2573             if (gmx_mm256_any_lt(rsq01,rcutoff2))
2574             {
2575
2576             r01              = _mm256_mul_pd(rsq01,rinv01);
2577             r01              = _mm256_andnot_pd(dummy_mask,r01);
2578
2579             /* EWALD ELECTROSTATICS */
2580
2581             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2582             ewrt             = _mm256_mul_pd(r01,ewtabscale);
2583             ewitab           = _mm256_cvttpd_epi32(ewrt);
2584             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2585             ewitab           = _mm_slli_epi32(ewitab,2);
2586             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2587             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2588             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2589             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2590             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2591             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2592             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2593             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
2594             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
2595
2596             d                = _mm256_sub_pd(r01,rswitch);
2597             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2598             d2               = _mm256_mul_pd(d,d);
2599             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2600
2601             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2602
2603             /* Evaluate switch function */
2604             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2605             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
2606             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
2607
2608             fscal            = felec;
2609
2610             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2611
2612             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2613
2614             /* Calculate temporary vectorial force */
2615             tx               = _mm256_mul_pd(fscal,dx01);
2616             ty               = _mm256_mul_pd(fscal,dy01);
2617             tz               = _mm256_mul_pd(fscal,dz01);
2618
2619             /* Update vectorial force */
2620             fix0             = _mm256_add_pd(fix0,tx);
2621             fiy0             = _mm256_add_pd(fiy0,ty);
2622             fiz0             = _mm256_add_pd(fiz0,tz);
2623
2624             fjx1             = _mm256_add_pd(fjx1,tx);
2625             fjy1             = _mm256_add_pd(fjy1,ty);
2626             fjz1             = _mm256_add_pd(fjz1,tz);
2627
2628             }
2629
2630             /**************************
2631              * CALCULATE INTERACTIONS *
2632              **************************/
2633
2634             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2635             {
2636
2637             r02              = _mm256_mul_pd(rsq02,rinv02);
2638             r02              = _mm256_andnot_pd(dummy_mask,r02);
2639
2640             /* EWALD ELECTROSTATICS */
2641
2642             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2643             ewrt             = _mm256_mul_pd(r02,ewtabscale);
2644             ewitab           = _mm256_cvttpd_epi32(ewrt);
2645             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2646             ewitab           = _mm_slli_epi32(ewitab,2);
2647             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2648             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2649             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2650             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2651             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2652             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2653             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2654             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
2655             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
2656
2657             d                = _mm256_sub_pd(r02,rswitch);
2658             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2659             d2               = _mm256_mul_pd(d,d);
2660             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2661
2662             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2663
2664             /* Evaluate switch function */
2665             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2666             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
2667             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2668
2669             fscal            = felec;
2670
2671             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2672
2673             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2674
2675             /* Calculate temporary vectorial force */
2676             tx               = _mm256_mul_pd(fscal,dx02);
2677             ty               = _mm256_mul_pd(fscal,dy02);
2678             tz               = _mm256_mul_pd(fscal,dz02);
2679
2680             /* Update vectorial force */
2681             fix0             = _mm256_add_pd(fix0,tx);
2682             fiy0             = _mm256_add_pd(fiy0,ty);
2683             fiz0             = _mm256_add_pd(fiz0,tz);
2684
2685             fjx2             = _mm256_add_pd(fjx2,tx);
2686             fjy2             = _mm256_add_pd(fjy2,ty);
2687             fjz2             = _mm256_add_pd(fjz2,tz);
2688
2689             }
2690
2691             /**************************
2692              * CALCULATE INTERACTIONS *
2693              **************************/
2694
2695             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2696             {
2697
2698             r10              = _mm256_mul_pd(rsq10,rinv10);
2699             r10              = _mm256_andnot_pd(dummy_mask,r10);
2700
2701             /* EWALD ELECTROSTATICS */
2702
2703             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2704             ewrt             = _mm256_mul_pd(r10,ewtabscale);
2705             ewitab           = _mm256_cvttpd_epi32(ewrt);
2706             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2707             ewitab           = _mm_slli_epi32(ewitab,2);
2708             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2709             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2710             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2711             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2712             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2713             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2714             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2715             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
2716             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
2717
2718             d                = _mm256_sub_pd(r10,rswitch);
2719             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2720             d2               = _mm256_mul_pd(d,d);
2721             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2722
2723             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2724
2725             /* Evaluate switch function */
2726             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2727             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
2728             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2729
2730             fscal            = felec;
2731
2732             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2733
2734             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2735
2736             /* Calculate temporary vectorial force */
2737             tx               = _mm256_mul_pd(fscal,dx10);
2738             ty               = _mm256_mul_pd(fscal,dy10);
2739             tz               = _mm256_mul_pd(fscal,dz10);
2740
2741             /* Update vectorial force */
2742             fix1             = _mm256_add_pd(fix1,tx);
2743             fiy1             = _mm256_add_pd(fiy1,ty);
2744             fiz1             = _mm256_add_pd(fiz1,tz);
2745
2746             fjx0             = _mm256_add_pd(fjx0,tx);
2747             fjy0             = _mm256_add_pd(fjy0,ty);
2748             fjz0             = _mm256_add_pd(fjz0,tz);
2749
2750             }
2751
2752             /**************************
2753              * CALCULATE INTERACTIONS *
2754              **************************/
2755
2756             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2757             {
2758
2759             r11              = _mm256_mul_pd(rsq11,rinv11);
2760             r11              = _mm256_andnot_pd(dummy_mask,r11);
2761
2762             /* EWALD ELECTROSTATICS */
2763
2764             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2765             ewrt             = _mm256_mul_pd(r11,ewtabscale);
2766             ewitab           = _mm256_cvttpd_epi32(ewrt);
2767             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2768             ewitab           = _mm_slli_epi32(ewitab,2);
2769             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2770             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2771             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2772             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2773             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2774             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2775             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2776             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2777             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2778
2779             d                = _mm256_sub_pd(r11,rswitch);
2780             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2781             d2               = _mm256_mul_pd(d,d);
2782             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2783
2784             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2785
2786             /* Evaluate switch function */
2787             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2788             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2789             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2790
2791             fscal            = felec;
2792
2793             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2794
2795             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2796
2797             /* Calculate temporary vectorial force */
2798             tx               = _mm256_mul_pd(fscal,dx11);
2799             ty               = _mm256_mul_pd(fscal,dy11);
2800             tz               = _mm256_mul_pd(fscal,dz11);
2801
2802             /* Update vectorial force */
2803             fix1             = _mm256_add_pd(fix1,tx);
2804             fiy1             = _mm256_add_pd(fiy1,ty);
2805             fiz1             = _mm256_add_pd(fiz1,tz);
2806
2807             fjx1             = _mm256_add_pd(fjx1,tx);
2808             fjy1             = _mm256_add_pd(fjy1,ty);
2809             fjz1             = _mm256_add_pd(fjz1,tz);
2810
2811             }
2812
2813             /**************************
2814              * CALCULATE INTERACTIONS *
2815              **************************/
2816
2817             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2818             {
2819
2820             r12              = _mm256_mul_pd(rsq12,rinv12);
2821             r12              = _mm256_andnot_pd(dummy_mask,r12);
2822
2823             /* EWALD ELECTROSTATICS */
2824
2825             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2826             ewrt             = _mm256_mul_pd(r12,ewtabscale);
2827             ewitab           = _mm256_cvttpd_epi32(ewrt);
2828             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2829             ewitab           = _mm_slli_epi32(ewitab,2);
2830             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2831             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2832             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2833             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2834             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2835             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2836             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2837             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2838             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2839
2840             d                = _mm256_sub_pd(r12,rswitch);
2841             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2842             d2               = _mm256_mul_pd(d,d);
2843             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2844
2845             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2846
2847             /* Evaluate switch function */
2848             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2849             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2850             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2851
2852             fscal            = felec;
2853
2854             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2855
2856             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2857
2858             /* Calculate temporary vectorial force */
2859             tx               = _mm256_mul_pd(fscal,dx12);
2860             ty               = _mm256_mul_pd(fscal,dy12);
2861             tz               = _mm256_mul_pd(fscal,dz12);
2862
2863             /* Update vectorial force */
2864             fix1             = _mm256_add_pd(fix1,tx);
2865             fiy1             = _mm256_add_pd(fiy1,ty);
2866             fiz1             = _mm256_add_pd(fiz1,tz);
2867
2868             fjx2             = _mm256_add_pd(fjx2,tx);
2869             fjy2             = _mm256_add_pd(fjy2,ty);
2870             fjz2             = _mm256_add_pd(fjz2,tz);
2871
2872             }
2873
2874             /**************************
2875              * CALCULATE INTERACTIONS *
2876              **************************/
2877
2878             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2879             {
2880
2881             r20              = _mm256_mul_pd(rsq20,rinv20);
2882             r20              = _mm256_andnot_pd(dummy_mask,r20);
2883
2884             /* EWALD ELECTROSTATICS */
2885
2886             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2887             ewrt             = _mm256_mul_pd(r20,ewtabscale);
2888             ewitab           = _mm256_cvttpd_epi32(ewrt);
2889             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2890             ewitab           = _mm_slli_epi32(ewitab,2);
2891             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2892             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2893             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2894             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2895             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2896             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2897             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2898             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
2899             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2900
2901             d                = _mm256_sub_pd(r20,rswitch);
2902             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2903             d2               = _mm256_mul_pd(d,d);
2904             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2905
2906             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2907
2908             /* Evaluate switch function */
2909             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2910             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
2911             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2912
2913             fscal            = felec;
2914
2915             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2916
2917             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2918
2919             /* Calculate temporary vectorial force */
2920             tx               = _mm256_mul_pd(fscal,dx20);
2921             ty               = _mm256_mul_pd(fscal,dy20);
2922             tz               = _mm256_mul_pd(fscal,dz20);
2923
2924             /* Update vectorial force */
2925             fix2             = _mm256_add_pd(fix2,tx);
2926             fiy2             = _mm256_add_pd(fiy2,ty);
2927             fiz2             = _mm256_add_pd(fiz2,tz);
2928
2929             fjx0             = _mm256_add_pd(fjx0,tx);
2930             fjy0             = _mm256_add_pd(fjy0,ty);
2931             fjz0             = _mm256_add_pd(fjz0,tz);
2932
2933             }
2934
2935             /**************************
2936              * CALCULATE INTERACTIONS *
2937              **************************/
2938
2939             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2940             {
2941
2942             r21              = _mm256_mul_pd(rsq21,rinv21);
2943             r21              = _mm256_andnot_pd(dummy_mask,r21);
2944
2945             /* EWALD ELECTROSTATICS */
2946
2947             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2948             ewrt             = _mm256_mul_pd(r21,ewtabscale);
2949             ewitab           = _mm256_cvttpd_epi32(ewrt);
2950             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2951             ewitab           = _mm_slli_epi32(ewitab,2);
2952             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2953             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2954             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2955             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2956             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2957             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2958             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2959             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
2960             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2961
2962             d                = _mm256_sub_pd(r21,rswitch);
2963             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2964             d2               = _mm256_mul_pd(d,d);
2965             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2966
2967             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2968
2969             /* Evaluate switch function */
2970             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2971             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
2972             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2973
2974             fscal            = felec;
2975
2976             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2977
2978             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2979
2980             /* Calculate temporary vectorial force */
2981             tx               = _mm256_mul_pd(fscal,dx21);
2982             ty               = _mm256_mul_pd(fscal,dy21);
2983             tz               = _mm256_mul_pd(fscal,dz21);
2984
2985             /* Update vectorial force */
2986             fix2             = _mm256_add_pd(fix2,tx);
2987             fiy2             = _mm256_add_pd(fiy2,ty);
2988             fiz2             = _mm256_add_pd(fiz2,tz);
2989
2990             fjx1             = _mm256_add_pd(fjx1,tx);
2991             fjy1             = _mm256_add_pd(fjy1,ty);
2992             fjz1             = _mm256_add_pd(fjz1,tz);
2993
2994             }
2995
2996             /**************************
2997              * CALCULATE INTERACTIONS *
2998              **************************/
2999
3000             if (gmx_mm256_any_lt(rsq22,rcutoff2))
3001             {
3002
3003             r22              = _mm256_mul_pd(rsq22,rinv22);
3004             r22              = _mm256_andnot_pd(dummy_mask,r22);
3005
3006             /* EWALD ELECTROSTATICS */
3007
3008             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
3009             ewrt             = _mm256_mul_pd(r22,ewtabscale);
3010             ewitab           = _mm256_cvttpd_epi32(ewrt);
3011             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
3012             ewitab           = _mm_slli_epi32(ewitab,2);
3013             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
3014             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
3015             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
3016             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
3017             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
3018             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
3019             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
3020             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
3021             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
3022
3023             d                = _mm256_sub_pd(r22,rswitch);
3024             d                = _mm256_max_pd(d,_mm256_setzero_pd());
3025             d2               = _mm256_mul_pd(d,d);
3026             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
3027
3028             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
3029
3030             /* Evaluate switch function */
3031             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3032             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
3033             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
3034
3035             fscal            = felec;
3036
3037             fscal            = _mm256_and_pd(fscal,cutoff_mask);
3038
3039             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
3040
3041             /* Calculate temporary vectorial force */
3042             tx               = _mm256_mul_pd(fscal,dx22);
3043             ty               = _mm256_mul_pd(fscal,dy22);
3044             tz               = _mm256_mul_pd(fscal,dz22);
3045
3046             /* Update vectorial force */
3047             fix2             = _mm256_add_pd(fix2,tx);
3048             fiy2             = _mm256_add_pd(fiy2,ty);
3049             fiz2             = _mm256_add_pd(fiz2,tz);
3050
3051             fjx2             = _mm256_add_pd(fjx2,tx);
3052             fjy2             = _mm256_add_pd(fjy2,ty);
3053             fjz2             = _mm256_add_pd(fjz2,tz);
3054
3055             }
3056
3057             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
3058             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
3059             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
3060             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
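            /* Editorial note: padding lanes are pointed at the local scratch buffer
             * instead of the force array, so the swizzled decrement below can store
             * all four lanes unconditionally without touching real forces.
             */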
3061
3062             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
3063                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
3064
3065             /* Inner loop uses 567 flops */
3066         }
3067
3068         /* End of innermost loop */
3069
3070         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
3071                                                  f+i_coord_offset,fshift+i_shift_offset);
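        /* Editorial note: this helper presumably reduces the four SIMD lanes of the
         * per-i-atom force accumulators and adds the result to the force array and to
         * the shift force for the current i shift vector.
         */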
3072
3073         /* Increment number of inner iterations */
3074         inneriter                  += j_index_end - j_index_start;
3075
3076         /* Outer loop uses 18 flops */
3077     }
3078
3079     /* Increment number of outer iterations */
3080     outeriter        += nri;
3081
3082     /* Update outer/inner flops */
3083
3084     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*567);
3085 }
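
/*
 * Editorial sketch (not emitted by the kernel generator and excluded from the build
 * with #if 0): a minimal scalar version of the per-pair switched-Ewald force that the
 * SIMD blocks above compute, assuming the quadruplet table layout (F, D, V per point
 * at stride 4) described earlier. Parameter names mirror the kernel variables; the
 * helper name ref_elec_ewsw_fscal is invented here for illustration only.
 */
#if 0
static double
ref_elec_ewsw_fscal(double qq, double r, double rinv,
                    const double *ewtab, double ewtabscale, double ewtabhalfspace,
                    double rswitch,
                    double swV3, double swV4, double swV5,
                    double swF2, double swF3, double swF4)
{
    double ewrt  = r*ewtabscale;
    int    idx   = 4*(int)ewrt;                    /* four doubles per table point   */
    double eps   = ewrt - floor(ewrt);             /* fractional table coordinate    */
    double F     = ewtab[idx] + eps*ewtab[idx+1];  /* F + eps*D                      */
    double V     = ewtab[idx+2]
                   - ewtabhalfspace*eps*(ewtab[idx] + F);
    double velec = qq*(rinv - V);
    double felec = qq*rinv*(rinv*rinv - F);
    double d     = (r > rswitch) ? r - rswitch : 0.0;
    double sw    = 1.0 + d*d*d*(swV3 + d*(swV4 + d*swV5));
    double dsw   = d*d*(swF2 + d*(swF3 + d*swF4));

    /* fscal = felec*sw - velec*dsw/r, matching the product-rule comment in the kernel */
    return felec*sw - rinv*velec*dsw;
}
#endif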