Use full path for legacyheaders
[alexxy/gromacs.git] src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwNone_GeomW4P1_avx_256_double.c
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
 */
#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/nrnb.h"

#include "gromacs/simd/math_x86_avx_256_double.h"
#include "kernelutil_x86_avx_256_double.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_double
 * Electrostatics interaction: Ewald
 * VdW interaction:            None
 * Geometry:                   Water4-Particle
 * Calculate force/pot:        PotentialAndForce
 */
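/*
 * Orientation note (a reading of the generated code below, not part of it):
 * for each charged i atom (1,2,3 of the water) and each j particle, the loop
 * evaluates, in scalar form and assuming the usual GROMACS real-space Ewald
 * conventions,
 *
 *     r     = |x_i - x_j|
 *     v     = qq * ( 1/r - V_tab(r) )            ~ qq * erfc(beta*r) / r
 *     f     = qq * (1/r) * ( 1/r^2 - F_tab(r) )
 *     velec = v * sw(r)
 *     fscal = f * sw(r) - v * dsw(r) / r
 *
 * where V_tab/F_tab are interpolated from fr->ic->tabq_coul_FDV0, and sw/dsw
 * are the switch polynomial and its derivative set up inside the kernel.
 */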
void
nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_double
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr1;
    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    __m128i          ewitab;
    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
    real             rswitch_scalar,d_scalar;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->epsfac);
    charge           = mdatoms->chargeA;

    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_pd(beta,beta);
    beta3            = _mm256_mul_pd(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
    iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);

    rswitch_scalar   = fr->rcoulomb_switch;
    rswitch          = _mm256_set1_pd(rswitch_scalar);
    /* Setup switch parameters */
    d_scalar         = rcutoff_scalar-rswitch_scalar;
    d                = _mm256_set1_pd(d_scalar);
    swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
    swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
    swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
    swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
    swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
    swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
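    /* Note on the coefficients above: with d = r - rswitch and
     * D = rcutoff - rswitch, the inner loop evaluates
     *     sw(d)  = 1 + d^3*(swV3 + d*(swV4 + d*swV5))
     *            = 1 - 10*(d/D)^3 + 15*(d/D)^4 - 6*(d/D)^5
     *     dsw(d) = d^2*(swF2 + d*(swF3 + d*swF4))   (the derivative of sw)
     * so sw runs smoothly from 1 at r = rswitch to 0 at r = rcutoff, and the
     * force picks up the additional -v*dsw/r term from the product rule.
     */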

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
                                                    &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        fix1             = _mm256_setzero_pd();
        fiy1             = _mm256_setzero_pd();
        fiz1             = _mm256_setzero_pd();
        fix2             = _mm256_setzero_pd();
        fiy2             = _mm256_setzero_pd();
        fiz2             = _mm256_setzero_pd();
        fix3             = _mm256_setzero_pd();
        fiy3             = _mm256_setzero_pd();
        fiz3             = _mm256_setzero_pd();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_pd();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx10             = _mm256_sub_pd(ix1,jx0);
            dy10             = _mm256_sub_pd(iy1,jy0);
            dz10             = _mm256_sub_pd(iz1,jz0);
            dx20             = _mm256_sub_pd(ix2,jx0);
            dy20             = _mm256_sub_pd(iy2,jy0);
            dz20             = _mm256_sub_pd(iz2,jz0);
            dx30             = _mm256_sub_pd(ix3,jx0);
            dy30             = _mm256_sub_pd(iy3,jy0);
            dz30             = _mm256_sub_pd(iz3,jz0);

            /* Calculate squared distance and things based on it */
            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);

            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
            rinv30           = gmx_mm256_invsqrt_pd(rsq30);

            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0);

            fjx0             = _mm256_setzero_pd();
            fjy0             = _mm256_setzero_pd();
            fjz0             = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_pd(rsq10,rinv10);

            /* Compute parameters for interactions between i and j atoms */
            qq10             = _mm256_mul_pd(iq1,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r10,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
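            /* Reading of the table lookup above (assuming the FDV0 layout of
             * tabq_coul_FDV0, i.e. quadruplets F,D,V,0 per table point, hence
             * the shift left by 2): for each of the four j atoms the force is
             * interpolated as F ~ F_i + eps*D_i, the potential as
             * V ~ V_i - eps/(2*tabscale)*(F_i + F), and then
             * velec = qq*(1/r - V) and felec = qq*(1/r)*(1/r^2 - F).
             */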

            d                = _mm256_sub_pd(r10,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
            velec            = _mm256_mul_pd(velec,sw);
            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx10);
            ty               = _mm256_mul_pd(fscal,dy10);
            tz               = _mm256_mul_pd(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_pd(rsq20,rinv20);

            /* Compute parameters for interactions between i and j atoms */
            qq20             = _mm256_mul_pd(iq2,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r20,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));

            d                = _mm256_sub_pd(r20,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
            velec            = _mm256_mul_pd(velec,sw);
            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx20);
            ty               = _mm256_mul_pd(fscal,dy20);
            tz               = _mm256_mul_pd(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq30,rcutoff2))
            {

            r30              = _mm256_mul_pd(rsq30,rinv30);

            /* Compute parameters for interactions between i and j atoms */
            qq30             = _mm256_mul_pd(iq3,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r30,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));

            d                = _mm256_sub_pd(r30,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
            velec            = _mm256_mul_pd(velec,sw);
            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx30);
            ty               = _mm256_mul_pd(fscal,dy30);
            tz               = _mm256_mul_pd(fscal,dz30);

            /* Update vectorial force */
            fix3             = _mm256_add_pd(fix3,tx);
            fiy3             = _mm256_add_pd(fiy3,ty);
            fiz3             = _mm256_add_pd(fiz3,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;

            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);

            /* Inner loop uses 198 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* The sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
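            /* How the 256-bit mask is built: the 128-bit integer compare above
             * yields one 32-bit all-ones/all-zeros field per j index; the two
             * permutes duplicate fields 0,1 and 2,3 respectively, so after the
             * halves are combined each 64-bit double lane carries the full-width
             * mask for its j atom, ready for use with _mm256_andnot_pd.
             */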

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx10             = _mm256_sub_pd(ix1,jx0);
            dy10             = _mm256_sub_pd(iy1,jy0);
            dz10             = _mm256_sub_pd(iz1,jz0);
            dx20             = _mm256_sub_pd(ix2,jx0);
            dy20             = _mm256_sub_pd(iy2,jy0);
            dz20             = _mm256_sub_pd(iz2,jz0);
            dx30             = _mm256_sub_pd(ix3,jx0);
            dy30             = _mm256_sub_pd(iy3,jy0);
            dz30             = _mm256_sub_pd(iz3,jz0);

            /* Calculate squared distance and things based on it */
            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);

            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
            rinv30           = gmx_mm256_invsqrt_pd(rsq30);

            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0);

            fjx0             = _mm256_setzero_pd();
            fjy0             = _mm256_setzero_pd();
            fjz0             = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_pd(rsq10,rinv10);
            r10              = _mm256_andnot_pd(dummy_mask,r10);

            /* Compute parameters for interactions between i and j atoms */
            qq10             = _mm256_mul_pd(iq1,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r10,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));

            d                = _mm256_sub_pd(r10,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
            velec            = _mm256_mul_pd(velec,sw);
            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx10);
            ty               = _mm256_mul_pd(fscal,dy10);
            tz               = _mm256_mul_pd(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_pd(rsq20,rinv20);
            r20              = _mm256_andnot_pd(dummy_mask,r20);

            /* Compute parameters for interactions between i and j atoms */
            qq20             = _mm256_mul_pd(iq2,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r20,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));

            d                = _mm256_sub_pd(r20,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
            velec            = _mm256_mul_pd(velec,sw);
            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx20);
            ty               = _mm256_mul_pd(fscal,dy20);
            tz               = _mm256_mul_pd(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq30,rcutoff2))
            {

            r30              = _mm256_mul_pd(rsq30,rinv30);
            r30              = _mm256_andnot_pd(dummy_mask,r30);

            /* Compute parameters for interactions between i and j atoms */
            qq30             = _mm256_mul_pd(iq3,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r30,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));

            d                = _mm256_sub_pd(r30,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
            velec            = _mm256_mul_pd(velec,sw);
            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx30);
            ty               = _mm256_mul_pd(fscal,dy30);
            tz               = _mm256_mul_pd(fscal,dz30);

            /* Update vectorial force */
            fix3             = _mm256_add_pd(fix3,tx);
            fiy3             = _mm256_add_pd(fiy3,ty);
            fiz3             = _mm256_add_pd(fiz3,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;

            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);

            /* Inner loop uses 201 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 19 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*201);
}
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_double
 * Electrostatics interaction: Ewald
 * VdW interaction:            None
 * Geometry:                   Water4-Particle
 * Calculate force/pot:        Force
 */
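/*
 * This force-only kernel is generated from the same template as the VF kernel
 * above; the per-pair arithmetic is identical, but the potential is never
 * accumulated (there is no velecsum reset or reduction), and velec is only
 * needed transiently for the -v*dsw/r switch-function force term.
 */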
772 void
773 nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_double
774                     (t_nblist                    * gmx_restrict       nlist,
775                      rvec                        * gmx_restrict          xx,
776                      rvec                        * gmx_restrict          ff,
777                      t_forcerec                  * gmx_restrict          fr,
778                      t_mdatoms                   * gmx_restrict     mdatoms,
779                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
780                      t_nrnb                      * gmx_restrict        nrnb)
781 {
782     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
783      * just 0 for non-waters.
784      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
785      * jnr indices corresponding to data put in the four positions in the SIMD register.
786      */
787     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
788     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
789     int              jnrA,jnrB,jnrC,jnrD;
790     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
791     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
792     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
793     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
794     real             rcutoff_scalar;
795     real             *shiftvec,*fshift,*x,*f;
796     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
797     real             scratch[4*DIM];
798     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
799     real *           vdwioffsetptr1;
800     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
801     real *           vdwioffsetptr2;
802     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
803     real *           vdwioffsetptr3;
804     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
805     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
806     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
807     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
808     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
809     __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
810     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
811     real             *charge;
812     __m128i          ewitab;
813     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
814     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
815     real             *ewtab;
816     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
817     real             rswitch_scalar,d_scalar;
818     __m256d          dummy_mask,cutoff_mask;
819     __m128           tmpmask0,tmpmask1;
820     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
821     __m256d          one     = _mm256_set1_pd(1.0);
822     __m256d          two     = _mm256_set1_pd(2.0);
823     x                = xx[0];
824     f                = ff[0];
825
826     nri              = nlist->nri;
827     iinr             = nlist->iinr;
828     jindex           = nlist->jindex;
829     jjnr             = nlist->jjnr;
830     shiftidx         = nlist->shift;
831     gid              = nlist->gid;
832     shiftvec         = fr->shift_vec[0];
833     fshift           = fr->fshift[0];
834     facel            = _mm256_set1_pd(fr->epsfac);
835     charge           = mdatoms->chargeA;
836
837     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
838     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
839     beta2            = _mm256_mul_pd(beta,beta);
840     beta3            = _mm256_mul_pd(beta,beta2);
841
842     ewtab            = fr->ic->tabq_coul_FDV0;
843     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
844     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
845
846     /* Setup water-specific parameters */
847     inr              = nlist->iinr[0];
848     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
849     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
850     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
851
852     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
853     rcutoff_scalar   = fr->rcoulomb;
854     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
855     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
856
857     rswitch_scalar   = fr->rcoulomb_switch;
858     rswitch          = _mm256_set1_pd(rswitch_scalar);
859     /* Setup switch parameters */
860     d_scalar         = rcutoff_scalar-rswitch_scalar;
861     d                = _mm256_set1_pd(d_scalar);
862     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
863     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
864     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
865     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
866     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
867     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
868
869     /* Avoid stupid compiler warnings */
870     jnrA = jnrB = jnrC = jnrD = 0;
871     j_coord_offsetA = 0;
872     j_coord_offsetB = 0;
873     j_coord_offsetC = 0;
874     j_coord_offsetD = 0;
875
876     outeriter        = 0;
877     inneriter        = 0;
878
879     for(iidx=0;iidx<4*DIM;iidx++)
880     {
881         scratch[iidx] = 0.0;
882     }
883
884     /* Start outer loop over neighborlists */
885     for(iidx=0; iidx<nri; iidx++)
886     {
887         /* Load shift vector for this list */
888         i_shift_offset   = DIM*shiftidx[iidx];
889
890         /* Load limits for loop over neighbors */
891         j_index_start    = jindex[iidx];
892         j_index_end      = jindex[iidx+1];
893
894         /* Get outer coordinate index */
895         inr              = iinr[iidx];
896         i_coord_offset   = DIM*inr;
897
898         /* Load i particle coords and add shift vector */
899         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
900                                                     &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
901
902         fix1             = _mm256_setzero_pd();
903         fiy1             = _mm256_setzero_pd();
904         fiz1             = _mm256_setzero_pd();
905         fix2             = _mm256_setzero_pd();
906         fiy2             = _mm256_setzero_pd();
907         fiz2             = _mm256_setzero_pd();
908         fix3             = _mm256_setzero_pd();
909         fiy3             = _mm256_setzero_pd();
910         fiz3             = _mm256_setzero_pd();
911
912         /* Start inner kernel loop */
913         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
914         {
915
916             /* Get j neighbor index, and coordinate index */
917             jnrA             = jjnr[jidx];
918             jnrB             = jjnr[jidx+1];
919             jnrC             = jjnr[jidx+2];
920             jnrD             = jjnr[jidx+3];
921             j_coord_offsetA  = DIM*jnrA;
922             j_coord_offsetB  = DIM*jnrB;
923             j_coord_offsetC  = DIM*jnrC;
924             j_coord_offsetD  = DIM*jnrD;
925
926             /* load j atom coordinates */
927             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
928                                                  x+j_coord_offsetC,x+j_coord_offsetD,
929                                                  &jx0,&jy0,&jz0);
930
931             /* Calculate displacement vector */
932             dx10             = _mm256_sub_pd(ix1,jx0);
933             dy10             = _mm256_sub_pd(iy1,jy0);
934             dz10             = _mm256_sub_pd(iz1,jz0);
935             dx20             = _mm256_sub_pd(ix2,jx0);
936             dy20             = _mm256_sub_pd(iy2,jy0);
937             dz20             = _mm256_sub_pd(iz2,jz0);
938             dx30             = _mm256_sub_pd(ix3,jx0);
939             dy30             = _mm256_sub_pd(iy3,jy0);
940             dz30             = _mm256_sub_pd(iz3,jz0);
941
942             /* Calculate squared distance and things based on it */
943             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
944             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
945             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
946
947             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
948             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
949             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
950
951             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
952             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
953             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
954
955             /* Load parameters for j particles */
956             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
957                                                                  charge+jnrC+0,charge+jnrD+0);
958
959             fjx0             = _mm256_setzero_pd();
960             fjy0             = _mm256_setzero_pd();
961             fjz0             = _mm256_setzero_pd();
962
963             /**************************
964              * CALCULATE INTERACTIONS *
965              **************************/
966
967             if (gmx_mm256_any_lt(rsq10,rcutoff2))
968             {
969
970             r10              = _mm256_mul_pd(rsq10,rinv10);
971
972             /* Compute parameters for interactions between i and j atoms */
973             qq10             = _mm256_mul_pd(iq1,jq0);
974
975             /* EWALD ELECTROSTATICS */
976
977             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
978             ewrt             = _mm256_mul_pd(r10,ewtabscale);
979             ewitab           = _mm256_cvttpd_epi32(ewrt);
980             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
981             ewitab           = _mm_slli_epi32(ewitab,2);
982             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
983             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
984             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
985             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
986             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
987             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
988             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
989             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
990             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
991
992             d                = _mm256_sub_pd(r10,rswitch);
993             d                = _mm256_max_pd(d,_mm256_setzero_pd());
994             d2               = _mm256_mul_pd(d,d);
995             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
996
997             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
998
999             /* Evaluate switch function */
1000             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1001             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1002             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1003
1004             fscal            = felec;
1005
1006             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1007
1008             /* Calculate temporary vectorial force */
1009             tx               = _mm256_mul_pd(fscal,dx10);
1010             ty               = _mm256_mul_pd(fscal,dy10);
1011             tz               = _mm256_mul_pd(fscal,dz10);
1012
1013             /* Update vectorial force */
1014             fix1             = _mm256_add_pd(fix1,tx);
1015             fiy1             = _mm256_add_pd(fiy1,ty);
1016             fiz1             = _mm256_add_pd(fiz1,tz);
1017
1018             fjx0             = _mm256_add_pd(fjx0,tx);
1019             fjy0             = _mm256_add_pd(fjy0,ty);
1020             fjz0             = _mm256_add_pd(fjz0,tz);
1021
1022             }
1023
1024             /**************************
1025              * CALCULATE INTERACTIONS *
1026              **************************/
1027
1028             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1029             {
1030
1031             r20              = _mm256_mul_pd(rsq20,rinv20);
1032
1033             /* Compute parameters for interactions between i and j atoms */
1034             qq20             = _mm256_mul_pd(iq2,jq0);
1035
1036             /* EWALD ELECTROSTATICS */
1037
1038             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1039             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1040             ewitab           = _mm256_cvttpd_epi32(ewrt);
1041             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1042             ewitab           = _mm_slli_epi32(ewitab,2);
1043             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1044             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1045             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1046             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1047             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1048             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1049             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1050             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1051             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1052
1053             d                = _mm256_sub_pd(r20,rswitch);
1054             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1055             d2               = _mm256_mul_pd(d,d);
1056             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1057
1058             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1059
1060             /* Evaluate switch function */
1061             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1062             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx20);
            ty               = _mm256_mul_pd(fscal,dy20);
            tz               = _mm256_mul_pd(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq30,rcutoff2))
            {

            r30              = _mm256_mul_pd(rsq30,rinv30);

            /* Compute parameters for interactions between i and j atoms */
            qq30             = _mm256_mul_pd(iq3,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r30,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));

            d                = _mm256_sub_pd(r30,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx30);
            ty               = _mm256_mul_pd(fscal,dy30);
            tz               = _mm256_mul_pd(fscal,dz30);

            /* Update vectorial force */
            fix3             = _mm256_add_pd(fix3,tx);
            fiy3             = _mm256_add_pd(fiy3,ty);
            fiz3             = _mm256_add_pd(fiz3,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;

            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
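            /* The swizzled decrement above transposes the accumulated j-force
             * components fjx0/fjy0/fjz0 from SIMD-lane order back to per-atom
             * (x,y,z) order and subtracts them from f[] at the four j pointers,
             * the Newton's-third-law counterpart of the fix/fiy/fiz updates on
             * the i atoms.
             */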

            /* Inner loop uses 189 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* The sign of each jjnr element is negative for padding (non-real) atoms.
             * The comparison below therefore gives an all-ones mask for dummy entries
             * and all zeros for real ones, so it can be applied as
             * val = _mm256_andnot_pd(dummy_mask,val) to clear dummy entries.
             */
            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
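            /* Mask construction (illustrative): the four 32-bit jjnr entries are
             * compared against zero, each 32-bit result is duplicated by the two
             * permutes, and the duplicated pair is reinterpreted as one 64-bit
             * double lane, so dummy_mask is all-ones in every lane whose jnrlist
             * entry was negative. The indices are clamped to 0 below so that the
             * coordinate and charge loads stay in bounds; the resulting bogus
             * contributions are removed later with _mm256_andnot_pd(dummy_mask,...).
             */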

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx10             = _mm256_sub_pd(ix1,jx0);
            dy10             = _mm256_sub_pd(iy1,jy0);
            dz10             = _mm256_sub_pd(iz1,jz0);
            dx20             = _mm256_sub_pd(ix2,jx0);
            dy20             = _mm256_sub_pd(iy2,jy0);
            dz20             = _mm256_sub_pd(iz2,jz0);
            dx30             = _mm256_sub_pd(ix3,jx0);
            dy30             = _mm256_sub_pd(iy3,jy0);
            dz30             = _mm256_sub_pd(iz3,jz0);

            /* Calculate squared distance and things based on it */
            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
            rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);

            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
            rinv30           = gmx_mm256_invsqrt_pd(rsq30);

            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
            rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
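            /* Per lane (illustrative): rsq = dx*dx + dy*dy + dz*dz and
             * rinv = 1/sqrt(rsq), computed for all four j atoms at once by
             * gmx_mm256_invsqrt_pd. Only rinv and rinvsq are kept here; r itself
             * is reconstructed as rsq*rinv inside the interaction blocks below.
             */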

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0);

            fjx0             = _mm256_setzero_pd();
            fjy0             = _mm256_setzero_pd();
            fjz0             = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_pd(rsq10,rinv10);
            r10              = _mm256_andnot_pd(dummy_mask,r10);

            /* Compute parameters for interactions between i and j atoms */
            qq10             = _mm256_mul_pd(iq1,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r10,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));

            d                = _mm256_sub_pd(r10,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx10);
            ty               = _mm256_mul_pd(fscal,dy10);
            tz               = _mm256_mul_pd(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_pd(rsq20,rinv20);
            r20              = _mm256_andnot_pd(dummy_mask,r20);

            /* Compute parameters for interactions between i and j atoms */
            qq20             = _mm256_mul_pd(iq2,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r20,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));

            d                = _mm256_sub_pd(r20,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx20);
            ty               = _mm256_mul_pd(fscal,dy20);
            tz               = _mm256_mul_pd(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq30,rcutoff2))
            {

            r30              = _mm256_mul_pd(rsq30,rinv30);
            r30              = _mm256_andnot_pd(dummy_mask,r30);

            /* Compute parameters for interactions between i and j atoms */
            qq30             = _mm256_mul_pd(iq3,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r30,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));

            d                = _mm256_sub_pd(r30,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx30);
            ty               = _mm256_mul_pd(fscal,dy30);
            tz               = _mm256_mul_pd(fscal,dz30);

            /* Update vectorial force */
            fix3             = _mm256_add_pd(fix3,tx);
            fiy3             = _mm256_add_pd(fiy3,ty);
            fiz3             = _mm256_add_pd(fiz3,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
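            /* Dummy lanes already contribute zero force (their fscal was cleared
             * with dummy_mask above); redirecting their pointers to the scratch
             * buffer just gives the vector decrement below a safe place to write.
             */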

            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);

            /* Inner loop uses 192 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset+DIM,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*192);
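    /* The cost booked here combines the flop counts noted in the loop comments
     * above: 18 flops per outer iteration and 192 per four-way SIMD inner
     * iteration, accumulated on the water4 electrostatics force-only counter.
     */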
}