src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_avx_256_double.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
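/*
 * Kernel-name decoding, matching the per-kernel comments below: ElecEwSw is
 * Ewald electrostatics with a switching function, VdwLJSw is Lennard-Jones
 * with a switching function, GeomW4P1 is a 4-site water i particle against a
 * single j particle, and the VF / F suffixes select the PotentialAndForce or
 * Force-only variants.
 */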
38 #ifdef HAVE_CONFIG_H
39 #include <config.h>
40 #endif
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "types/simple.h"
46 #include "vec.h"
47 #include "nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_double
54  * Electrostatics interaction: Ewald
55  * VdW interaction:            LennardJones
56  * Geometry:                   Water4-Particle
57  * Calculate force/pot:        PotentialAndForce
58  */
59 void
60 nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_double
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
72      * jnr indices corresponding to data put in the four positions in the SIMD register.
73      */
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
84     real             scratch[4*DIM];
85     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88     real *           vdwioffsetptr1;
89     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90     real *           vdwioffsetptr2;
91     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92     real *           vdwioffsetptr3;
93     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
94     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
95     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
96     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
97     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
98     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
99     __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
100     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
101     real             *charge;
102     int              nvdwtype;
103     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
104     int              *vdwtype;
105     real             *vdwparam;
106     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
107     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
108     __m128i          ewitab;
109     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
110     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
111     real             *ewtab;
112     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
113     real             rswitch_scalar,d_scalar;
114     __m256d          dummy_mask,cutoff_mask;
115     __m128           tmpmask0,tmpmask1;
116     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
117     __m256d          one     = _mm256_set1_pd(1.0);
118     __m256d          two     = _mm256_set1_pd(2.0);
119     x                = xx[0];
120     f                = ff[0];
121
122     nri              = nlist->nri;
123     iinr             = nlist->iinr;
124     jindex           = nlist->jindex;
125     jjnr             = nlist->jjnr;
126     shiftidx         = nlist->shift;
127     gid              = nlist->gid;
128     shiftvec         = fr->shift_vec[0];
129     fshift           = fr->fshift[0];
130     facel            = _mm256_set1_pd(fr->epsfac);
131     charge           = mdatoms->chargeA;
132     nvdwtype         = fr->ntype;
133     vdwparam         = fr->nbfp;
134     vdwtype          = mdatoms->typeA;
135
136     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
137     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
138     beta2            = _mm256_mul_pd(beta,beta);
139     beta3            = _mm256_mul_pd(beta,beta2);
140
141     ewtab            = fr->ic->tabq_coul_FDV0;
142     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
143     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
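    /* As far as this kernel is concerned, tabq_coul_FDV0 is laid out as
     * quadruplets per table point (force F, force increment D to the next
     * point, potential V, and a zero pad), which is why the table index is
     * shifted left by 2 (multiplied by 4) in the inner loop and four
     * consecutive doubles are loaded and transposed per lookup.
     */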
144
145     /* Setup water-specific parameters */
146     inr              = nlist->iinr[0];
147     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
148     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
149     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
150     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
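    /* vdwparam (fr->nbfp) is indexed as an ntype x ntype matrix with two reals
     * (c6,c12) per type pair: the i type selects a row via 2*nvdwtype*type[i],
     * and the j type adds 2*type[j] inside the inner loop.
     */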
151
152     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
153     rcutoff_scalar   = fr->rcoulomb;
154     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
155     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
156
157     rswitch_scalar   = fr->rcoulomb_switch;
158     rswitch          = _mm256_set1_pd(rswitch_scalar);
159     /* Setup switch parameters */
160     d_scalar         = rcutoff_scalar-rswitch_scalar;
161     d                = _mm256_set1_pd(d_scalar);
162     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
163     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
164     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
165     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
166     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
167     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
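    /* With D = rcutoff - rswitch and d = r - rswitch, these coefficients give
     * the quintic switch sw(d) = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5 and its
     * derivative dsw(d) = swF2*d^2 + swF3*d^3 + swF4*d^4, so that sw(0)=1,
     * sw(D)=0, and the first two derivatives vanish at both ends.
     */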
168
169     /* Avoid stupid compiler warnings */
170     jnrA = jnrB = jnrC = jnrD = 0;
171     j_coord_offsetA = 0;
172     j_coord_offsetB = 0;
173     j_coord_offsetC = 0;
174     j_coord_offsetD = 0;
175
176     outeriter        = 0;
177     inneriter        = 0;
178
179     for(iidx=0;iidx<4*DIM;iidx++)
180     {
181         scratch[iidx] = 0.0;
182     }
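    /* scratch[] is a zeroed dump area: in the masked tail loop below, forces
     * belonging to padded (negative) j entries are written here instead of to
     * the real force array.
     */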
183
184     /* Start outer loop over neighborlists */
185     for(iidx=0; iidx<nri; iidx++)
186     {
187         /* Load shift vector for this list */
188         i_shift_offset   = DIM*shiftidx[iidx];
189
190         /* Load limits for loop over neighbors */
191         j_index_start    = jindex[iidx];
192         j_index_end      = jindex[iidx+1];
193
194         /* Get outer coordinate index */
195         inr              = iinr[iidx];
196         i_coord_offset   = DIM*inr;
197
198         /* Load i particle coords and add shift vector */
199         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
200                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
201
202         fix0             = _mm256_setzero_pd();
203         fiy0             = _mm256_setzero_pd();
204         fiz0             = _mm256_setzero_pd();
205         fix1             = _mm256_setzero_pd();
206         fiy1             = _mm256_setzero_pd();
207         fiz1             = _mm256_setzero_pd();
208         fix2             = _mm256_setzero_pd();
209         fiy2             = _mm256_setzero_pd();
210         fiz2             = _mm256_setzero_pd();
211         fix3             = _mm256_setzero_pd();
212         fiy3             = _mm256_setzero_pd();
213         fiz3             = _mm256_setzero_pd();
214
215         /* Reset potential sums */
216         velecsum         = _mm256_setzero_pd();
217         vvdwsum          = _mm256_setzero_pd();
218
219         /* Start inner kernel loop */
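        /* The j list is padded with negative entries up to a multiple of 4;
         * this loop handles full quartets of real atoms, and the remainder
         * (which contains dummies) is handled by the masked block further down.
         */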
220         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
221         {
222
223             /* Get j neighbor index, and coordinate index */
224             jnrA             = jjnr[jidx];
225             jnrB             = jjnr[jidx+1];
226             jnrC             = jjnr[jidx+2];
227             jnrD             = jjnr[jidx+3];
228             j_coord_offsetA  = DIM*jnrA;
229             j_coord_offsetB  = DIM*jnrB;
230             j_coord_offsetC  = DIM*jnrC;
231             j_coord_offsetD  = DIM*jnrD;
232
233             /* load j atom coordinates */
234             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
235                                                  x+j_coord_offsetC,x+j_coord_offsetD,
236                                                  &jx0,&jy0,&jz0);
237
238             /* Calculate displacement vector */
239             dx00             = _mm256_sub_pd(ix0,jx0);
240             dy00             = _mm256_sub_pd(iy0,jy0);
241             dz00             = _mm256_sub_pd(iz0,jz0);
242             dx10             = _mm256_sub_pd(ix1,jx0);
243             dy10             = _mm256_sub_pd(iy1,jy0);
244             dz10             = _mm256_sub_pd(iz1,jz0);
245             dx20             = _mm256_sub_pd(ix2,jx0);
246             dy20             = _mm256_sub_pd(iy2,jy0);
247             dz20             = _mm256_sub_pd(iz2,jz0);
248             dx30             = _mm256_sub_pd(ix3,jx0);
249             dy30             = _mm256_sub_pd(iy3,jy0);
250             dz30             = _mm256_sub_pd(iz3,jz0);
251
252             /* Calculate squared distance and things based on it */
253             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
254             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
255             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
256             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
257
258             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
259             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
260             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
261             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
262
263             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
264             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
265             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
266             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
267
268             /* Load parameters for j particles */
269             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
270                                                                  charge+jnrC+0,charge+jnrD+0);
271             vdwjidx0A        = 2*vdwtype[jnrA+0];
272             vdwjidx0B        = 2*vdwtype[jnrB+0];
273             vdwjidx0C        = 2*vdwtype[jnrC+0];
274             vdwjidx0D        = 2*vdwtype[jnrD+0];
275
276             fjx0             = _mm256_setzero_pd();
277             fjy0             = _mm256_setzero_pd();
278             fjz0             = _mm256_setzero_pd();
279
280             /**************************
281              * CALCULATE INTERACTIONS *
282              **************************/
283
284             if (gmx_mm256_any_lt(rsq00,rcutoff2))
285             {
286
287             r00              = _mm256_mul_pd(rsq00,rinv00);
288
289             /* Compute parameters for interactions between i and j atoms */
290             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
291                                             vdwioffsetptr0+vdwjidx0B,
292                                             vdwioffsetptr0+vdwjidx0C,
293                                             vdwioffsetptr0+vdwjidx0D,
294                                             &c6_00,&c12_00);
295
296             /* LENNARD-JONES DISPERSION/REPULSION */
297
298             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
299             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
300             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
301             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
302             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
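            /* The 1/6 and 1/12 factors suggest that c6 and c12 are stored here
             * premultiplied by 6 and 12, so vvdw is the plain LJ potential
             * C12/r^12 - C6/r^6 and fvdw equals -(dV/dr)/r before switching.
             */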
303
304             d                = _mm256_sub_pd(r00,rswitch);
305             d                = _mm256_max_pd(d,_mm256_setzero_pd());
306             d2               = _mm256_mul_pd(d,d);
307             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
308
309             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
310
311             /* Evaluate switch function */
312             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
313             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
314             vvdw             = _mm256_mul_pd(vvdw,sw);
315             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
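            /* The any_lt test above only guarantees that at least one lane is
             * inside the cutoff, so the per-lane cutoff_mask is still needed to
             * zero the contributions of lanes beyond rcutoff.
             */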
316
317             /* Update potential sum for this i atom from the interaction with this j atom. */
318             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
319             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
320
321             fscal            = fvdw;
322
323             fscal            = _mm256_and_pd(fscal,cutoff_mask);
324
325             /* Calculate temporary vectorial force */
326             tx               = _mm256_mul_pd(fscal,dx00);
327             ty               = _mm256_mul_pd(fscal,dy00);
328             tz               = _mm256_mul_pd(fscal,dz00);
329
330             /* Update vectorial force */
331             fix0             = _mm256_add_pd(fix0,tx);
332             fiy0             = _mm256_add_pd(fiy0,ty);
333             fiz0             = _mm256_add_pd(fiz0,tz);
334
335             fjx0             = _mm256_add_pd(fjx0,tx);
336             fjy0             = _mm256_add_pd(fjy0,ty);
337             fjz0             = _mm256_add_pd(fjz0,tz);
338
339             }
340
341             /**************************
342              * CALCULATE INTERACTIONS *
343              **************************/
344
345             if (gmx_mm256_any_lt(rsq10,rcutoff2))
346             {
347
348             r10              = _mm256_mul_pd(rsq10,rinv10);
349
350             /* Compute parameters for interactions between i and j atoms */
351             qq10             = _mm256_mul_pd(iq1,jq0);
352
353             /* EWALD ELECTROSTATICS */
354
355             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
356             ewrt             = _mm256_mul_pd(r10,ewtabscale);
357             ewitab           = _mm256_cvttpd_epi32(ewrt);
358             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
359             ewitab           = _mm_slli_epi32(ewitab,2);
360             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
361             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
362             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
363             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
364             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
365             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
366             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
367             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
368             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
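            /* Table lookup: F is interpolated linearly (ewtabF + eweps*ewtabD)
             * and V is corrected by integrating the interpolated force over the
             * fractional table interval (the ewtabhalfspace factor). The
             * tabulated part appears to be the long-range term, so subtracting
             * it from 1/r (and from 1/r^2 for the force) leaves the short-range
             * real-space Ewald contribution.
             */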
369
370             d                = _mm256_sub_pd(r10,rswitch);
371             d                = _mm256_max_pd(d,_mm256_setzero_pd());
372             d2               = _mm256_mul_pd(d,d);
373             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
374
375             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
376
377             /* Evaluate switch function */
378             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
379             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
380             velec            = _mm256_mul_pd(velec,sw);
381             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
382
383             /* Update potential sum for this i atom from the interaction with this j atom. */
384             velec            = _mm256_and_pd(velec,cutoff_mask);
385             velecsum         = _mm256_add_pd(velecsum,velec);
386
387             fscal            = felec;
388
389             fscal            = _mm256_and_pd(fscal,cutoff_mask);
390
391             /* Calculate temporary vectorial force */
392             tx               = _mm256_mul_pd(fscal,dx10);
393             ty               = _mm256_mul_pd(fscal,dy10);
394             tz               = _mm256_mul_pd(fscal,dz10);
395
396             /* Update vectorial force */
397             fix1             = _mm256_add_pd(fix1,tx);
398             fiy1             = _mm256_add_pd(fiy1,ty);
399             fiz1             = _mm256_add_pd(fiz1,tz);
400
401             fjx0             = _mm256_add_pd(fjx0,tx);
402             fjy0             = _mm256_add_pd(fjy0,ty);
403             fjz0             = _mm256_add_pd(fjz0,tz);
404
405             }
406
407             /**************************
408              * CALCULATE INTERACTIONS *
409              **************************/
410
411             if (gmx_mm256_any_lt(rsq20,rcutoff2))
412             {
413
414             r20              = _mm256_mul_pd(rsq20,rinv20);
415
416             /* Compute parameters for interactions between i and j atoms */
417             qq20             = _mm256_mul_pd(iq2,jq0);
418
419             /* EWALD ELECTROSTATICS */
420
421             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
422             ewrt             = _mm256_mul_pd(r20,ewtabscale);
423             ewitab           = _mm256_cvttpd_epi32(ewrt);
424             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
425             ewitab           = _mm_slli_epi32(ewitab,2);
426             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
427             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
428             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
429             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
430             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
431             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
432             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
433             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
434             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
435
436             d                = _mm256_sub_pd(r20,rswitch);
437             d                = _mm256_max_pd(d,_mm256_setzero_pd());
438             d2               = _mm256_mul_pd(d,d);
439             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
440
441             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
442
443             /* Evaluate switch function */
444             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
445             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
446             velec            = _mm256_mul_pd(velec,sw);
447             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
448
449             /* Update potential sum for this i atom from the interaction with this j atom. */
450             velec            = _mm256_and_pd(velec,cutoff_mask);
451             velecsum         = _mm256_add_pd(velecsum,velec);
452
453             fscal            = felec;
454
455             fscal            = _mm256_and_pd(fscal,cutoff_mask);
456
457             /* Calculate temporary vectorial force */
458             tx               = _mm256_mul_pd(fscal,dx20);
459             ty               = _mm256_mul_pd(fscal,dy20);
460             tz               = _mm256_mul_pd(fscal,dz20);
461
462             /* Update vectorial force */
463             fix2             = _mm256_add_pd(fix2,tx);
464             fiy2             = _mm256_add_pd(fiy2,ty);
465             fiz2             = _mm256_add_pd(fiz2,tz);
466
467             fjx0             = _mm256_add_pd(fjx0,tx);
468             fjy0             = _mm256_add_pd(fjy0,ty);
469             fjz0             = _mm256_add_pd(fjz0,tz);
470
471             }
472
473             /**************************
474              * CALCULATE INTERACTIONS *
475              **************************/
476
477             if (gmx_mm256_any_lt(rsq30,rcutoff2))
478             {
479
480             r30              = _mm256_mul_pd(rsq30,rinv30);
481
482             /* Compute parameters for interactions between i and j atoms */
483             qq30             = _mm256_mul_pd(iq3,jq0);
484
485             /* EWALD ELECTROSTATICS */
486
487             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
488             ewrt             = _mm256_mul_pd(r30,ewtabscale);
489             ewitab           = _mm256_cvttpd_epi32(ewrt);
490             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
491             ewitab           = _mm_slli_epi32(ewitab,2);
492             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
493             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
494             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
495             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
496             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
497             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
498             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
499             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
500             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
501
502             d                = _mm256_sub_pd(r30,rswitch);
503             d                = _mm256_max_pd(d,_mm256_setzero_pd());
504             d2               = _mm256_mul_pd(d,d);
505             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
506
507             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
508
509             /* Evaluate switch function */
510             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
511             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
512             velec            = _mm256_mul_pd(velec,sw);
513             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
514
515             /* Update potential sum for this i atom from the interaction with this j atom. */
516             velec            = _mm256_and_pd(velec,cutoff_mask);
517             velecsum         = _mm256_add_pd(velecsum,velec);
518
519             fscal            = felec;
520
521             fscal            = _mm256_and_pd(fscal,cutoff_mask);
522
523             /* Calculate temporary vectorial force */
524             tx               = _mm256_mul_pd(fscal,dx30);
525             ty               = _mm256_mul_pd(fscal,dy30);
526             tz               = _mm256_mul_pd(fscal,dz30);
527
528             /* Update vectorial force */
529             fix3             = _mm256_add_pd(fix3,tx);
530             fiy3             = _mm256_add_pd(fiy3,ty);
531             fiz3             = _mm256_add_pd(fiz3,tz);
532
533             fjx0             = _mm256_add_pd(fjx0,tx);
534             fjy0             = _mm256_add_pd(fjy0,ty);
535             fjz0             = _mm256_add_pd(fjz0,tz);
536
537             }
538
539             fjptrA             = f+j_coord_offsetA;
540             fjptrB             = f+j_coord_offsetB;
541             fjptrC             = f+j_coord_offsetC;
542             fjptrD             = f+j_coord_offsetD;
543
544             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
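            /* Newton's third law: tx,ty,tz were added to the i forces above,
             * and the decrement call subtracts the accumulated fjx0,fjy0,fjz0
             * from the four j atoms.
             */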
545
546             /* Inner loop uses 257 flops */
547         }
548
549         if(jidx<j_index_end)
550         {
551
552             /* Get j neighbor index, and coordinate index */
553             jnrlistA         = jjnr[jidx];
554             jnrlistB         = jjnr[jidx+1];
555             jnrlistC         = jjnr[jidx+2];
556             jnrlistD         = jjnr[jidx+3];
557             /* Sign of each element will be negative for non-real atoms.
558              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
559              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
560              */
561             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
562
563             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
564             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
565             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
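            /* The 32-bit compare result is widened to 64-bit lanes by the two
             * permutes, so that dummy_mask lines up with the double-precision
             * SIMD lanes used below.
             */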
566
567             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
568             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
569             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
570             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
571             j_coord_offsetA  = DIM*jnrA;
572             j_coord_offsetB  = DIM*jnrB;
573             j_coord_offsetC  = DIM*jnrC;
574             j_coord_offsetD  = DIM*jnrD;
575
576             /* load j atom coordinates */
577             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
578                                                  x+j_coord_offsetC,x+j_coord_offsetD,
579                                                  &jx0,&jy0,&jz0);
580
581             /* Calculate displacement vector */
582             dx00             = _mm256_sub_pd(ix0,jx0);
583             dy00             = _mm256_sub_pd(iy0,jy0);
584             dz00             = _mm256_sub_pd(iz0,jz0);
585             dx10             = _mm256_sub_pd(ix1,jx0);
586             dy10             = _mm256_sub_pd(iy1,jy0);
587             dz10             = _mm256_sub_pd(iz1,jz0);
588             dx20             = _mm256_sub_pd(ix2,jx0);
589             dy20             = _mm256_sub_pd(iy2,jy0);
590             dz20             = _mm256_sub_pd(iz2,jz0);
591             dx30             = _mm256_sub_pd(ix3,jx0);
592             dy30             = _mm256_sub_pd(iy3,jy0);
593             dz30             = _mm256_sub_pd(iz3,jz0);
594
595             /* Calculate squared distance and things based on it */
596             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
597             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
598             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
599             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
600
601             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
602             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
603             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
604             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
605
606             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
607             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
608             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
609             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
610
611             /* Load parameters for j particles */
612             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
613                                                                  charge+jnrC+0,charge+jnrD+0);
614             vdwjidx0A        = 2*vdwtype[jnrA+0];
615             vdwjidx0B        = 2*vdwtype[jnrB+0];
616             vdwjidx0C        = 2*vdwtype[jnrC+0];
617             vdwjidx0D        = 2*vdwtype[jnrD+0];
618
619             fjx0             = _mm256_setzero_pd();
620             fjy0             = _mm256_setzero_pd();
621             fjz0             = _mm256_setzero_pd();
622
623             /**************************
624              * CALCULATE INTERACTIONS *
625              **************************/
626
627             if (gmx_mm256_any_lt(rsq00,rcutoff2))
628             {
629
630             r00              = _mm256_mul_pd(rsq00,rinv00);
631             r00              = _mm256_andnot_pd(dummy_mask,r00);
632
633             /* Compute parameters for interactions between i and j atoms */
634             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
635                                             vdwioffsetptr0+vdwjidx0B,
636                                             vdwioffsetptr0+vdwjidx0C,
637                                             vdwioffsetptr0+vdwjidx0D,
638                                             &c6_00,&c12_00);
639
640             /* LENNARD-JONES DISPERSION/REPULSION */
641
642             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
643             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
644             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
645             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
646             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
647
648             d                = _mm256_sub_pd(r00,rswitch);
649             d                = _mm256_max_pd(d,_mm256_setzero_pd());
650             d2               = _mm256_mul_pd(d,d);
651             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
652
653             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
654
655             /* Evaluate switch function */
656             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
657             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
658             vvdw             = _mm256_mul_pd(vvdw,sw);
659             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
660
661             /* Update potential sum for this i atom from the interaction with this j atom. */
662             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
663             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
664             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
665
666             fscal            = fvdw;
667
668             fscal            = _mm256_and_pd(fscal,cutoff_mask);
669
670             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
671
672             /* Calculate temporary vectorial force */
673             tx               = _mm256_mul_pd(fscal,dx00);
674             ty               = _mm256_mul_pd(fscal,dy00);
675             tz               = _mm256_mul_pd(fscal,dz00);
676
677             /* Update vectorial force */
678             fix0             = _mm256_add_pd(fix0,tx);
679             fiy0             = _mm256_add_pd(fiy0,ty);
680             fiz0             = _mm256_add_pd(fiz0,tz);
681
682             fjx0             = _mm256_add_pd(fjx0,tx);
683             fjy0             = _mm256_add_pd(fjy0,ty);
684             fjz0             = _mm256_add_pd(fjz0,tz);
685
686             }
687
688             /**************************
689              * CALCULATE INTERACTIONS *
690              **************************/
691
692             if (gmx_mm256_any_lt(rsq10,rcutoff2))
693             {
694
695             r10              = _mm256_mul_pd(rsq10,rinv10);
696             r10              = _mm256_andnot_pd(dummy_mask,r10);
697
698             /* Compute parameters for interactions between i and j atoms */
699             qq10             = _mm256_mul_pd(iq1,jq0);
700
701             /* EWALD ELECTROSTATICS */
702
703             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
704             ewrt             = _mm256_mul_pd(r10,ewtabscale);
705             ewitab           = _mm256_cvttpd_epi32(ewrt);
706             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
707             ewitab           = _mm_slli_epi32(ewitab,2);
708             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
709             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
710             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
711             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
712             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
713             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
714             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
715             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
716             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
717
718             d                = _mm256_sub_pd(r10,rswitch);
719             d                = _mm256_max_pd(d,_mm256_setzero_pd());
720             d2               = _mm256_mul_pd(d,d);
721             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
722
723             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
724
725             /* Evaluate switch function */
726             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
727             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
728             velec            = _mm256_mul_pd(velec,sw);
729             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
730
731             /* Update potential sum for this i atom from the interaction with this j atom. */
732             velec            = _mm256_and_pd(velec,cutoff_mask);
733             velec            = _mm256_andnot_pd(dummy_mask,velec);
734             velecsum         = _mm256_add_pd(velecsum,velec);
735
736             fscal            = felec;
737
738             fscal            = _mm256_and_pd(fscal,cutoff_mask);
739
740             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
741
742             /* Calculate temporary vectorial force */
743             tx               = _mm256_mul_pd(fscal,dx10);
744             ty               = _mm256_mul_pd(fscal,dy10);
745             tz               = _mm256_mul_pd(fscal,dz10);
746
747             /* Update vectorial force */
748             fix1             = _mm256_add_pd(fix1,tx);
749             fiy1             = _mm256_add_pd(fiy1,ty);
750             fiz1             = _mm256_add_pd(fiz1,tz);
751
752             fjx0             = _mm256_add_pd(fjx0,tx);
753             fjy0             = _mm256_add_pd(fjy0,ty);
754             fjz0             = _mm256_add_pd(fjz0,tz);
755
756             }
757
758             /**************************
759              * CALCULATE INTERACTIONS *
760              **************************/
761
762             if (gmx_mm256_any_lt(rsq20,rcutoff2))
763             {
764
765             r20              = _mm256_mul_pd(rsq20,rinv20);
766             r20              = _mm256_andnot_pd(dummy_mask,r20);
767
768             /* Compute parameters for interactions between i and j atoms */
769             qq20             = _mm256_mul_pd(iq2,jq0);
770
771             /* EWALD ELECTROSTATICS */
772
773             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
774             ewrt             = _mm256_mul_pd(r20,ewtabscale);
775             ewitab           = _mm256_cvttpd_epi32(ewrt);
776             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
777             ewitab           = _mm_slli_epi32(ewitab,2);
778             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
779             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
780             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
781             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
782             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
783             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
784             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
785             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
786             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
787
788             d                = _mm256_sub_pd(r20,rswitch);
789             d                = _mm256_max_pd(d,_mm256_setzero_pd());
790             d2               = _mm256_mul_pd(d,d);
791             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
792
793             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
794
795             /* Evaluate switch function */
796             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
797             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
798             velec            = _mm256_mul_pd(velec,sw);
799             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
800
801             /* Update potential sum for this i atom from the interaction with this j atom. */
802             velec            = _mm256_and_pd(velec,cutoff_mask);
803             velec            = _mm256_andnot_pd(dummy_mask,velec);
804             velecsum         = _mm256_add_pd(velecsum,velec);
805
806             fscal            = felec;
807
808             fscal            = _mm256_and_pd(fscal,cutoff_mask);
809
810             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
811
812             /* Calculate temporary vectorial force */
813             tx               = _mm256_mul_pd(fscal,dx20);
814             ty               = _mm256_mul_pd(fscal,dy20);
815             tz               = _mm256_mul_pd(fscal,dz20);
816
817             /* Update vectorial force */
818             fix2             = _mm256_add_pd(fix2,tx);
819             fiy2             = _mm256_add_pd(fiy2,ty);
820             fiz2             = _mm256_add_pd(fiz2,tz);
821
822             fjx0             = _mm256_add_pd(fjx0,tx);
823             fjy0             = _mm256_add_pd(fjy0,ty);
824             fjz0             = _mm256_add_pd(fjz0,tz);
825
826             }
827
828             /**************************
829              * CALCULATE INTERACTIONS *
830              **************************/
831
832             if (gmx_mm256_any_lt(rsq30,rcutoff2))
833             {
834
835             r30              = _mm256_mul_pd(rsq30,rinv30);
836             r30              = _mm256_andnot_pd(dummy_mask,r30);
837
838             /* Compute parameters for interactions between i and j atoms */
839             qq30             = _mm256_mul_pd(iq3,jq0);
840
841             /* EWALD ELECTROSTATICS */
842
843             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
844             ewrt             = _mm256_mul_pd(r30,ewtabscale);
845             ewitab           = _mm256_cvttpd_epi32(ewrt);
846             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
847             ewitab           = _mm_slli_epi32(ewitab,2);
848             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
849             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
850             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
851             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
852             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
853             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
854             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
855             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
856             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
857
858             d                = _mm256_sub_pd(r30,rswitch);
859             d                = _mm256_max_pd(d,_mm256_setzero_pd());
860             d2               = _mm256_mul_pd(d,d);
861             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
862
863             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
864
865             /* Evaluate switch function */
866             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
867             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
868             velec            = _mm256_mul_pd(velec,sw);
869             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
870
871             /* Update potential sum for this i atom from the interaction with this j atom. */
872             velec            = _mm256_and_pd(velec,cutoff_mask);
873             velec            = _mm256_andnot_pd(dummy_mask,velec);
874             velecsum         = _mm256_add_pd(velecsum,velec);
875
876             fscal            = felec;
877
878             fscal            = _mm256_and_pd(fscal,cutoff_mask);
879
880             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
881
882             /* Calculate temporary vectorial force */
883             tx               = _mm256_mul_pd(fscal,dx30);
884             ty               = _mm256_mul_pd(fscal,dy30);
885             tz               = _mm256_mul_pd(fscal,dz30);
886
887             /* Update vectorial force */
888             fix3             = _mm256_add_pd(fix3,tx);
889             fiy3             = _mm256_add_pd(fiy3,ty);
890             fiz3             = _mm256_add_pd(fiz3,tz);
891
892             fjx0             = _mm256_add_pd(fjx0,tx);
893             fjy0             = _mm256_add_pd(fjy0,ty);
894             fjz0             = _mm256_add_pd(fjz0,tz);
895
896             }
897
898             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
899             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
900             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
901             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
902
903             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
904
905             /* Inner loop uses 261 flops */
906         }
907
908         /* End of innermost loop */
909
910         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
911                                                  f+i_coord_offset,fshift+i_shift_offset);
912
913         ggid                        = gid[iidx];
914         /* Update potential energies */
915         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
916         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
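        /* Each update_1pot call reduces the four SIMD lanes horizontally and
         * adds the result to the energy-group accumulator selected by ggid.
         */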
917
918         /* Increment number of inner iterations */
919         inneriter                  += j_index_end - j_index_start;
920
921         /* Outer loop uses 26 flops */
922     }
923
924     /* Increment number of outer iterations */
925     outeriter        += nri;
926
927     /* Update outer/inner flops */
928
929     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*261);
930 }
931 /*
932  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_double
933  * Electrostatics interaction: Ewald
934  * VdW interaction:            LennardJones
935  * Geometry:                   Water4-Particle
936  * Calculate force/pot:        Force
937  */
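/*
 * Force-only variant: compared to the VF kernel above, the per-i potential
 * sums are not reset before the inner loop, while the pair-interaction math
 * is otherwise the same.
 */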
938 void
939 nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_double
940                     (t_nblist                    * gmx_restrict       nlist,
941                      rvec                        * gmx_restrict          xx,
942                      rvec                        * gmx_restrict          ff,
943                      t_forcerec                  * gmx_restrict          fr,
944                      t_mdatoms                   * gmx_restrict     mdatoms,
945                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
946                      t_nrnb                      * gmx_restrict        nrnb)
947 {
948     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
949      * just 0 for non-waters.
950      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
951      * jnr indices corresponding to data put in the four positions in the SIMD register.
952      */
953     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
954     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
955     int              jnrA,jnrB,jnrC,jnrD;
956     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
957     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
958     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
959     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
960     real             rcutoff_scalar;
961     real             *shiftvec,*fshift,*x,*f;
962     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
963     real             scratch[4*DIM];
964     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
965     real *           vdwioffsetptr0;
966     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
967     real *           vdwioffsetptr1;
968     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
969     real *           vdwioffsetptr2;
970     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
971     real *           vdwioffsetptr3;
972     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
973     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
974     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
975     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
976     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
977     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
978     __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
979     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
980     real             *charge;
981     int              nvdwtype;
982     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
983     int              *vdwtype;
984     real             *vdwparam;
985     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
986     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
987     __m128i          ewitab;
988     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
989     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
990     real             *ewtab;
991     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
992     real             rswitch_scalar,d_scalar;
993     __m256d          dummy_mask,cutoff_mask;
994     __m128           tmpmask0,tmpmask1;
995     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
996     __m256d          one     = _mm256_set1_pd(1.0);
997     __m256d          two     = _mm256_set1_pd(2.0);
998     x                = xx[0];
999     f                = ff[0];
1000
1001     nri              = nlist->nri;
1002     iinr             = nlist->iinr;
1003     jindex           = nlist->jindex;
1004     jjnr             = nlist->jjnr;
1005     shiftidx         = nlist->shift;
1006     gid              = nlist->gid;
1007     shiftvec         = fr->shift_vec[0];
1008     fshift           = fr->fshift[0];
1009     facel            = _mm256_set1_pd(fr->epsfac);
1010     charge           = mdatoms->chargeA;
1011     nvdwtype         = fr->ntype;
1012     vdwparam         = fr->nbfp;
1013     vdwtype          = mdatoms->typeA;
1014
1015     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
1016     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
1017     beta2            = _mm256_mul_pd(beta,beta);
1018     beta3            = _mm256_mul_pd(beta,beta2);
1019
1020     ewtab            = fr->ic->tabq_coul_FDV0;
1021     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
1022     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1023
1024     /* Setup water-specific parameters */
1025     inr              = nlist->iinr[0];
1026     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1027     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1028     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
1029     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1030
1031     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1032     rcutoff_scalar   = fr->rcoulomb;
1033     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1034     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
1035
1036     rswitch_scalar   = fr->rcoulomb_switch;
1037     rswitch          = _mm256_set1_pd(rswitch_scalar);
1038     /* Setup switch parameters */
1039     d_scalar         = rcutoff_scalar-rswitch_scalar;
1040     d                = _mm256_set1_pd(d_scalar);
1041     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
1042     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1043     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1044     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
1045     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1046     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1047
1048     /* Avoid stupid compiler warnings */
1049     jnrA = jnrB = jnrC = jnrD = 0;
1050     j_coord_offsetA = 0;
1051     j_coord_offsetB = 0;
1052     j_coord_offsetC = 0;
1053     j_coord_offsetD = 0;
1054
1055     outeriter        = 0;
1056     inneriter        = 0;
1057
1058     for(iidx=0;iidx<4*DIM;iidx++)
1059     {
1060         scratch[iidx] = 0.0;
1061     }
1062
1063     /* Start outer loop over neighborlists */
1064     for(iidx=0; iidx<nri; iidx++)
1065     {
1066         /* Load shift vector for this list */
1067         i_shift_offset   = DIM*shiftidx[iidx];
1068
1069         /* Load limits for loop over neighbors */
1070         j_index_start    = jindex[iidx];
1071         j_index_end      = jindex[iidx+1];
1072
1073         /* Get outer coordinate index */
1074         inr              = iinr[iidx];
1075         i_coord_offset   = DIM*inr;
1076
1077         /* Load i particle coords and add shift vector */
1078         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1079                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1080
1081         fix0             = _mm256_setzero_pd();
1082         fiy0             = _mm256_setzero_pd();
1083         fiz0             = _mm256_setzero_pd();
1084         fix1             = _mm256_setzero_pd();
1085         fiy1             = _mm256_setzero_pd();
1086         fiz1             = _mm256_setzero_pd();
1087         fix2             = _mm256_setzero_pd();
1088         fiy2             = _mm256_setzero_pd();
1089         fiz2             = _mm256_setzero_pd();
1090         fix3             = _mm256_setzero_pd();
1091         fiy3             = _mm256_setzero_pd();
1092         fiz3             = _mm256_setzero_pd();
1093
1094         /* Start inner kernel loop */
1095         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1096         {
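            /* The j list is assumed to be padded to a multiple of four with negative
             * indices, so this full-width loop stops at the first quadruplet that
             * contains padding (jjnr[jidx+3] < 0); the masked epilogue below handles
             * the remaining, partially filled quadruplet.
             */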
1097
1098             /* Get j neighbor index, and coordinate index */
1099             jnrA             = jjnr[jidx];
1100             jnrB             = jjnr[jidx+1];
1101             jnrC             = jjnr[jidx+2];
1102             jnrD             = jjnr[jidx+3];
1103             j_coord_offsetA  = DIM*jnrA;
1104             j_coord_offsetB  = DIM*jnrB;
1105             j_coord_offsetC  = DIM*jnrC;
1106             j_coord_offsetD  = DIM*jnrD;
1107
1108             /* load j atom coordinates */
1109             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1110                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1111                                                  &jx0,&jy0,&jz0);
1112
1113             /* Calculate displacement vector */
1114             dx00             = _mm256_sub_pd(ix0,jx0);
1115             dy00             = _mm256_sub_pd(iy0,jy0);
1116             dz00             = _mm256_sub_pd(iz0,jz0);
1117             dx10             = _mm256_sub_pd(ix1,jx0);
1118             dy10             = _mm256_sub_pd(iy1,jy0);
1119             dz10             = _mm256_sub_pd(iz1,jz0);
1120             dx20             = _mm256_sub_pd(ix2,jx0);
1121             dy20             = _mm256_sub_pd(iy2,jy0);
1122             dz20             = _mm256_sub_pd(iz2,jz0);
1123             dx30             = _mm256_sub_pd(ix3,jx0);
1124             dy30             = _mm256_sub_pd(iy3,jy0);
1125             dz30             = _mm256_sub_pd(iz3,jz0);
1126
1127             /* Calculate squared distance and things based on it */
1128             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1129             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1130             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1131             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
1132
1133             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1134             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1135             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1136             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
1137
1138             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1139             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1140             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1141             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
1142
1143             /* Load parameters for j particles */
1144             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
1145                                                                  charge+jnrC+0,charge+jnrD+0);
1146             vdwjidx0A        = 2*vdwtype[jnrA+0];
1147             vdwjidx0B        = 2*vdwtype[jnrB+0];
1148             vdwjidx0C        = 2*vdwtype[jnrC+0];
1149             vdwjidx0D        = 2*vdwtype[jnrD+0];
1150
1151             fjx0             = _mm256_setzero_pd();
1152             fjy0             = _mm256_setzero_pd();
1153             fjz0             = _mm256_setzero_pd();
1154
1155             /**************************
1156              * CALCULATE INTERACTIONS *
1157              **************************/
1158
1159             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1160             {
1161
1162             r00              = _mm256_mul_pd(rsq00,rinv00);
1163
1164             /* Compute parameters for interactions between i and j atoms */
1165             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
1166                                             vdwioffsetptr0+vdwjidx0B,
1167                                             vdwioffsetptr0+vdwjidx0C,
1168                                             vdwioffsetptr0+vdwjidx0D,
1169                                             &c6_00,&c12_00);
1170
1171             /* LENNARD-JONES DISPERSION/REPULSION */
1172
1173             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1174             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
1175             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1176             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1177             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
1178
1179             d                = _mm256_sub_pd(r00,rswitch);
1180             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1181             d2               = _mm256_mul_pd(d,d);
1182             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1183
1184             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1185
1186             /* Evaluate switch function */
1187             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1188             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
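            /* Equivalently: with the switched potential V*sw the force is
             * -(V*sw)' = -(V'*sw + V*dsw), so the scalar force -(V*sw)'/r becomes
             * (unswitched fscal)*sw - V*dsw/r, which is what is computed here.
             */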
1189             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1190
1191             fscal            = fvdw;
1192
1193             fscal            = _mm256_and_pd(fscal,cutoff_mask);
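            /* Lanes with rsq00 >= rcutoff2 are zeroed here; the any_lt() test above
             * only guarantees that at least one of the four lanes is inside the cutoff.
             */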
1194
1195             /* Calculate temporary vectorial force */
1196             tx               = _mm256_mul_pd(fscal,dx00);
1197             ty               = _mm256_mul_pd(fscal,dy00);
1198             tz               = _mm256_mul_pd(fscal,dz00);
1199
1200             /* Update vectorial force */
1201             fix0             = _mm256_add_pd(fix0,tx);
1202             fiy0             = _mm256_add_pd(fiy0,ty);
1203             fiz0             = _mm256_add_pd(fiz0,tz);
1204
1205             fjx0             = _mm256_add_pd(fjx0,tx);
1206             fjy0             = _mm256_add_pd(fjy0,ty);
1207             fjz0             = _mm256_add_pd(fjz0,tz);
1208
1209             }
1210
1211             /**************************
1212              * CALCULATE INTERACTIONS *
1213              **************************/
1214
1215             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1216             {
1217
1218             r10              = _mm256_mul_pd(rsq10,rinv10);
1219
1220             /* Compute parameters for interactions between i and j atoms */
1221             qq10             = _mm256_mul_pd(iq1,jq0);
1222
1223             /* EWALD ELECTROSTATICS */
1224
1225             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1226             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1227             ewitab           = _mm256_cvttpd_epi32(ewrt);
1228             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1229             ewitab           = _mm_slli_epi32(ewitab,2);
1230             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1231             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1232             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1233             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1234             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1235             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1236             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1237             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
1238             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1239
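            /* Per lane this lookup amounts to (scalar sketch, assuming the table holds
             * the Ewald real-space correction on a grid of spacing 1/tabq_scale):
             *   eps   = r*tabq_scale - floor(r*tabq_scale);
             *   f     = F[i] + eps*D[i];
             *   velec = qq*(1/r - (V[i] - 0.5/tabq_scale*eps*(F[i] + f)));
             *   felec = qq*(1/r)*(1/r^2 - f);
             * i.e. the tabulated part is interpolated and subtracted from the plain
             * Coulomb terms to leave the short-range Ewald contribution.
             */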
1240             d                = _mm256_sub_pd(r10,rswitch);
1241             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1242             d2               = _mm256_mul_pd(d,d);
1243             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1244
1245             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1246
1247             /* Evaluate switch function */
1248             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1249             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1250             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1251
1252             fscal            = felec;
1253
1254             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1255
1256             /* Calculate temporary vectorial force */
1257             tx               = _mm256_mul_pd(fscal,dx10);
1258             ty               = _mm256_mul_pd(fscal,dy10);
1259             tz               = _mm256_mul_pd(fscal,dz10);
1260
1261             /* Update vectorial force */
1262             fix1             = _mm256_add_pd(fix1,tx);
1263             fiy1             = _mm256_add_pd(fiy1,ty);
1264             fiz1             = _mm256_add_pd(fiz1,tz);
1265
1266             fjx0             = _mm256_add_pd(fjx0,tx);
1267             fjy0             = _mm256_add_pd(fjy0,ty);
1268             fjz0             = _mm256_add_pd(fjz0,tz);
1269
1270             }
1271
1272             /**************************
1273              * CALCULATE INTERACTIONS *
1274              **************************/
1275
1276             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1277             {
1278
1279             r20              = _mm256_mul_pd(rsq20,rinv20);
1280
1281             /* Compute parameters for interactions between i and j atoms */
1282             qq20             = _mm256_mul_pd(iq2,jq0);
1283
1284             /* EWALD ELECTROSTATICS */
1285
1286             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1287             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1288             ewitab           = _mm256_cvttpd_epi32(ewrt);
1289             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1290             ewitab           = _mm_slli_epi32(ewitab,2);
1291             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1292             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1293             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1294             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1295             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1296             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1297             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1298             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1299             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1300
1301             d                = _mm256_sub_pd(r20,rswitch);
1302             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1303             d2               = _mm256_mul_pd(d,d);
1304             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1305
1306             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1307
1308             /* Evaluate switch function */
1309             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1310             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
1311             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1312
1313             fscal            = felec;
1314
1315             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1316
1317             /* Calculate temporary vectorial force */
1318             tx               = _mm256_mul_pd(fscal,dx20);
1319             ty               = _mm256_mul_pd(fscal,dy20);
1320             tz               = _mm256_mul_pd(fscal,dz20);
1321
1322             /* Update vectorial force */
1323             fix2             = _mm256_add_pd(fix2,tx);
1324             fiy2             = _mm256_add_pd(fiy2,ty);
1325             fiz2             = _mm256_add_pd(fiz2,tz);
1326
1327             fjx0             = _mm256_add_pd(fjx0,tx);
1328             fjy0             = _mm256_add_pd(fjy0,ty);
1329             fjz0             = _mm256_add_pd(fjz0,tz);
1330
1331             }
1332
1333             /**************************
1334              * CALCULATE INTERACTIONS *
1335              **************************/
1336
1337             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1338             {
1339
1340             r30              = _mm256_mul_pd(rsq30,rinv30);
1341
1342             /* Compute parameters for interactions between i and j atoms */
1343             qq30             = _mm256_mul_pd(iq3,jq0);
1344
1345             /* EWALD ELECTROSTATICS */
1346
1347             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1348             ewrt             = _mm256_mul_pd(r30,ewtabscale);
1349             ewitab           = _mm256_cvttpd_epi32(ewrt);
1350             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1351             ewitab           = _mm_slli_epi32(ewitab,2);
1352             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1353             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1354             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1355             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1356             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1357             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1358             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1359             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
1360             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
1361
1362             d                = _mm256_sub_pd(r30,rswitch);
1363             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1364             d2               = _mm256_mul_pd(d,d);
1365             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1366
1367             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1368
1369             /* Evaluate switch function */
1370             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1371             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
1372             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
1373
1374             fscal            = felec;
1375
1376             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1377
1378             /* Calculate temporary vectorial force */
1379             tx               = _mm256_mul_pd(fscal,dx30);
1380             ty               = _mm256_mul_pd(fscal,dy30);
1381             tz               = _mm256_mul_pd(fscal,dz30);
1382
1383             /* Update vectorial force */
1384             fix3             = _mm256_add_pd(fix3,tx);
1385             fiy3             = _mm256_add_pd(fiy3,ty);
1386             fiz3             = _mm256_add_pd(fiz3,tz);
1387
1388             fjx0             = _mm256_add_pd(fjx0,tx);
1389             fjy0             = _mm256_add_pd(fjy0,ty);
1390             fjz0             = _mm256_add_pd(fjz0,tz);
1391
1392             }
1393
1394             fjptrA             = f+j_coord_offsetA;
1395             fjptrB             = f+j_coord_offsetB;
1396             fjptrC             = f+j_coord_offsetC;
1397             fjptrD             = f+j_coord_offsetD;
1398
1399             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
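            /* f[i] accumulates +fscal*d and f[j] is decremented by the same amount
             * (d = x_i - x_j), i.e. equal and opposite forces per Newton's third law.
             */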
1400
1401             /* Inner loop uses 245 flops */
1402         }
1403
1404         if(jidx<j_index_end)
1405         {
1406
1407             /* Get j neighbor index, and coordinate index */
1408             jnrlistA         = jjnr[jidx];
1409             jnrlistB         = jjnr[jidx+1];
1410             jnrlistC         = jjnr[jidx+2];
1411             jnrlistD         = jjnr[jidx+3];
1412             /* The sign of each element will be negative for non-real (padding) atoms.
1413              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1414              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
1415              */
1416             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1417
1418             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1419             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1420             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
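            /* The four 32-bit comparison results are widened to 64-bit lanes by the two
             * permutes above, so dummy_mask has one all-ones double lane for every
             * negative (padding) j index.
             */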
1421
1422             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1423             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1424             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1425             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1426             j_coord_offsetA  = DIM*jnrA;
1427             j_coord_offsetB  = DIM*jnrB;
1428             j_coord_offsetC  = DIM*jnrC;
1429             j_coord_offsetD  = DIM*jnrD;
1430
1431             /* load j atom coordinates */
1432             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1433                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1434                                                  &jx0,&jy0,&jz0);
1435
1436             /* Calculate displacement vector */
1437             dx00             = _mm256_sub_pd(ix0,jx0);
1438             dy00             = _mm256_sub_pd(iy0,jy0);
1439             dz00             = _mm256_sub_pd(iz0,jz0);
1440             dx10             = _mm256_sub_pd(ix1,jx0);
1441             dy10             = _mm256_sub_pd(iy1,jy0);
1442             dz10             = _mm256_sub_pd(iz1,jz0);
1443             dx20             = _mm256_sub_pd(ix2,jx0);
1444             dy20             = _mm256_sub_pd(iy2,jy0);
1445             dz20             = _mm256_sub_pd(iz2,jz0);
1446             dx30             = _mm256_sub_pd(ix3,jx0);
1447             dy30             = _mm256_sub_pd(iy3,jy0);
1448             dz30             = _mm256_sub_pd(iz3,jz0);
1449
1450             /* Calculate squared distance and things based on it */
1451             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1452             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1453             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1454             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
1455
1456             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1457             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1458             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1459             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
1460
1461             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1462             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1463             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1464             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
1465
1466             /* Load parameters for j particles */
1467             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
1468                                                                  charge+jnrC+0,charge+jnrD+0);
1469             vdwjidx0A        = 2*vdwtype[jnrA+0];
1470             vdwjidx0B        = 2*vdwtype[jnrB+0];
1471             vdwjidx0C        = 2*vdwtype[jnrC+0];
1472             vdwjidx0D        = 2*vdwtype[jnrD+0];
1473
1474             fjx0             = _mm256_setzero_pd();
1475             fjy0             = _mm256_setzero_pd();
1476             fjz0             = _mm256_setzero_pd();
1477
1478             /**************************
1479              * CALCULATE INTERACTIONS *
1480              **************************/
1481
1482             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1483             {
1484
1485             r00              = _mm256_mul_pd(rsq00,rinv00);
1486             r00              = _mm256_andnot_pd(dummy_mask,r00);
1487
1488             /* Compute parameters for interactions between i and j atoms */
1489             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
1490                                             vdwioffsetptr0+vdwjidx0B,
1491                                             vdwioffsetptr0+vdwjidx0C,
1492                                             vdwioffsetptr0+vdwjidx0D,
1493                                             &c6_00,&c12_00);
1494
1495             /* LENNARD-JONES DISPERSION/REPULSION */
1496
1497             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1498             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
1499             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1500             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1501             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
1502
1503             d                = _mm256_sub_pd(r00,rswitch);
1504             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1505             d2               = _mm256_mul_pd(d,d);
1506             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1507
1508             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1509
1510             /* Evaluate switch function */
1511             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1512             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
1513             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1514
1515             fscal            = fvdw;
1516
1517             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1518
1519             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1520
1521             /* Calculate temporary vectorial force */
1522             tx               = _mm256_mul_pd(fscal,dx00);
1523             ty               = _mm256_mul_pd(fscal,dy00);
1524             tz               = _mm256_mul_pd(fscal,dz00);
1525
1526             /* Update vectorial force */
1527             fix0             = _mm256_add_pd(fix0,tx);
1528             fiy0             = _mm256_add_pd(fiy0,ty);
1529             fiz0             = _mm256_add_pd(fiz0,tz);
1530
1531             fjx0             = _mm256_add_pd(fjx0,tx);
1532             fjy0             = _mm256_add_pd(fjy0,ty);
1533             fjz0             = _mm256_add_pd(fjz0,tz);
1534
1535             }
1536
1537             /**************************
1538              * CALCULATE INTERACTIONS *
1539              **************************/
1540
1541             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1542             {
1543
1544             r10              = _mm256_mul_pd(rsq10,rinv10);
1545             r10              = _mm256_andnot_pd(dummy_mask,r10);
1546
1547             /* Compute parameters for interactions between i and j atoms */
1548             qq10             = _mm256_mul_pd(iq1,jq0);
1549
1550             /* EWALD ELECTROSTATICS */
1551
1552             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1553             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1554             ewitab           = _mm256_cvttpd_epi32(ewrt);
1555             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1556             ewitab           = _mm_slli_epi32(ewitab,2);
1557             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1558             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1559             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1560             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1561             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1562             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1563             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1564             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
1565             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1566
1567             d                = _mm256_sub_pd(r10,rswitch);
1568             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1569             d2               = _mm256_mul_pd(d,d);
1570             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1571
1572             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1573
1574             /* Evaluate switch function */
1575             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1576             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1577             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1578
1579             fscal            = felec;
1580
1581             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1582
1583             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1584
1585             /* Calculate temporary vectorial force */
1586             tx               = _mm256_mul_pd(fscal,dx10);
1587             ty               = _mm256_mul_pd(fscal,dy10);
1588             tz               = _mm256_mul_pd(fscal,dz10);
1589
1590             /* Update vectorial force */
1591             fix1             = _mm256_add_pd(fix1,tx);
1592             fiy1             = _mm256_add_pd(fiy1,ty);
1593             fiz1             = _mm256_add_pd(fiz1,tz);
1594
1595             fjx0             = _mm256_add_pd(fjx0,tx);
1596             fjy0             = _mm256_add_pd(fjy0,ty);
1597             fjz0             = _mm256_add_pd(fjz0,tz);
1598
1599             }
1600
1601             /**************************
1602              * CALCULATE INTERACTIONS *
1603              **************************/
1604
1605             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1606             {
1607
1608             r20              = _mm256_mul_pd(rsq20,rinv20);
1609             r20              = _mm256_andnot_pd(dummy_mask,r20);
1610
1611             /* Compute parameters for interactions between i and j atoms */
1612             qq20             = _mm256_mul_pd(iq2,jq0);
1613
1614             /* EWALD ELECTROSTATICS */
1615
1616             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1617             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1618             ewitab           = _mm256_cvttpd_epi32(ewrt);
1619             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1620             ewitab           = _mm_slli_epi32(ewitab,2);
1621             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1622             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1623             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1624             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1625             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1626             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1627             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1628             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1629             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1630
1631             d                = _mm256_sub_pd(r20,rswitch);
1632             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1633             d2               = _mm256_mul_pd(d,d);
1634             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1635
1636             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1637
1638             /* Evaluate switch function */
1639             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1640             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
1641             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1642
1643             fscal            = felec;
1644
1645             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1646
1647             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1648
1649             /* Calculate temporary vectorial force */
1650             tx               = _mm256_mul_pd(fscal,dx20);
1651             ty               = _mm256_mul_pd(fscal,dy20);
1652             tz               = _mm256_mul_pd(fscal,dz20);
1653
1654             /* Update vectorial force */
1655             fix2             = _mm256_add_pd(fix2,tx);
1656             fiy2             = _mm256_add_pd(fiy2,ty);
1657             fiz2             = _mm256_add_pd(fiz2,tz);
1658
1659             fjx0             = _mm256_add_pd(fjx0,tx);
1660             fjy0             = _mm256_add_pd(fjy0,ty);
1661             fjz0             = _mm256_add_pd(fjz0,tz);
1662
1663             }
1664
1665             /**************************
1666              * CALCULATE INTERACTIONS *
1667              **************************/
1668
1669             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1670             {
1671
1672             r30              = _mm256_mul_pd(rsq30,rinv30);
1673             r30              = _mm256_andnot_pd(dummy_mask,r30);
1674
1675             /* Compute parameters for interactions between i and j atoms */
1676             qq30             = _mm256_mul_pd(iq3,jq0);
1677
1678             /* EWALD ELECTROSTATICS */
1679
1680             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1681             ewrt             = _mm256_mul_pd(r30,ewtabscale);
1682             ewitab           = _mm256_cvttpd_epi32(ewrt);
1683             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1684             ewitab           = _mm_slli_epi32(ewitab,2);
1685             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1686             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1687             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1688             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1689             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1690             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1691             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1692             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
1693             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
1694
1695             d                = _mm256_sub_pd(r30,rswitch);
1696             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1697             d2               = _mm256_mul_pd(d,d);
1698             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1699
1700             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1701
1702             /* Evaluate switch function */
1703             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1704             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
1705             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
1706
1707             fscal            = felec;
1708
1709             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1710
1711             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1712
1713             /* Calculate temporary vectorial force */
1714             tx               = _mm256_mul_pd(fscal,dx30);
1715             ty               = _mm256_mul_pd(fscal,dy30);
1716             tz               = _mm256_mul_pd(fscal,dz30);
1717
1718             /* Update vectorial force */
1719             fix3             = _mm256_add_pd(fix3,tx);
1720             fiy3             = _mm256_add_pd(fiy3,ty);
1721             fiz3             = _mm256_add_pd(fiz3,tz);
1722
1723             fjx0             = _mm256_add_pd(fjx0,tx);
1724             fjy0             = _mm256_add_pd(fjy0,ty);
1725             fjz0             = _mm256_add_pd(fjz0,tz);
1726
1727             }
1728
1729             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1730             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1731             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1732             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
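            /* j forces from dummy (padding) lanes are redirected to the local scratch
             * buffer instead of f[], so masked-out lanes never touch real atoms.
             */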
1733
1734             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
1735
1736             /* Inner loop uses 249 flops */
1737         }
1738
1739         /* End of innermost loop */
1740
1741         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1742                                                  f+i_coord_offset,fshift+i_shift_offset);
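        /* The shift forces accumulated into fshift are assumed to be used later for
         * the virial, which is why the shift index is tracked per neighborlist.
         */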
1743
1744         /* Increment number of inner iterations */
1745         inneriter                  += j_index_end - j_index_start;
1746
1747         /* Outer loop uses 24 flops */
1748     }
1749
1750     /* Increment number of outer iterations */
1751     outeriter        += nri;
1752
1753     /* Update outer/inner flops */
1754
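    /* inc_nrnb is assumed to add this kernel's approximate flop counts to the
     * run's performance accounting.
     */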
1755     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*249);
1756 }