1 /*
2  * Note: this file was generated by the Gromacs avx_256_double kernel generator.
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  * Copyright (c) 2001-2012, The GROMACS Development Team
9  *
10  * Gromacs is a library for molecular simulation and trajectory analysis,
11  * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
12  * a full list of developers and information, check out http://www.gromacs.org
13  *
14  * This program is free software; you can redistribute it and/or modify it under
15  * the terms of the GNU Lesser General Public License as published by the Free
16  * Software Foundation; either version 2 of the License, or (at your option) any
17  * later version.
18  *
19  * To help fund GROMACS development, we humbly ask that you cite
20  * the papers people have written on it - you can find them on the website.
21  */
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <math.h>
27
28 #include "../nb_kernel.h"
29 #include "types/simple.h"
30 #include "vec.h"
31 #include "nrnb.h"
32
33 #include "gmx_math_x86_avx_256_double.h"
34 #include "kernelutil_x86_avx_256_double.h"
35
36 /*
37  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_double
38  * Electrostatics interaction: Ewald
39  * VdW interaction:            LennardJones
40  * Geometry:                   Water4-Particle
41  * Calculate force/pot:        PotentialAndForce
42  */
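/*
 * A note on the geometry, inferred from the parameter setup below: "Water4-Particle"
 * means each i entry is a four-site water (e.g. a TIP4P-like model) interacting with
 * single j particles. Site 0 enters only the Lennard-Jones part (vdwioffsetptr0),
 * sites 1-3 enter only the Ewald electrostatics (iq1..iq3), and the inner loop is
 * unrolled over four j atoms packed into one 256-bit register (suffixes A-D).
 */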
43 void
44 nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_double
45                     (t_nblist * gmx_restrict                nlist,
46                      rvec * gmx_restrict                    xx,
47                      rvec * gmx_restrict                    ff,
48                      t_forcerec * gmx_restrict              fr,
49                      t_mdatoms * gmx_restrict               mdatoms,
50                      nb_kernel_data_t * gmx_restrict        kernel_data,
51                      t_nrnb * gmx_restrict                  nrnb)
52 {
53     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
54      * just 0 for non-waters.
55      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
56      * jnr indices corresponding to data put in the four positions in the SIMD register.
57      */
58     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
59     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
60     int              jnrA,jnrB,jnrC,jnrD;
61     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
62     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
63     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
64     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
65     real             rcutoff_scalar;
66     real             *shiftvec,*fshift,*x,*f;
67     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
68     real             scratch[4*DIM];
69     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
70     real *           vdwioffsetptr0;
71     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
72     real *           vdwioffsetptr1;
73     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
74     real *           vdwioffsetptr2;
75     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
76     real *           vdwioffsetptr3;
77     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
78     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
79     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
80     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
81     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
82     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
83     __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
84     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
85     real             *charge;
86     int              nvdwtype;
87     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
88     int              *vdwtype;
89     real             *vdwparam;
90     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
91     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
92     __m128i          ewitab;
93     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
94     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
95     real             *ewtab;
96     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
97     real             rswitch_scalar,d_scalar;
98     __m256d          dummy_mask,cutoff_mask;
99     __m128           tmpmask0,tmpmask1;
100     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
101     __m256d          one     = _mm256_set1_pd(1.0);
102     __m256d          two     = _mm256_set1_pd(2.0);
103     x                = xx[0];
104     f                = ff[0];
105
106     nri              = nlist->nri;
107     iinr             = nlist->iinr;
108     jindex           = nlist->jindex;
109     jjnr             = nlist->jjnr;
110     shiftidx         = nlist->shift;
111     gid              = nlist->gid;
112     shiftvec         = fr->shift_vec[0];
113     fshift           = fr->fshift[0];
114     facel            = _mm256_set1_pd(fr->epsfac);
115     charge           = mdatoms->chargeA;
116     nvdwtype         = fr->ntype;
117     vdwparam         = fr->nbfp;
118     vdwtype          = mdatoms->typeA;
119
120     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
121     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
122     beta2            = _mm256_mul_pd(beta,beta);
123     beta3            = _mm256_mul_pd(beta,beta2);
124
125     ewtab            = fr->ic->tabq_coul_FDV0;
126     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
127     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
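    /* The FDV0 table layout packs four doubles per grid point: F, the increment to
     * the next F, V, and a padding value. This is why the table index is shifted
     * left by 2 (multiplied by 4) in the inner loop, one 256-bit load fetches the
     * whole quadruplet for a j atom, and a 4x4 transpose then regroups the values
     * into per-quantity registers (the fourth element only fills the transpose).
     */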
128
129     /* Setup water-specific parameters */
130     inr              = nlist->iinr[0];
131     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
132     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
133     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
134     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
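    /* The VdW parameter table holds two entries per i,j type combination, so the
     * i-atom type selects a row with offset 2*nvdwtype*vdwtype[i] here, and each
     * j atom adds a column offset of 2*vdwtype[j] (vdwjidx0A..D in the inner loop)
     * before the pairwise c6/c12 parameters are gathered.
     */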
135
136     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
137     rcutoff_scalar   = fr->rcoulomb;
138     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
139     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
140
141     rswitch_scalar   = fr->rcoulomb_switch;
142     rswitch          = _mm256_set1_pd(rswitch_scalar);
143     /* Setup switch parameters */
144     d_scalar         = rcutoff_scalar-rswitch_scalar;
145     d                = _mm256_set1_pd(d_scalar);
146     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
147     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
148     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
149     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
150     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
151     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
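    /* With d = r - rswitch these coefficients evaluate the switch polynomial
     *   sw(d)  = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5
     * and its derivative
     *   dsw(d) = swF2*d^2 + swF3*d^3 + swF4*d^4,
     * so sw falls smoothly from 1 at rswitch to 0 at rcutoff with zero slope at both ends.
     */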
152
153     /* Avoid stupid compiler warnings */
154     jnrA = jnrB = jnrC = jnrD = 0;
155     j_coord_offsetA = 0;
156     j_coord_offsetB = 0;
157     j_coord_offsetC = 0;
158     j_coord_offsetD = 0;
159
160     outeriter        = 0;
161     inneriter        = 0;
162
163     for(iidx=0;iidx<4*DIM;iidx++)
164     {
165         scratch[iidx] = 0.0;
166     }
167
168     /* Start outer loop over neighborlists */
169     for(iidx=0; iidx<nri; iidx++)
170     {
171         /* Load shift vector for this list */
172         i_shift_offset   = DIM*shiftidx[iidx];
173
174         /* Load limits for loop over neighbors */
175         j_index_start    = jindex[iidx];
176         j_index_end      = jindex[iidx+1];
177
178         /* Get outer coordinate index */
179         inr              = iinr[iidx];
180         i_coord_offset   = DIM*inr;
181
182         /* Load i particle coords and add shift vector */
183         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
184                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
185
186         fix0             = _mm256_setzero_pd();
187         fiy0             = _mm256_setzero_pd();
188         fiz0             = _mm256_setzero_pd();
189         fix1             = _mm256_setzero_pd();
190         fiy1             = _mm256_setzero_pd();
191         fiz1             = _mm256_setzero_pd();
192         fix2             = _mm256_setzero_pd();
193         fiy2             = _mm256_setzero_pd();
194         fiz2             = _mm256_setzero_pd();
195         fix3             = _mm256_setzero_pd();
196         fiy3             = _mm256_setzero_pd();
197         fiz3             = _mm256_setzero_pd();
198
199         /* Reset potential sums */
200         velecsum         = _mm256_setzero_pd();
201         vvdwsum          = _mm256_setzero_pd();
202
203         /* Start inner kernel loop */
204         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
205         {
206
207             /* Get j neighbor index, and coordinate index */
208             jnrA             = jjnr[jidx];
209             jnrB             = jjnr[jidx+1];
210             jnrC             = jjnr[jidx+2];
211             jnrD             = jjnr[jidx+3];
212             j_coord_offsetA  = DIM*jnrA;
213             j_coord_offsetB  = DIM*jnrB;
214             j_coord_offsetC  = DIM*jnrC;
215             j_coord_offsetD  = DIM*jnrD;
216
217             /* load j atom coordinates */
218             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
219                                                  x+j_coord_offsetC,x+j_coord_offsetD,
220                                                  &jx0,&jy0,&jz0);
221
222             /* Calculate displacement vector */
223             dx00             = _mm256_sub_pd(ix0,jx0);
224             dy00             = _mm256_sub_pd(iy0,jy0);
225             dz00             = _mm256_sub_pd(iz0,jz0);
226             dx10             = _mm256_sub_pd(ix1,jx0);
227             dy10             = _mm256_sub_pd(iy1,jy0);
228             dz10             = _mm256_sub_pd(iz1,jz0);
229             dx20             = _mm256_sub_pd(ix2,jx0);
230             dy20             = _mm256_sub_pd(iy2,jy0);
231             dz20             = _mm256_sub_pd(iz2,jz0);
232             dx30             = _mm256_sub_pd(ix3,jx0);
233             dy30             = _mm256_sub_pd(iy3,jy0);
234             dz30             = _mm256_sub_pd(iz3,jz0);
235
236             /* Calculate squared distance and things based on it */
237             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
238             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
239             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
240             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
241
242             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
243             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
244             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
245             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
246
247             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
248             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
249             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
250             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
251
252             /* Load parameters for j particles */
253             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
254                                                                  charge+jnrC+0,charge+jnrD+0);
255             vdwjidx0A        = 2*vdwtype[jnrA+0];
256             vdwjidx0B        = 2*vdwtype[jnrB+0];
257             vdwjidx0C        = 2*vdwtype[jnrC+0];
258             vdwjidx0D        = 2*vdwtype[jnrD+0];
259
260             fjx0             = _mm256_setzero_pd();
261             fjy0             = _mm256_setzero_pd();
262             fjz0             = _mm256_setzero_pd();
263
264             /**************************
265              * CALCULATE INTERACTIONS *
266              **************************/
267
268             if (gmx_mm256_any_lt(rsq00,rcutoff2))
269             {
270
271             r00              = _mm256_mul_pd(rsq00,rinv00);
272
273             /* Compute parameters for interactions between i and j atoms */
274             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
275                                             vdwioffsetptr0+vdwjidx0B,
276                                             vdwioffsetptr0+vdwjidx0C,
277                                             vdwioffsetptr0+vdwjidx0D,
278                                             &c6_00,&c12_00);
279
280             /* LENNARD-JONES DISPERSION/REPULSION */
281
282             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
283             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
284             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
285             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
286             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
287
288             d                = _mm256_sub_pd(r00,rswitch);
289             d                = _mm256_max_pd(d,_mm256_setzero_pd());
290             d2               = _mm256_mul_pd(d,d);
291             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
292
293             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
294
295             /* Evaluate switch function */
296             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
297             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
298             vvdw             = _mm256_mul_pd(vvdw,sw);
299             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
300
301             /* Update potential sum for this i atom from the interaction with this j atom. */
302             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
303             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
304
305             fscal            = fvdw;
306
307             fscal            = _mm256_and_pd(fscal,cutoff_mask);
308
309             /* Calculate temporary vectorial force */
310             tx               = _mm256_mul_pd(fscal,dx00);
311             ty               = _mm256_mul_pd(fscal,dy00);
312             tz               = _mm256_mul_pd(fscal,dz00);
313
314             /* Update vectorial force */
315             fix0             = _mm256_add_pd(fix0,tx);
316             fiy0             = _mm256_add_pd(fiy0,ty);
317             fiz0             = _mm256_add_pd(fiz0,tz);
318
319             fjx0             = _mm256_add_pd(fjx0,tx);
320             fjy0             = _mm256_add_pd(fjy0,ty);
321             fjz0             = _mm256_add_pd(fjz0,tz);
322
323             }
324
325             /**************************
326              * CALCULATE INTERACTIONS *
327              **************************/
328
329             if (gmx_mm256_any_lt(rsq10,rcutoff2))
330             {
331
332             r10              = _mm256_mul_pd(rsq10,rinv10);
333
334             /* Compute parameters for interactions between i and j atoms */
335             qq10             = _mm256_mul_pd(iq1,jq0);
336
337             /* EWALD ELECTROSTATICS */
338
339             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
340             ewrt             = _mm256_mul_pd(r10,ewtabscale);
341             ewitab           = _mm256_cvttpd_epi32(ewrt);
342             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
343             ewitab           = _mm_slli_epi32(ewitab,2);
344             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
345             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
346             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
347             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
348             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
349             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
350             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
351             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
352             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
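            /* ewtabF/ewtabD interpolate the tabulated Ewald force linearly in the
             * fractional table coordinate eweps, and velec integrates that same
             * segment to get the tabulated potential. The last two lines fold in
             * 1/r and the charge product: velec = qq*(1/r - Vtab) and
             * felec = qq*(1/r^2 - Ftab)/r, i.e. the scalar force already divided
             * by r, ready to be multiplied by the dx/dy/dz components.
             */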
353
354             d                = _mm256_sub_pd(r10,rswitch);
355             d                = _mm256_max_pd(d,_mm256_setzero_pd());
356             d2               = _mm256_mul_pd(d,d);
357             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
358
359             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
360
361             /* Evaluate switch function */
362             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
363             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
364             velec            = _mm256_mul_pd(velec,sw);
365             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
366
367             /* Update potential sum for this i atom from the interaction with this j atom. */
368             velec            = _mm256_and_pd(velec,cutoff_mask);
369             velecsum         = _mm256_add_pd(velecsum,velec);
370
371             fscal            = felec;
372
373             fscal            = _mm256_and_pd(fscal,cutoff_mask);
374
375             /* Calculate temporary vectorial force */
376             tx               = _mm256_mul_pd(fscal,dx10);
377             ty               = _mm256_mul_pd(fscal,dy10);
378             tz               = _mm256_mul_pd(fscal,dz10);
379
380             /* Update vectorial force */
381             fix1             = _mm256_add_pd(fix1,tx);
382             fiy1             = _mm256_add_pd(fiy1,ty);
383             fiz1             = _mm256_add_pd(fiz1,tz);
384
385             fjx0             = _mm256_add_pd(fjx0,tx);
386             fjy0             = _mm256_add_pd(fjy0,ty);
387             fjz0             = _mm256_add_pd(fjz0,tz);
388
389             }
390
391             /**************************
392              * CALCULATE INTERACTIONS *
393              **************************/
394
395             if (gmx_mm256_any_lt(rsq20,rcutoff2))
396             {
397
398             r20              = _mm256_mul_pd(rsq20,rinv20);
399
400             /* Compute parameters for interactions between i and j atoms */
401             qq20             = _mm256_mul_pd(iq2,jq0);
402
403             /* EWALD ELECTROSTATICS */
404
405             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
406             ewrt             = _mm256_mul_pd(r20,ewtabscale);
407             ewitab           = _mm256_cvttpd_epi32(ewrt);
408             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
409             ewitab           = _mm_slli_epi32(ewitab,2);
410             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
411             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
412             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
413             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
414             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
415             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
416             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
417             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
418             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
419
420             d                = _mm256_sub_pd(r20,rswitch);
421             d                = _mm256_max_pd(d,_mm256_setzero_pd());
422             d2               = _mm256_mul_pd(d,d);
423             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
424
425             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
426
427             /* Evaluate switch function */
428             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
429             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
430             velec            = _mm256_mul_pd(velec,sw);
431             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
432
433             /* Update potential sum for this i atom from the interaction with this j atom. */
434             velec            = _mm256_and_pd(velec,cutoff_mask);
435             velecsum         = _mm256_add_pd(velecsum,velec);
436
437             fscal            = felec;
438
439             fscal            = _mm256_and_pd(fscal,cutoff_mask);
440
441             /* Calculate temporary vectorial force */
442             tx               = _mm256_mul_pd(fscal,dx20);
443             ty               = _mm256_mul_pd(fscal,dy20);
444             tz               = _mm256_mul_pd(fscal,dz20);
445
446             /* Update vectorial force */
447             fix2             = _mm256_add_pd(fix2,tx);
448             fiy2             = _mm256_add_pd(fiy2,ty);
449             fiz2             = _mm256_add_pd(fiz2,tz);
450
451             fjx0             = _mm256_add_pd(fjx0,tx);
452             fjy0             = _mm256_add_pd(fjy0,ty);
453             fjz0             = _mm256_add_pd(fjz0,tz);
454
455             }
456
457             /**************************
458              * CALCULATE INTERACTIONS *
459              **************************/
460
461             if (gmx_mm256_any_lt(rsq30,rcutoff2))
462             {
463
464             r30              = _mm256_mul_pd(rsq30,rinv30);
465
466             /* Compute parameters for interactions between i and j atoms */
467             qq30             = _mm256_mul_pd(iq3,jq0);
468
469             /* EWALD ELECTROSTATICS */
470
471             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
472             ewrt             = _mm256_mul_pd(r30,ewtabscale);
473             ewitab           = _mm256_cvttpd_epi32(ewrt);
474             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
475             ewitab           = _mm_slli_epi32(ewitab,2);
476             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
477             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
478             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
479             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
480             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
481             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
482             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
483             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
484             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
485
486             d                = _mm256_sub_pd(r30,rswitch);
487             d                = _mm256_max_pd(d,_mm256_setzero_pd());
488             d2               = _mm256_mul_pd(d,d);
489             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
490
491             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
492
493             /* Evaluate switch function */
494             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
495             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
496             velec            = _mm256_mul_pd(velec,sw);
497             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
498
499             /* Update potential sum for this i atom from the interaction with this j atom. */
500             velec            = _mm256_and_pd(velec,cutoff_mask);
501             velecsum         = _mm256_add_pd(velecsum,velec);
502
503             fscal            = felec;
504
505             fscal            = _mm256_and_pd(fscal,cutoff_mask);
506
507             /* Calculate temporary vectorial force */
508             tx               = _mm256_mul_pd(fscal,dx30);
509             ty               = _mm256_mul_pd(fscal,dy30);
510             tz               = _mm256_mul_pd(fscal,dz30);
511
512             /* Update vectorial force */
513             fix3             = _mm256_add_pd(fix3,tx);
514             fiy3             = _mm256_add_pd(fiy3,ty);
515             fiz3             = _mm256_add_pd(fiz3,tz);
516
517             fjx0             = _mm256_add_pd(fjx0,tx);
518             fjy0             = _mm256_add_pd(fjy0,ty);
519             fjz0             = _mm256_add_pd(fjz0,tz);
520
521             }
522
523             fjptrA             = f+j_coord_offsetA;
524             fjptrB             = f+j_coord_offsetB;
525             fjptrC             = f+j_coord_offsetC;
526             fjptrD             = f+j_coord_offsetD;
527
528             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
529
530             /* Inner loop uses 257 flops */
531         }
532
533         if(jidx<j_index_end)
534         {
535
536             /* Get j neighbor index, and coordinate index */
537             jnrlistA         = jjnr[jidx];
538             jnrlistB         = jjnr[jidx+1];
539             jnrlistC         = jjnr[jidx+2];
540             jnrlistD         = jjnr[jidx+3];
541             /* Sign of each element will be negative for non-real atoms.
542              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
543              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
544              */
545             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
546
547             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
548             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
549             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
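            /* The two permutes duplicate each 32-bit comparison result into both
             * halves of a 64-bit lane before the halves are combined, so dummy_mask
             * is all-ones in exactly those double lanes that correspond to padded
             * (negative) j indices; it is applied with andnot below to zero their
             * contributions to r, the potentials and the forces.
             */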
550
551             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
552             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
553             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
554             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
555             j_coord_offsetA  = DIM*jnrA;
556             j_coord_offsetB  = DIM*jnrB;
557             j_coord_offsetC  = DIM*jnrC;
558             j_coord_offsetD  = DIM*jnrD;
559
560             /* load j atom coordinates */
561             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
562                                                  x+j_coord_offsetC,x+j_coord_offsetD,
563                                                  &jx0,&jy0,&jz0);
564
565             /* Calculate displacement vector */
566             dx00             = _mm256_sub_pd(ix0,jx0);
567             dy00             = _mm256_sub_pd(iy0,jy0);
568             dz00             = _mm256_sub_pd(iz0,jz0);
569             dx10             = _mm256_sub_pd(ix1,jx0);
570             dy10             = _mm256_sub_pd(iy1,jy0);
571             dz10             = _mm256_sub_pd(iz1,jz0);
572             dx20             = _mm256_sub_pd(ix2,jx0);
573             dy20             = _mm256_sub_pd(iy2,jy0);
574             dz20             = _mm256_sub_pd(iz2,jz0);
575             dx30             = _mm256_sub_pd(ix3,jx0);
576             dy30             = _mm256_sub_pd(iy3,jy0);
577             dz30             = _mm256_sub_pd(iz3,jz0);
578
579             /* Calculate squared distance and things based on it */
580             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
581             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
582             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
583             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
584
585             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
586             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
587             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
588             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
589
590             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
591             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
592             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
593             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
594
595             /* Load parameters for j particles */
596             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
597                                                                  charge+jnrC+0,charge+jnrD+0);
598             vdwjidx0A        = 2*vdwtype[jnrA+0];
599             vdwjidx0B        = 2*vdwtype[jnrB+0];
600             vdwjidx0C        = 2*vdwtype[jnrC+0];
601             vdwjidx0D        = 2*vdwtype[jnrD+0];
602
603             fjx0             = _mm256_setzero_pd();
604             fjy0             = _mm256_setzero_pd();
605             fjz0             = _mm256_setzero_pd();
606
607             /**************************
608              * CALCULATE INTERACTIONS *
609              **************************/
610
611             if (gmx_mm256_any_lt(rsq00,rcutoff2))
612             {
613
614             r00              = _mm256_mul_pd(rsq00,rinv00);
615             r00              = _mm256_andnot_pd(dummy_mask,r00);
616
617             /* Compute parameters for interactions between i and j atoms */
618             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
619                                             vdwioffsetptr0+vdwjidx0B,
620                                             vdwioffsetptr0+vdwjidx0C,
621                                             vdwioffsetptr0+vdwjidx0D,
622                                             &c6_00,&c12_00);
623
624             /* LENNARD-JONES DISPERSION/REPULSION */
625
626             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
627             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
628             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
629             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
630             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
631
632             d                = _mm256_sub_pd(r00,rswitch);
633             d                = _mm256_max_pd(d,_mm256_setzero_pd());
634             d2               = _mm256_mul_pd(d,d);
635             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
636
637             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
638
639             /* Evaluate switch function */
640             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
641             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
642             vvdw             = _mm256_mul_pd(vvdw,sw);
643             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
644
645             /* Update potential sum for this i atom from the interaction with this j atom. */
646             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
647             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
648             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
649
650             fscal            = fvdw;
651
652             fscal            = _mm256_and_pd(fscal,cutoff_mask);
653
654             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
655
656             /* Calculate temporary vectorial force */
657             tx               = _mm256_mul_pd(fscal,dx00);
658             ty               = _mm256_mul_pd(fscal,dy00);
659             tz               = _mm256_mul_pd(fscal,dz00);
660
661             /* Update vectorial force */
662             fix0             = _mm256_add_pd(fix0,tx);
663             fiy0             = _mm256_add_pd(fiy0,ty);
664             fiz0             = _mm256_add_pd(fiz0,tz);
665
666             fjx0             = _mm256_add_pd(fjx0,tx);
667             fjy0             = _mm256_add_pd(fjy0,ty);
668             fjz0             = _mm256_add_pd(fjz0,tz);
669
670             }
671
672             /**************************
673              * CALCULATE INTERACTIONS *
674              **************************/
675
676             if (gmx_mm256_any_lt(rsq10,rcutoff2))
677             {
678
679             r10              = _mm256_mul_pd(rsq10,rinv10);
680             r10              = _mm256_andnot_pd(dummy_mask,r10);
681
682             /* Compute parameters for interactions between i and j atoms */
683             qq10             = _mm256_mul_pd(iq1,jq0);
684
685             /* EWALD ELECTROSTATICS */
686
687             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
688             ewrt             = _mm256_mul_pd(r10,ewtabscale);
689             ewitab           = _mm256_cvttpd_epi32(ewrt);
690             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
691             ewitab           = _mm_slli_epi32(ewitab,2);
692             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
693             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
694             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
695             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
696             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
697             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
698             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
699             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
700             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
701
702             d                = _mm256_sub_pd(r10,rswitch);
703             d                = _mm256_max_pd(d,_mm256_setzero_pd());
704             d2               = _mm256_mul_pd(d,d);
705             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
706
707             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
708
709             /* Evaluate switch function */
710             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
711             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
712             velec            = _mm256_mul_pd(velec,sw);
713             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
714
715             /* Update potential sum for this i atom from the interaction with this j atom. */
716             velec            = _mm256_and_pd(velec,cutoff_mask);
717             velec            = _mm256_andnot_pd(dummy_mask,velec);
718             velecsum         = _mm256_add_pd(velecsum,velec);
719
720             fscal            = felec;
721
722             fscal            = _mm256_and_pd(fscal,cutoff_mask);
723
724             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
725
726             /* Calculate temporary vectorial force */
727             tx               = _mm256_mul_pd(fscal,dx10);
728             ty               = _mm256_mul_pd(fscal,dy10);
729             tz               = _mm256_mul_pd(fscal,dz10);
730
731             /* Update vectorial force */
732             fix1             = _mm256_add_pd(fix1,tx);
733             fiy1             = _mm256_add_pd(fiy1,ty);
734             fiz1             = _mm256_add_pd(fiz1,tz);
735
736             fjx0             = _mm256_add_pd(fjx0,tx);
737             fjy0             = _mm256_add_pd(fjy0,ty);
738             fjz0             = _mm256_add_pd(fjz0,tz);
739
740             }
741
742             /**************************
743              * CALCULATE INTERACTIONS *
744              **************************/
745
746             if (gmx_mm256_any_lt(rsq20,rcutoff2))
747             {
748
749             r20              = _mm256_mul_pd(rsq20,rinv20);
750             r20              = _mm256_andnot_pd(dummy_mask,r20);
751
752             /* Compute parameters for interactions between i and j atoms */
753             qq20             = _mm256_mul_pd(iq2,jq0);
754
755             /* EWALD ELECTROSTATICS */
756
757             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
758             ewrt             = _mm256_mul_pd(r20,ewtabscale);
759             ewitab           = _mm256_cvttpd_epi32(ewrt);
760             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
761             ewitab           = _mm_slli_epi32(ewitab,2);
762             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
763             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
764             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
765             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
766             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
767             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
768             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
769             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
770             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
771
772             d                = _mm256_sub_pd(r20,rswitch);
773             d                = _mm256_max_pd(d,_mm256_setzero_pd());
774             d2               = _mm256_mul_pd(d,d);
775             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
776
777             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
778
779             /* Evaluate switch function */
780             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
781             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
782             velec            = _mm256_mul_pd(velec,sw);
783             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
784
785             /* Update potential sum for this i atom from the interaction with this j atom. */
786             velec            = _mm256_and_pd(velec,cutoff_mask);
787             velec            = _mm256_andnot_pd(dummy_mask,velec);
788             velecsum         = _mm256_add_pd(velecsum,velec);
789
790             fscal            = felec;
791
792             fscal            = _mm256_and_pd(fscal,cutoff_mask);
793
794             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
795
796             /* Calculate temporary vectorial force */
797             tx               = _mm256_mul_pd(fscal,dx20);
798             ty               = _mm256_mul_pd(fscal,dy20);
799             tz               = _mm256_mul_pd(fscal,dz20);
800
801             /* Update vectorial force */
802             fix2             = _mm256_add_pd(fix2,tx);
803             fiy2             = _mm256_add_pd(fiy2,ty);
804             fiz2             = _mm256_add_pd(fiz2,tz);
805
806             fjx0             = _mm256_add_pd(fjx0,tx);
807             fjy0             = _mm256_add_pd(fjy0,ty);
808             fjz0             = _mm256_add_pd(fjz0,tz);
809
810             }
811
812             /**************************
813              * CALCULATE INTERACTIONS *
814              **************************/
815
816             if (gmx_mm256_any_lt(rsq30,rcutoff2))
817             {
818
819             r30              = _mm256_mul_pd(rsq30,rinv30);
820             r30              = _mm256_andnot_pd(dummy_mask,r30);
821
822             /* Compute parameters for interactions between i and j atoms */
823             qq30             = _mm256_mul_pd(iq3,jq0);
824
825             /* EWALD ELECTROSTATICS */
826
827             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
828             ewrt             = _mm256_mul_pd(r30,ewtabscale);
829             ewitab           = _mm256_cvttpd_epi32(ewrt);
830             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
831             ewitab           = _mm_slli_epi32(ewitab,2);
832             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
833             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
834             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
835             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
836             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
837             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
838             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
839             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
840             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
841
842             d                = _mm256_sub_pd(r30,rswitch);
843             d                = _mm256_max_pd(d,_mm256_setzero_pd());
844             d2               = _mm256_mul_pd(d,d);
845             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
846
847             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
848
849             /* Evaluate switch function */
850             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
851             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
852             velec            = _mm256_mul_pd(velec,sw);
853             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
854
855             /* Update potential sum for this i atom from the interaction with this j atom. */
856             velec            = _mm256_and_pd(velec,cutoff_mask);
857             velec            = _mm256_andnot_pd(dummy_mask,velec);
858             velecsum         = _mm256_add_pd(velecsum,velec);
859
860             fscal            = felec;
861
862             fscal            = _mm256_and_pd(fscal,cutoff_mask);
863
864             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
865
866             /* Calculate temporary vectorial force */
867             tx               = _mm256_mul_pd(fscal,dx30);
868             ty               = _mm256_mul_pd(fscal,dy30);
869             tz               = _mm256_mul_pd(fscal,dz30);
870
871             /* Update vectorial force */
872             fix3             = _mm256_add_pd(fix3,tx);
873             fiy3             = _mm256_add_pd(fiy3,ty);
874             fiz3             = _mm256_add_pd(fiz3,tz);
875
876             fjx0             = _mm256_add_pd(fjx0,tx);
877             fjy0             = _mm256_add_pd(fjy0,ty);
878             fjz0             = _mm256_add_pd(fjz0,tz);
879
880             }
881
882             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
883             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
884             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
885             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
886
887             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
888
889             /* Inner loop uses 261 flops */
890         }
891
892         /* End of innermost loop */
893
894         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
895                                                  f+i_coord_offset,fshift+i_shift_offset);
896
897         ggid                        = gid[iidx];
898         /* Update potential energies */
899         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
900         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
901
902         /* Increment number of inner iterations */
903         inneriter                  += j_index_end - j_index_start;
904
905         /* Outer loop uses 26 flops */
906     }
907
908     /* Increment number of outer iterations */
909     outeriter        += nri;
910
911     /* Update outer/inner flops */
912
913     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*261);
914 }
915 /*
916  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_double
917  * Electrostatics interaction: Ewald
918  * VdW interaction:            LennardJones
919  * Geometry:                   Water4-Particle
920  * Calculate force/pot:        Force
921  */
922 void
923 nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_double
924                     (t_nblist * gmx_restrict                nlist,
925                      rvec * gmx_restrict                    xx,
926                      rvec * gmx_restrict                    ff,
927                      t_forcerec * gmx_restrict              fr,
928                      t_mdatoms * gmx_restrict               mdatoms,
929                      nb_kernel_data_t * gmx_restrict        kernel_data,
930                      t_nrnb * gmx_restrict                  nrnb)
931 {
932     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
933      * just 0 for non-waters.
934      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
935      * jnr indices corresponding to data put in the four positions in the SIMD register.
936      */
937     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
938     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
939     int              jnrA,jnrB,jnrC,jnrD;
940     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
941     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
942     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
943     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
944     real             rcutoff_scalar;
945     real             *shiftvec,*fshift,*x,*f;
946     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
947     real             scratch[4*DIM];
948     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
949     real *           vdwioffsetptr0;
950     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
951     real *           vdwioffsetptr1;
952     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
953     real *           vdwioffsetptr2;
954     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
955     real *           vdwioffsetptr3;
956     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
957     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
958     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
959     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
960     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
961     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
962     __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
963     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
964     real             *charge;
965     int              nvdwtype;
966     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
967     int              *vdwtype;
968     real             *vdwparam;
969     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
970     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
971     __m128i          ewitab;
972     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
973     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
974     real             *ewtab;
975     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
976     real             rswitch_scalar,d_scalar;
977     __m256d          dummy_mask,cutoff_mask;
978     __m128           tmpmask0,tmpmask1;
979     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
980     __m256d          one     = _mm256_set1_pd(1.0);
981     __m256d          two     = _mm256_set1_pd(2.0);
982     x                = xx[0];
983     f                = ff[0];
984
985     nri              = nlist->nri;
986     iinr             = nlist->iinr;
987     jindex           = nlist->jindex;
988     jjnr             = nlist->jjnr;
989     shiftidx         = nlist->shift;
990     gid              = nlist->gid;
991     shiftvec         = fr->shift_vec[0];
992     fshift           = fr->fshift[0];
993     facel            = _mm256_set1_pd(fr->epsfac);
994     charge           = mdatoms->chargeA;
995     nvdwtype         = fr->ntype;
996     vdwparam         = fr->nbfp;
997     vdwtype          = mdatoms->typeA;
998
999     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
1000     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff);
1001     beta2            = _mm256_mul_pd(beta,beta);
1002     beta3            = _mm256_mul_pd(beta,beta2);
1003
1004     ewtab            = fr->ic->tabq_coul_FDV0;
1005     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
1006     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1007
1008     /* Setup water-specific parameters */
1009     inr              = nlist->iinr[0];
1010     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1011     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1012     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
1013     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1014
1015     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1016     rcutoff_scalar   = fr->rcoulomb;
1017     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1018     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
1019
1020     rswitch_scalar   = fr->rcoulomb_switch;
1021     rswitch          = _mm256_set1_pd(rswitch_scalar);
1022     /* Setup switch parameters */
1023     d_scalar         = rcutoff_scalar-rswitch_scalar;
1024     d                = _mm256_set1_pd(d_scalar);
1025     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
1026     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1027     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1028     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
1029     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1030     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1031
1032     /* Avoid stupid compiler warnings */
1033     jnrA = jnrB = jnrC = jnrD = 0;
1034     j_coord_offsetA = 0;
1035     j_coord_offsetB = 0;
1036     j_coord_offsetC = 0;
1037     j_coord_offsetD = 0;
1038
1039     outeriter        = 0;
1040     inneriter        = 0;
1041
1042     for(iidx=0;iidx<4*DIM;iidx++)
1043     {
1044         scratch[iidx] = 0.0;
1045     }
1046
1047     /* Start outer loop over neighborlists */
1048     for(iidx=0; iidx<nri; iidx++)
1049     {
1050         /* Load shift vector for this list */
1051         i_shift_offset   = DIM*shiftidx[iidx];
1052
1053         /* Load limits for loop over neighbors */
1054         j_index_start    = jindex[iidx];
1055         j_index_end      = jindex[iidx+1];
1056
1057         /* Get outer coordinate index */
1058         inr              = iinr[iidx];
1059         i_coord_offset   = DIM*inr;
1060
1061         /* Load i particle coords and add shift vector */
1062         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1063                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1064
1065         fix0             = _mm256_setzero_pd();
1066         fiy0             = _mm256_setzero_pd();
1067         fiz0             = _mm256_setzero_pd();
1068         fix1             = _mm256_setzero_pd();
1069         fiy1             = _mm256_setzero_pd();
1070         fiz1             = _mm256_setzero_pd();
1071         fix2             = _mm256_setzero_pd();
1072         fiy2             = _mm256_setzero_pd();
1073         fiz2             = _mm256_setzero_pd();
1074         fix3             = _mm256_setzero_pd();
1075         fiy3             = _mm256_setzero_pd();
1076         fiz3             = _mm256_setzero_pd();
1077
1078         /* Start inner kernel loop */
1079         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1080         {
1081
1082             /* Get j neighbor index, and coordinate index */
1083             jnrA             = jjnr[jidx];
1084             jnrB             = jjnr[jidx+1];
1085             jnrC             = jjnr[jidx+2];
1086             jnrD             = jjnr[jidx+3];
1087             j_coord_offsetA  = DIM*jnrA;
1088             j_coord_offsetB  = DIM*jnrB;
1089             j_coord_offsetC  = DIM*jnrC;
1090             j_coord_offsetD  = DIM*jnrD;
1091
1092             /* load j atom coordinates */
1093             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1094                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1095                                                  &jx0,&jy0,&jz0);
1096
1097             /* Calculate displacement vector */
1098             dx00             = _mm256_sub_pd(ix0,jx0);
1099             dy00             = _mm256_sub_pd(iy0,jy0);
1100             dz00             = _mm256_sub_pd(iz0,jz0);
1101             dx10             = _mm256_sub_pd(ix1,jx0);
1102             dy10             = _mm256_sub_pd(iy1,jy0);
1103             dz10             = _mm256_sub_pd(iz1,jz0);
1104             dx20             = _mm256_sub_pd(ix2,jx0);
1105             dy20             = _mm256_sub_pd(iy2,jy0);
1106             dz20             = _mm256_sub_pd(iz2,jz0);
1107             dx30             = _mm256_sub_pd(ix3,jx0);
1108             dy30             = _mm256_sub_pd(iy3,jy0);
1109             dz30             = _mm256_sub_pd(iz3,jz0);
1110
1111             /* Calculate squared distances and the quantities derived from them */
1112             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1113             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1114             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1115             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
1116
1117             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1118             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1119             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1120             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
1121
1122             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1123             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1124             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1125             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
1126
1127             /* Load parameters for j particles */
1128             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
1129                                                                  charge+jnrC+0,charge+jnrD+0);
1130             vdwjidx0A        = 2*vdwtype[jnrA+0];
1131             vdwjidx0B        = 2*vdwtype[jnrB+0];
1132             vdwjidx0C        = 2*vdwtype[jnrC+0];
1133             vdwjidx0D        = 2*vdwtype[jnrD+0];
1134
1135             fjx0             = _mm256_setzero_pd();
1136             fjy0             = _mm256_setzero_pd();
1137             fjz0             = _mm256_setzero_pd();
1138
1139             /**************************
1140              * CALCULATE INTERACTIONS *
1141              **************************/
1142
1143             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1144             {
1145
1146             r00              = _mm256_mul_pd(rsq00,rinv00);
1147
1148             /* Compute parameters for interactions between i and j atoms */
1149             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
1150                                             vdwioffsetptr0+vdwjidx0B,
1151                                             vdwioffsetptr0+vdwjidx0C,
1152                                             vdwioffsetptr0+vdwjidx0D,
1153                                             &c6_00,&c12_00);
1154
1155             /* LENNARD-JONES DISPERSION/REPULSION */
1156
1157             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1158             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
1159             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1160             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1161             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
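
            /*
             * Editorial scalar sketch of the Lennard-Jones lines above, per pair:
             *
             *     rinv6  = rinvsq00^3;
             *     vvdw6  = c6_00  * rinv6;
             *     vvdw12 = c12_00 * rinv6 * rinv6;
             *     vvdw   = vvdw12/12 - vvdw6/6;          (potential, switched below)
             *     fvdw   = (vvdw12 - vvdw6) * rinvsq00;  (force/r, before switching)
             *
             * Assuming the usual convention that the stored pair parameters already
             * carry the factors 6 and 12, this is V = C12*r^-12 - C6*r^-6 and
             * F*r = 12*C12*r^-12 - 6*C6*r^-6.
             */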
1162
1163             d                = _mm256_sub_pd(r00,rswitch);
1164             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1165             d2               = _mm256_mul_pd(d,d);
1166             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1167
1168             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1169
1170             /* Evaluate switch function */
1171             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1172             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
1173             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1174
1175             fscal            = fvdw;
1176
1177             fscal            = _mm256_and_pd(fscal,cutoff_mask);
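
            /*
             * Editorial note: gmx_mm256_any_lt() only tells us that at least one of
             * the four pairs is inside the cutoff, so cutoff_mask is still needed to
             * zero the lanes that are outside it; per lane k the line above does
             *
             *     fscal[k] = (rsq00[k] < rcutoff2) ? fscal[k] : 0.0;
             */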
1178
1179             /* Calculate temporary vectorial force */
1180             tx               = _mm256_mul_pd(fscal,dx00);
1181             ty               = _mm256_mul_pd(fscal,dy00);
1182             tz               = _mm256_mul_pd(fscal,dz00);
1183
1184             /* Update vectorial force */
1185             fix0             = _mm256_add_pd(fix0,tx);
1186             fiy0             = _mm256_add_pd(fiy0,ty);
1187             fiz0             = _mm256_add_pd(fiz0,tz);
1188
1189             fjx0             = _mm256_add_pd(fjx0,tx);
1190             fjy0             = _mm256_add_pd(fjy0,ty);
1191             fjz0             = _mm256_add_pd(fjz0,tz);
1192
1193             }
1194
1195             /**************************
1196              * CALCULATE INTERACTIONS *
1197              **************************/
1198
1199             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1200             {
1201
1202             r10              = _mm256_mul_pd(rsq10,rinv10);
1203
1204             /* Compute parameters for interactions between i and j atoms */
1205             qq10             = _mm256_mul_pd(iq1,jq0);
1206
1207             /* EWALD ELECTROSTATICS */
1208
1209             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1210             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1211             ewitab           = _mm256_cvttpd_epi32(ewrt);
1212             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1213             ewitab           = _mm_slli_epi32(ewitab,2);
1214             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1215             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1216             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1217             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1218             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1219             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1220             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1221             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
1222             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
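
            /*
             * Editorial scalar sketch of the tabulated Ewald block above, per pair
             * (using the F,D,V layout implied by the variable names, four doubles
             * per table point):
             *
             *     rt    = r10 * ewtabscale;
             *     n     = (int) rt;
             *     eps   = rt - n;
             *     F     = ewtab[4*n+0];  D = ewtab[4*n+1];  V = ewtab[4*n+2];
             *     Ftab  = F + eps*D;
             *     velec = qq10 * ( rinv10 - (V - ewtabhalfspace*eps*(F + Ftab)) );
             *     felec = qq10 * rinv10 * ( rinvsq10 - Ftab );
             *
             * The four per-lane table loads each fetch one such quadruplet; the full
             * transpose regroups them so ewtabF/ewtabD/ewtabV hold one entry for all
             * four pairs.
             */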
1223
1224             d                = _mm256_sub_pd(r10,rswitch);
1225             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1226             d2               = _mm256_mul_pd(d,d);
1227             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1228
1229             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1230
1231             /* Evaluate switch function */
1232             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1233             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1234             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1235
1236             fscal            = felec;
1237
1238             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1239
1240             /* Calculate temporary vectorial force */
1241             tx               = _mm256_mul_pd(fscal,dx10);
1242             ty               = _mm256_mul_pd(fscal,dy10);
1243             tz               = _mm256_mul_pd(fscal,dz10);
1244
1245             /* Update vectorial force */
1246             fix1             = _mm256_add_pd(fix1,tx);
1247             fiy1             = _mm256_add_pd(fiy1,ty);
1248             fiz1             = _mm256_add_pd(fiz1,tz);
1249
1250             fjx0             = _mm256_add_pd(fjx0,tx);
1251             fjy0             = _mm256_add_pd(fjy0,ty);
1252             fjz0             = _mm256_add_pd(fjz0,tz);
1253
1254             }
1255
1256             /**************************
1257              * CALCULATE INTERACTIONS *
1258              **************************/
1259
1260             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1261             {
1262
1263             r20              = _mm256_mul_pd(rsq20,rinv20);
1264
1265             /* Compute parameters for interactions between i and j atoms */
1266             qq20             = _mm256_mul_pd(iq2,jq0);
1267
1268             /* EWALD ELECTROSTATICS */
1269
1270             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1271             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1272             ewitab           = _mm256_cvttpd_epi32(ewrt);
1273             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1274             ewitab           = _mm_slli_epi32(ewitab,2);
1275             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1276             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1277             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1278             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1279             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1280             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1281             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1282             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1283             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1284
1285             d                = _mm256_sub_pd(r20,rswitch);
1286             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1287             d2               = _mm256_mul_pd(d,d);
1288             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1289
1290             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1291
1292             /* Evaluate switch function */
1293             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1294             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
1295             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1296
1297             fscal            = felec;
1298
1299             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1300
1301             /* Calculate temporary vectorial force */
1302             tx               = _mm256_mul_pd(fscal,dx20);
1303             ty               = _mm256_mul_pd(fscal,dy20);
1304             tz               = _mm256_mul_pd(fscal,dz20);
1305
1306             /* Update vectorial force */
1307             fix2             = _mm256_add_pd(fix2,tx);
1308             fiy2             = _mm256_add_pd(fiy2,ty);
1309             fiz2             = _mm256_add_pd(fiz2,tz);
1310
1311             fjx0             = _mm256_add_pd(fjx0,tx);
1312             fjy0             = _mm256_add_pd(fjy0,ty);
1313             fjz0             = _mm256_add_pd(fjz0,tz);
1314
1315             }
1316
1317             /**************************
1318              * CALCULATE INTERACTIONS *
1319              **************************/
1320
1321             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1322             {
1323
1324             r30              = _mm256_mul_pd(rsq30,rinv30);
1325
1326             /* Compute parameters for interactions between i and j atoms */
1327             qq30             = _mm256_mul_pd(iq3,jq0);
1328
1329             /* EWALD ELECTROSTATICS */
1330
1331             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1332             ewrt             = _mm256_mul_pd(r30,ewtabscale);
1333             ewitab           = _mm256_cvttpd_epi32(ewrt);
1334             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1335             ewitab           = _mm_slli_epi32(ewitab,2);
1336             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1337             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1338             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1339             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1340             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1341             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1342             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1343             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
1344             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
1345
1346             d                = _mm256_sub_pd(r30,rswitch);
1347             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1348             d2               = _mm256_mul_pd(d,d);
1349             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1350
1351             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1352
1353             /* Evaluate switch function */
1354             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1355             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
1356             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
1357
1358             fscal            = felec;
1359
1360             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1361
1362             /* Calculate temporary vectorial force */
1363             tx               = _mm256_mul_pd(fscal,dx30);
1364             ty               = _mm256_mul_pd(fscal,dy30);
1365             tz               = _mm256_mul_pd(fscal,dz30);
1366
1367             /* Update vectorial force */
1368             fix3             = _mm256_add_pd(fix3,tx);
1369             fiy3             = _mm256_add_pd(fiy3,ty);
1370             fiz3             = _mm256_add_pd(fiz3,tz);
1371
1372             fjx0             = _mm256_add_pd(fjx0,tx);
1373             fjy0             = _mm256_add_pd(fjy0,ty);
1374             fjz0             = _mm256_add_pd(fjz0,tz);
1375
1376             }
1377
1378             fjptrA             = f+j_coord_offsetA;
1379             fjptrB             = f+j_coord_offsetB;
1380             fjptrC             = f+j_coord_offsetC;
1381             fjptrD             = f+j_coord_offsetD;
1382
1383             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
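
            /*
             * Editorial sketch of the j-force scatter above: fjx0/fjy0/fjz0 hold the
             * per-lane force contributions on the four j atoms, and the decrement
             * call transposes them back and subtracts lane k from atom k's force,
             * roughly
             *
             *     f[DIM*jnr_k + XX] -= fjx0[k];
             *     f[DIM*jnr_k + YY] -= fjy0[k];
             *     f[DIM*jnr_k + ZZ] -= fjz0[k];
             *
             * which together with the fix/fiy/fiz increments keeps Newton's third law.
             */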
1384
1385             /* Inner loop uses 245 flops */
1386         }
1387
1388         if(jidx<j_index_end)
1389         {
1390
1391             /* Get j neighbor index, and coordinate index */
1392             jnrlistA         = jjnr[jidx];
1393             jnrlistB         = jjnr[jidx+1];
1394             jnrlistC         = jjnr[jidx+2];
1395             jnrlistD         = jjnr[jidx+3];
1396             /* Sign of each element will be negative for non-real (padding) atoms.
1397              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1398              * so it is applied as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
1399              */
1400             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1401
1402             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1403             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1404             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
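
            /*
             * Editorial sketch (assuming the usual lane widening done here): the four
             * 32-bit comparison results are duplicated pairwise so that each 64-bit
             * lane of dummy_mask is all ones exactly when the corresponding jjnr
             * entry is negative, i.e. per lane k
             *
             *     dummy_mask[k] = (jjnr[jidx+k] < 0) ? all-ones : 0;
             *
             * It is then applied as val = _mm256_andnot_pd(dummy_mask,val) below.
             */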
1405
1406             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1407             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1408             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1409             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1410             j_coord_offsetA  = DIM*jnrA;
1411             j_coord_offsetB  = DIM*jnrB;
1412             j_coord_offsetC  = DIM*jnrC;
1413             j_coord_offsetD  = DIM*jnrD;
1414
1415             /* load j atom coordinates */
1416             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1417                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1418                                                  &jx0,&jy0,&jz0);
1419
1420             /* Calculate displacement vector */
1421             dx00             = _mm256_sub_pd(ix0,jx0);
1422             dy00             = _mm256_sub_pd(iy0,jy0);
1423             dz00             = _mm256_sub_pd(iz0,jz0);
1424             dx10             = _mm256_sub_pd(ix1,jx0);
1425             dy10             = _mm256_sub_pd(iy1,jy0);
1426             dz10             = _mm256_sub_pd(iz1,jz0);
1427             dx20             = _mm256_sub_pd(ix2,jx0);
1428             dy20             = _mm256_sub_pd(iy2,jy0);
1429             dz20             = _mm256_sub_pd(iz2,jz0);
1430             dx30             = _mm256_sub_pd(ix3,jx0);
1431             dy30             = _mm256_sub_pd(iy3,jy0);
1432             dz30             = _mm256_sub_pd(iz3,jz0);
1433
1434             /* Calculate squared distances and the quantities derived from them */
1435             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1436             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1437             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1438             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
1439
1440             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1441             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1442             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1443             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
1444
1445             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1446             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1447             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1448             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
1449
1450             /* Load parameters for j particles */
1451             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
1452                                                                  charge+jnrC+0,charge+jnrD+0);
1453             vdwjidx0A        = 2*vdwtype[jnrA+0];
1454             vdwjidx0B        = 2*vdwtype[jnrB+0];
1455             vdwjidx0C        = 2*vdwtype[jnrC+0];
1456             vdwjidx0D        = 2*vdwtype[jnrD+0];
1457
1458             fjx0             = _mm256_setzero_pd();
1459             fjy0             = _mm256_setzero_pd();
1460             fjz0             = _mm256_setzero_pd();
1461
1462             /**************************
1463              * CALCULATE INTERACTIONS *
1464              **************************/
1465
1466             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1467             {
1468
1469             r00              = _mm256_mul_pd(rsq00,rinv00);
1470             r00              = _mm256_andnot_pd(dummy_mask,r00);
1471
1472             /* Compute parameters for interactions between i and j atoms */
1473             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
1474                                             vdwioffsetptr0+vdwjidx0B,
1475                                             vdwioffsetptr0+vdwjidx0C,
1476                                             vdwioffsetptr0+vdwjidx0D,
1477                                             &c6_00,&c12_00);
1478
1479             /* LENNARD-JONES DISPERSION/REPULSION */
1480
1481             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1482             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
1483             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1484             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1485             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
1486
1487             d                = _mm256_sub_pd(r00,rswitch);
1488             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1489             d2               = _mm256_mul_pd(d,d);
1490             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1491
1492             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1493
1494             /* Evaluate switch function */
1495             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1496             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
1497             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1498
1499             fscal            = fvdw;
1500
1501             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1502
1503             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1504
1505             /* Calculate temporary vectorial force */
1506             tx               = _mm256_mul_pd(fscal,dx00);
1507             ty               = _mm256_mul_pd(fscal,dy00);
1508             tz               = _mm256_mul_pd(fscal,dz00);
1509
1510             /* Update vectorial force */
1511             fix0             = _mm256_add_pd(fix0,tx);
1512             fiy0             = _mm256_add_pd(fiy0,ty);
1513             fiz0             = _mm256_add_pd(fiz0,tz);
1514
1515             fjx0             = _mm256_add_pd(fjx0,tx);
1516             fjy0             = _mm256_add_pd(fjy0,ty);
1517             fjz0             = _mm256_add_pd(fjz0,tz);
1518
1519             }
1520
1521             /**************************
1522              * CALCULATE INTERACTIONS *
1523              **************************/
1524
1525             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1526             {
1527
1528             r10              = _mm256_mul_pd(rsq10,rinv10);
1529             r10              = _mm256_andnot_pd(dummy_mask,r10);
1530
1531             /* Compute parameters for interactions between i and j atoms */
1532             qq10             = _mm256_mul_pd(iq1,jq0);
1533
1534             /* EWALD ELECTROSTATICS */
1535
1536             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1537             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1538             ewitab           = _mm256_cvttpd_epi32(ewrt);
1539             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1540             ewitab           = _mm_slli_epi32(ewitab,2);
1541             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1542             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1543             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1544             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1545             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1546             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1547             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1548             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
1549             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1550
1551             d                = _mm256_sub_pd(r10,rswitch);
1552             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1553             d2               = _mm256_mul_pd(d,d);
1554             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1555
1556             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1557
1558             /* Evaluate switch function */
1559             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1560             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1561             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1562
1563             fscal            = felec;
1564
1565             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1566
1567             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1568
1569             /* Calculate temporary vectorial force */
1570             tx               = _mm256_mul_pd(fscal,dx10);
1571             ty               = _mm256_mul_pd(fscal,dy10);
1572             tz               = _mm256_mul_pd(fscal,dz10);
1573
1574             /* Update vectorial force */
1575             fix1             = _mm256_add_pd(fix1,tx);
1576             fiy1             = _mm256_add_pd(fiy1,ty);
1577             fiz1             = _mm256_add_pd(fiz1,tz);
1578
1579             fjx0             = _mm256_add_pd(fjx0,tx);
1580             fjy0             = _mm256_add_pd(fjy0,ty);
1581             fjz0             = _mm256_add_pd(fjz0,tz);
1582
1583             }
1584
1585             /**************************
1586              * CALCULATE INTERACTIONS *
1587              **************************/
1588
1589             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1590             {
1591
1592             r20              = _mm256_mul_pd(rsq20,rinv20);
1593             r20              = _mm256_andnot_pd(dummy_mask,r20);
1594
1595             /* Compute parameters for interactions between i and j atoms */
1596             qq20             = _mm256_mul_pd(iq2,jq0);
1597
1598             /* EWALD ELECTROSTATICS */
1599
1600             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1601             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1602             ewitab           = _mm256_cvttpd_epi32(ewrt);
1603             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1604             ewitab           = _mm_slli_epi32(ewitab,2);
1605             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1606             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1607             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1608             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1609             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1610             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1611             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1612             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1613             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1614
1615             d                = _mm256_sub_pd(r20,rswitch);
1616             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1617             d2               = _mm256_mul_pd(d,d);
1618             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1619
1620             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1621
1622             /* Evaluate switch function */
1623             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1624             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
1625             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1626
1627             fscal            = felec;
1628
1629             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1630
1631             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1632
1633             /* Calculate temporary vectorial force */
1634             tx               = _mm256_mul_pd(fscal,dx20);
1635             ty               = _mm256_mul_pd(fscal,dy20);
1636             tz               = _mm256_mul_pd(fscal,dz20);
1637
1638             /* Update vectorial force */
1639             fix2             = _mm256_add_pd(fix2,tx);
1640             fiy2             = _mm256_add_pd(fiy2,ty);
1641             fiz2             = _mm256_add_pd(fiz2,tz);
1642
1643             fjx0             = _mm256_add_pd(fjx0,tx);
1644             fjy0             = _mm256_add_pd(fjy0,ty);
1645             fjz0             = _mm256_add_pd(fjz0,tz);
1646
1647             }
1648
1649             /**************************
1650              * CALCULATE INTERACTIONS *
1651              **************************/
1652
1653             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1654             {
1655
1656             r30              = _mm256_mul_pd(rsq30,rinv30);
1657             r30              = _mm256_andnot_pd(dummy_mask,r30);
1658
1659             /* Compute parameters for interactions between i and j atoms */
1660             qq30             = _mm256_mul_pd(iq3,jq0);
1661
1662             /* EWALD ELECTROSTATICS */
1663
1664             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1665             ewrt             = _mm256_mul_pd(r30,ewtabscale);
1666             ewitab           = _mm256_cvttpd_epi32(ewrt);
1667             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1668             ewitab           = _mm_slli_epi32(ewitab,2);
1669             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1670             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1671             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1672             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1673             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1674             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1675             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1676             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(rinv30,velec));
1677             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
1678
1679             d                = _mm256_sub_pd(r30,rswitch);
1680             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1681             d2               = _mm256_mul_pd(d,d);
1682             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1683
1684             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1685
1686             /* Evaluate switch function */
1687             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1688             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv30,_mm256_mul_pd(velec,dsw)) );
1689             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
1690
1691             fscal            = felec;
1692
1693             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1694
1695             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1696
1697             /* Calculate temporary vectorial force */
1698             tx               = _mm256_mul_pd(fscal,dx30);
1699             ty               = _mm256_mul_pd(fscal,dy30);
1700             tz               = _mm256_mul_pd(fscal,dz30);
1701
1702             /* Update vectorial force */
1703             fix3             = _mm256_add_pd(fix3,tx);
1704             fiy3             = _mm256_add_pd(fiy3,ty);
1705             fiz3             = _mm256_add_pd(fiz3,tz);
1706
1707             fjx0             = _mm256_add_pd(fjx0,tx);
1708             fjy0             = _mm256_add_pd(fjy0,ty);
1709             fjz0             = _mm256_add_pd(fjz0,tz);
1710
1711             }
1712
1713             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1714             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1715             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1716             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1717
1718             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
1719
1720             /* Inner loop uses 249 flops */
1721         }
1722
1723         /* End of innermost loop */
1724
1725         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1726                                                  f+i_coord_offset,fshift+i_shift_offset);
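
        /*
         * Editorial note: the call above horizontally sums the four lanes of each
         * fixN/fiyN/fizN accumulator (N = 0..3), adds the per-atom totals to
         * f + i_coord_offset, and accumulates their combined sum into
         * fshift + i_shift_offset so the shift forces (virial) stay consistent.
         */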
1727
1728         /* Increment number of inner iterations */
1729         inneriter                  += j_index_end - j_index_start;
1730
1731         /* Outer loop uses 24 flops */
1732     }
1733
1734     /* Increment number of outer iterations */
1735     outeriter        += nri;
1736
1737     /* Update outer/inner flops */
1738
1739     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*249);
1740 }