src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_avx_256_double.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #ifdef HAVE_CONFIG_H
39 #include <config.h>
40 #endif
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "types/simple.h"
46 #include "vec.h"
47 #include "nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_VF_avx_256_double
54  * Electrostatics interaction: Ewald
55  * VdW interaction:            LJEwald
56  * Geometry:                   Water4-Particle
57  * Calculate force/pot:        PotentialAndForce
58  */
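/*
 * Illustrative scalar reference (a readability sketch, excluded from compilation;
 * it is not produced by the kernel generator). It spells out the per-pair math the
 * SIMD loop below vectorizes, under two assumptions: (i) c6/c12/c6grid are the
 * premultiplied values (6*C6, 12*C12, 6*C6grid) stored in fr->nbfp and
 * fr->ljpme_c6grid, as the "6 * C6grid" comment in the force-only kernel further
 * down suggests; (ii) the tabulated Coulomb correction equals erf(beta*r)/r, so
 * that 1/r minus the table value gives erfc(beta*r)/r.
 */
#if 0
static double
scalar_reference_pair_energy(double r, double qq,
                             double c6, double c12, double c6grid,
                             double beta_q, double beta_lj,
                             double sh_ewald, double sh_invrc6, double sh_lj_ewald)
{
    double rinv   = 1.0/r;
    double rinv6  = rinv*rinv*rinv*rinv*rinv*rinv;
    double brsq   = beta_lj*beta_lj*r*r;
    /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4/2) */
    double poly   = exp(-brsq)*(1.0 + brsq + 0.5*brsq*brsq);
    /* LJ-PME: the real-space C6 is reduced by the grid contribution (1-poly) */
    double vvdw6  = (c6 - c6grid*(1.0 - poly))*rinv6;
    double vvdw12 = c12*rinv6*rinv6;
    /* Potential-shifted LJ-PME energy, matching the vvdw expression in the kernel */
    double vvdw   = (vvdw12 - c12*sh_invrc6*sh_invrc6)/12.0
                    - (vvdw6 - (c6*sh_invrc6 + c6grid*sh_lj_ewald))/6.0;
    /* Potential-shifted short-range Ewald electrostatics */
    double velec  = qq*(erfc(beta_q*r)*rinv - sh_ewald);
    return vvdw + velec;
}
#endif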
59 void
60 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_VF_avx_256_double
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
72      * jnr indices corresponding to data put in the four positions in the SIMD register.
73      */
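    /* For example, after the 4-way j load jx0 holds {x[jnrA],x[jnrB],x[jnrC],x[jnrD]}
     * in the four lanes of one __m256d, and dx10 holds the four x-distances from
     * water site 1 of the current i water to those four j atoms.
     */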
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
84     real             scratch[4*DIM];
85     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     real *           vdwgridioffsetptr0;
88     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
89     real *           vdwioffsetptr1;
90     real *           vdwgridioffsetptr1;
91     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
92     real *           vdwioffsetptr2;
93     real *           vdwgridioffsetptr2;
94     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
95     real *           vdwioffsetptr3;
96     real *           vdwgridioffsetptr3;
97     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
98     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
99     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
100     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
101     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
103     __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
104     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
105     real             *charge;
106     int              nvdwtype;
107     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
108     int              *vdwtype;
109     real             *vdwparam;
110     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
111     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
112     __m256d           c6grid_00;
113     __m256d           c6grid_10;
114     __m256d           c6grid_20;
115     __m256d           c6grid_30;
116     real             *vdwgridparam;
117     __m256d           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
118     __m256d           one_half  = _mm256_set1_pd(0.5);
119     __m256d           minus_one = _mm256_set1_pd(-1.0);
120     __m128i          ewitab;
121     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
122     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
123     real             *ewtab;
124     __m256d          dummy_mask,cutoff_mask;
125     __m128           tmpmask0,tmpmask1;
126     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
127     __m256d          one     = _mm256_set1_pd(1.0);
128     __m256d          two     = _mm256_set1_pd(2.0);
129     x                = xx[0];
130     f                = ff[0];
131
132     nri              = nlist->nri;
133     iinr             = nlist->iinr;
134     jindex           = nlist->jindex;
135     jjnr             = nlist->jjnr;
136     shiftidx         = nlist->shift;
137     gid              = nlist->gid;
138     shiftvec         = fr->shift_vec[0];
139     fshift           = fr->fshift[0];
140     facel            = _mm256_set1_pd(fr->epsfac);
141     charge           = mdatoms->chargeA;
142     nvdwtype         = fr->ntype;
143     vdwparam         = fr->nbfp;
144     vdwtype          = mdatoms->typeA;
145     vdwgridparam     = fr->ljpme_c6grid;
146     sh_lj_ewald      = _mm256_set1_pd(fr->ic->sh_lj_ewald);
147     ewclj            = _mm256_set1_pd(fr->ewaldcoeff_lj);
148     ewclj2           = _mm256_mul_pd(minus_one,_mm256_mul_pd(ewclj,ewclj));
149
150     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
151     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
152     beta2            = _mm256_mul_pd(beta,beta);
153     beta3            = _mm256_mul_pd(beta,beta2);
154
155     ewtab            = fr->ic->tabq_coul_FDV0;
156     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
157     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
158
159     /* Setup water-specific parameters */
160     inr              = nlist->iinr[0];
161     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
162     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
163     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
164     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
165     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
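    /* For this water geometry only site 0 carries VdW parameters and only sites 1-3
     * carry charge (iq0 and the site 1-3 VdW offsets are never needed), as in
     * TIP4P-like models where the oxygen has LJ only and H1/H2/M are point charges.
     */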
166
167     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
168     rcutoff_scalar   = fr->rcoulomb;
169     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
170     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
171
172     sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
173     rvdw             = _mm256_set1_pd(fr->rvdw);
174
175     /* Avoid stupid compiler warnings */
176     jnrA = jnrB = jnrC = jnrD = 0;
177     j_coord_offsetA = 0;
178     j_coord_offsetB = 0;
179     j_coord_offsetC = 0;
180     j_coord_offsetD = 0;
181
182     outeriter        = 0;
183     inneriter        = 0;
184
185     for(iidx=0;iidx<4*DIM;iidx++)
186     {
187         scratch[iidx] = 0.0;
188     }
189
190     /* Start outer loop over neighborlists */
191     for(iidx=0; iidx<nri; iidx++)
192     {
193         /* Load shift vector for this list */
194         i_shift_offset   = DIM*shiftidx[iidx];
195
196         /* Load limits for loop over neighbors */
197         j_index_start    = jindex[iidx];
198         j_index_end      = jindex[iidx+1];
199
200         /* Get outer coordinate index */
201         inr              = iinr[iidx];
202         i_coord_offset   = DIM*inr;
203
204         /* Load i particle coords and add shift vector */
205         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
206                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
207
208         fix0             = _mm256_setzero_pd();
209         fiy0             = _mm256_setzero_pd();
210         fiz0             = _mm256_setzero_pd();
211         fix1             = _mm256_setzero_pd();
212         fiy1             = _mm256_setzero_pd();
213         fiz1             = _mm256_setzero_pd();
214         fix2             = _mm256_setzero_pd();
215         fiy2             = _mm256_setzero_pd();
216         fiz2             = _mm256_setzero_pd();
217         fix3             = _mm256_setzero_pd();
218         fiy3             = _mm256_setzero_pd();
219         fiz3             = _mm256_setzero_pd();
220
221         /* Reset potential sums */
222         velecsum         = _mm256_setzero_pd();
223         vvdwsum          = _mm256_setzero_pd();
224
225         /* Start inner kernel loop */
226         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
227         {
228
229             /* Get j neighbor index, and coordinate index */
230             jnrA             = jjnr[jidx];
231             jnrB             = jjnr[jidx+1];
232             jnrC             = jjnr[jidx+2];
233             jnrD             = jjnr[jidx+3];
234             j_coord_offsetA  = DIM*jnrA;
235             j_coord_offsetB  = DIM*jnrB;
236             j_coord_offsetC  = DIM*jnrC;
237             j_coord_offsetD  = DIM*jnrD;
238
239             /* load j atom coordinates */
240             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
241                                                  x+j_coord_offsetC,x+j_coord_offsetD,
242                                                  &jx0,&jy0,&jz0);
243
244             /* Calculate displacement vector */
245             dx00             = _mm256_sub_pd(ix0,jx0);
246             dy00             = _mm256_sub_pd(iy0,jy0);
247             dz00             = _mm256_sub_pd(iz0,jz0);
248             dx10             = _mm256_sub_pd(ix1,jx0);
249             dy10             = _mm256_sub_pd(iy1,jy0);
250             dz10             = _mm256_sub_pd(iz1,jz0);
251             dx20             = _mm256_sub_pd(ix2,jx0);
252             dy20             = _mm256_sub_pd(iy2,jy0);
253             dz20             = _mm256_sub_pd(iz2,jz0);
254             dx30             = _mm256_sub_pd(ix3,jx0);
255             dy30             = _mm256_sub_pd(iy3,jy0);
256             dz30             = _mm256_sub_pd(iz3,jz0);
257
258             /* Calculate squared distance and things based on it */
259             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
260             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
261             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
262             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
263
264             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
265             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
266             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
267             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
268
269             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
270             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
271             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
272             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
273
274             /* Load parameters for j particles */
275             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
276                                                                  charge+jnrC+0,charge+jnrD+0);
277             vdwjidx0A        = 2*vdwtype[jnrA+0];
278             vdwjidx0B        = 2*vdwtype[jnrB+0];
279             vdwjidx0C        = 2*vdwtype[jnrC+0];
280             vdwjidx0D        = 2*vdwtype[jnrD+0];
281
282             fjx0             = _mm256_setzero_pd();
283             fjy0             = _mm256_setzero_pd();
284             fjz0             = _mm256_setzero_pd();
285
286             /**************************
287              * CALCULATE INTERACTIONS *
288              **************************/
289
290             if (gmx_mm256_any_lt(rsq00,rcutoff2))
291             {
292
293             r00              = _mm256_mul_pd(rsq00,rinv00);
294
295             /* Compute parameters for interactions between i and j atoms */
296             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
297                                             vdwioffsetptr0+vdwjidx0B,
298                                             vdwioffsetptr0+vdwjidx0C,
299                                             vdwioffsetptr0+vdwjidx0D,
300                                             &c6_00,&c12_00);
301
302             c6grid_00       = gmx_mm256_load_4real_swizzle_pd(vdwgridioffsetptr0+vdwjidx0A,
303                                                                   vdwgridioffsetptr0+vdwjidx0B,
304                                                                   vdwgridioffsetptr0+vdwjidx0C,
305                                                                   vdwgridioffsetptr0+vdwjidx0D);
306
307             /* Analytical LJ-PME */
308             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
309             ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
310             ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
311             exponent         = gmx_simd_exp_d(ewcljrsq);
312             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
313             poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
314             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
315             vvdw6            = _mm256_mul_pd(_mm256_sub_pd(c6_00,_mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly))),rinvsix);
316             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
317             vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
318                                           _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_add_pd(_mm256_mul_pd(c6_00,sh_vdw_invrcut6),_mm256_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
319             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
320             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,_mm256_sub_pd(vvdw6,_mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6)))),rinvsq00);
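            /* Sign conventions: ewclj2 = -beta_lj^2 and ewclj6 = ewclj2^3 = -beta_lj^6,
             * so exponent = exp(-(beta_lj*r)^2) and the grid term above carries the sign
             * given in the fvdw comment. fvdw is the scalar force divided by r, so the
             * products with dx/dy/dz below give the Cartesian force components directly.
             */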
321
322             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
323
324             /* Update potential sum for this i atom from the interaction with this j atom. */
325             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
326             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
327
328             fscal            = fvdw;
329
330             fscal            = _mm256_and_pd(fscal,cutoff_mask);
331
332             /* Calculate temporary vectorial force */
333             tx               = _mm256_mul_pd(fscal,dx00);
334             ty               = _mm256_mul_pd(fscal,dy00);
335             tz               = _mm256_mul_pd(fscal,dz00);
336
337             /* Update vectorial force */
338             fix0             = _mm256_add_pd(fix0,tx);
339             fiy0             = _mm256_add_pd(fiy0,ty);
340             fiz0             = _mm256_add_pd(fiz0,tz);
341
342             fjx0             = _mm256_add_pd(fjx0,tx);
343             fjy0             = _mm256_add_pd(fjy0,ty);
344             fjz0             = _mm256_add_pd(fjz0,tz);
345
346             }
347
348             /**************************
349              * CALCULATE INTERACTIONS *
350              **************************/
351
352             if (gmx_mm256_any_lt(rsq10,rcutoff2))
353             {
354
355             r10              = _mm256_mul_pd(rsq10,rinv10);
356
357             /* Compute parameters for interactions between i and j atoms */
358             qq10             = _mm256_mul_pd(iq1,jq0);
359
360             /* EWALD ELECTROSTATICS */
361
362             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
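            /* The FDV0 table stores quadruplets {F[i], D[i], V[i], 0} at offset 4*i,
             * with D[i] the increment to the next force value; after the 4x4 transpose
             * below, felec = F + eps*D is the linearly interpolated table force and the
             * table potential is recovered as V - eps/(2*tabscale)*(F + felec).
             */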
363             ewrt             = _mm256_mul_pd(r10,ewtabscale);
364             ewitab           = _mm256_cvttpd_epi32(ewrt);
365             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
366             ewitab           = _mm_slli_epi32(ewitab,2);
367             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
368             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
369             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
370             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
371             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
372             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
373             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
374             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
375             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
376
377             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
378
379             /* Update potential sum for this i atom from the interaction with this j atom. */
380             velec            = _mm256_and_pd(velec,cutoff_mask);
381             velecsum         = _mm256_add_pd(velecsum,velec);
382
383             fscal            = felec;
384
385             fscal            = _mm256_and_pd(fscal,cutoff_mask);
386
387             /* Calculate temporary vectorial force */
388             tx               = _mm256_mul_pd(fscal,dx10);
389             ty               = _mm256_mul_pd(fscal,dy10);
390             tz               = _mm256_mul_pd(fscal,dz10);
391
392             /* Update vectorial force */
393             fix1             = _mm256_add_pd(fix1,tx);
394             fiy1             = _mm256_add_pd(fiy1,ty);
395             fiz1             = _mm256_add_pd(fiz1,tz);
396
397             fjx0             = _mm256_add_pd(fjx0,tx);
398             fjy0             = _mm256_add_pd(fjy0,ty);
399             fjz0             = _mm256_add_pd(fjz0,tz);
400
401             }
402
403             /**************************
404              * CALCULATE INTERACTIONS *
405              **************************/
406
407             if (gmx_mm256_any_lt(rsq20,rcutoff2))
408             {
409
410             r20              = _mm256_mul_pd(rsq20,rinv20);
411
412             /* Compute parameters for interactions between i and j atoms */
413             qq20             = _mm256_mul_pd(iq2,jq0);
414
415             /* EWALD ELECTROSTATICS */
416
417             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
418             ewrt             = _mm256_mul_pd(r20,ewtabscale);
419             ewitab           = _mm256_cvttpd_epi32(ewrt);
420             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
421             ewitab           = _mm_slli_epi32(ewitab,2);
422             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
423             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
424             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
425             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
426             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
427             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
428             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
429             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
430             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
431
432             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
433
434             /* Update potential sum for this i atom from the interaction with this j atom. */
435             velec            = _mm256_and_pd(velec,cutoff_mask);
436             velecsum         = _mm256_add_pd(velecsum,velec);
437
438             fscal            = felec;
439
440             fscal            = _mm256_and_pd(fscal,cutoff_mask);
441
442             /* Calculate temporary vectorial force */
443             tx               = _mm256_mul_pd(fscal,dx20);
444             ty               = _mm256_mul_pd(fscal,dy20);
445             tz               = _mm256_mul_pd(fscal,dz20);
446
447             /* Update vectorial force */
448             fix2             = _mm256_add_pd(fix2,tx);
449             fiy2             = _mm256_add_pd(fiy2,ty);
450             fiz2             = _mm256_add_pd(fiz2,tz);
451
452             fjx0             = _mm256_add_pd(fjx0,tx);
453             fjy0             = _mm256_add_pd(fjy0,ty);
454             fjz0             = _mm256_add_pd(fjz0,tz);
455
456             }
457
458             /**************************
459              * CALCULATE INTERACTIONS *
460              **************************/
461
462             if (gmx_mm256_any_lt(rsq30,rcutoff2))
463             {
464
465             r30              = _mm256_mul_pd(rsq30,rinv30);
466
467             /* Compute parameters for interactions between i and j atoms */
468             qq30             = _mm256_mul_pd(iq3,jq0);
469
470             /* EWALD ELECTROSTATICS */
471
472             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
473             ewrt             = _mm256_mul_pd(r30,ewtabscale);
474             ewitab           = _mm256_cvttpd_epi32(ewrt);
475             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
476             ewitab           = _mm_slli_epi32(ewitab,2);
477             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
478             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
479             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
480             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
481             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
482             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
483             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
484             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_sub_pd(rinv30,sh_ewald),velec));
485             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
486
487             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
488
489             /* Update potential sum for this i atom from the interaction with this j atom. */
490             velec            = _mm256_and_pd(velec,cutoff_mask);
491             velecsum         = _mm256_add_pd(velecsum,velec);
492
493             fscal            = felec;
494
495             fscal            = _mm256_and_pd(fscal,cutoff_mask);
496
497             /* Calculate temporary vectorial force */
498             tx               = _mm256_mul_pd(fscal,dx30);
499             ty               = _mm256_mul_pd(fscal,dy30);
500             tz               = _mm256_mul_pd(fscal,dz30);
501
502             /* Update vectorial force */
503             fix3             = _mm256_add_pd(fix3,tx);
504             fiy3             = _mm256_add_pd(fiy3,ty);
505             fiz3             = _mm256_add_pd(fiz3,tz);
506
507             fjx0             = _mm256_add_pd(fjx0,tx);
508             fjy0             = _mm256_add_pd(fjy0,ty);
509             fjz0             = _mm256_add_pd(fjz0,tz);
510
511             }
512
513             fjptrA             = f+j_coord_offsetA;
514             fjptrB             = f+j_coord_offsetB;
515             fjptrC             = f+j_coord_offsetC;
516             fjptrD             = f+j_coord_offsetD;
517
518             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
519
520             /* Inner loop uses 203 flops */
521         }
522
523         if(jidx<j_index_end)
524         {
525
526             /* Get j neighbor index, and coordinate index */
527             jnrlistA         = jjnr[jidx];
528             jnrlistB         = jjnr[jidx+1];
529             jnrlistC         = jjnr[jidx+2];
530             jnrlistD         = jjnr[jidx+3];
531             /* Sign of each element will be negative for non-real atoms.
532              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
533              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
534              */
535             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
536
537             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
538             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
539             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
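            /* _mm_cmplt_epi32 produces four 32-bit masks; the two permutes duplicate each
             * 32-bit result into both halves of a 64-bit lane, and joining the two 128-bit
             * halves yields a 256-bit mask with one all-ones/all-zeros quadword per j entry,
             * ready for use with _mm256_andnot_pd.
             */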
540
541             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
542             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
543             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
544             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
545             j_coord_offsetA  = DIM*jnrA;
546             j_coord_offsetB  = DIM*jnrB;
547             j_coord_offsetC  = DIM*jnrC;
548             j_coord_offsetD  = DIM*jnrD;
549
550             /* load j atom coordinates */
551             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
552                                                  x+j_coord_offsetC,x+j_coord_offsetD,
553                                                  &jx0,&jy0,&jz0);
554
555             /* Calculate displacement vector */
556             dx00             = _mm256_sub_pd(ix0,jx0);
557             dy00             = _mm256_sub_pd(iy0,jy0);
558             dz00             = _mm256_sub_pd(iz0,jz0);
559             dx10             = _mm256_sub_pd(ix1,jx0);
560             dy10             = _mm256_sub_pd(iy1,jy0);
561             dz10             = _mm256_sub_pd(iz1,jz0);
562             dx20             = _mm256_sub_pd(ix2,jx0);
563             dy20             = _mm256_sub_pd(iy2,jy0);
564             dz20             = _mm256_sub_pd(iz2,jz0);
565             dx30             = _mm256_sub_pd(ix3,jx0);
566             dy30             = _mm256_sub_pd(iy3,jy0);
567             dz30             = _mm256_sub_pd(iz3,jz0);
568
569             /* Calculate squared distance and things based on it */
570             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
571             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
572             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
573             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
574
575             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
576             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
577             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
578             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
579
580             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
581             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
582             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
583             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
584
585             /* Load parameters for j particles */
586             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
587                                                                  charge+jnrC+0,charge+jnrD+0);
588             vdwjidx0A        = 2*vdwtype[jnrA+0];
589             vdwjidx0B        = 2*vdwtype[jnrB+0];
590             vdwjidx0C        = 2*vdwtype[jnrC+0];
591             vdwjidx0D        = 2*vdwtype[jnrD+0];
592
593             fjx0             = _mm256_setzero_pd();
594             fjy0             = _mm256_setzero_pd();
595             fjz0             = _mm256_setzero_pd();
596
597             /**************************
598              * CALCULATE INTERACTIONS *
599              **************************/
600
601             if (gmx_mm256_any_lt(rsq00,rcutoff2))
602             {
603
604             r00              = _mm256_mul_pd(rsq00,rinv00);
605             r00              = _mm256_andnot_pd(dummy_mask,r00);
606
607             /* Compute parameters for interactions between i and j atoms */
608             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
609                                             vdwioffsetptr0+vdwjidx0B,
610                                             vdwioffsetptr0+vdwjidx0C,
611                                             vdwioffsetptr0+vdwjidx0D,
612                                             &c6_00,&c12_00);
613
614             c6grid_00       = gmx_mm256_load_4real_swizzle_pd(vdwgridioffsetptr0+vdwjidx0A,
615                                                                   vdwgridioffsetptr0+vdwjidx0B,
616                                                                   vdwgridioffsetptr0+vdwjidx0C,
617                                                                   vdwgridioffsetptr0+vdwjidx0D);
618
619             /* Analytical LJ-PME */
620             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
621             ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
622             ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
623             exponent         = gmx_simd_exp_d(ewcljrsq);
624             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
625             poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
626             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
627             vvdw6            = _mm256_mul_pd(_mm256_sub_pd(c6_00,_mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly))),rinvsix);
628             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
629             vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
630                                           _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_add_pd(_mm256_mul_pd(c6_00,sh_vdw_invrcut6),_mm256_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
631             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
632             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,_mm256_sub_pd(vvdw6,_mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6)))),rinvsq00);
633
634             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
635
636             /* Update potential sum for this i atom from the interaction with this j atom. */
637             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
638             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
639             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
640
641             fscal            = fvdw;
642
643             fscal            = _mm256_and_pd(fscal,cutoff_mask);
644
645             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
646
647             /* Calculate temporary vectorial force */
648             tx               = _mm256_mul_pd(fscal,dx00);
649             ty               = _mm256_mul_pd(fscal,dy00);
650             tz               = _mm256_mul_pd(fscal,dz00);
651
652             /* Update vectorial force */
653             fix0             = _mm256_add_pd(fix0,tx);
654             fiy0             = _mm256_add_pd(fiy0,ty);
655             fiz0             = _mm256_add_pd(fiz0,tz);
656
657             fjx0             = _mm256_add_pd(fjx0,tx);
658             fjy0             = _mm256_add_pd(fjy0,ty);
659             fjz0             = _mm256_add_pd(fjz0,tz);
660
661             }
662
663             /**************************
664              * CALCULATE INTERACTIONS *
665              **************************/
666
667             if (gmx_mm256_any_lt(rsq10,rcutoff2))
668             {
669
670             r10              = _mm256_mul_pd(rsq10,rinv10);
671             r10              = _mm256_andnot_pd(dummy_mask,r10);
672
673             /* Compute parameters for interactions between i and j atoms */
674             qq10             = _mm256_mul_pd(iq1,jq0);
675
676             /* EWALD ELECTROSTATICS */
677
678             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
679             ewrt             = _mm256_mul_pd(r10,ewtabscale);
680             ewitab           = _mm256_cvttpd_epi32(ewrt);
681             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
682             ewitab           = _mm_slli_epi32(ewitab,2);
683             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
684             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
685             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
686             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
687             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
688             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
689             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
690             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
691             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
692
693             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
694
695             /* Update potential sum for this i atom from the interaction with this j atom. */
696             velec            = _mm256_and_pd(velec,cutoff_mask);
697             velec            = _mm256_andnot_pd(dummy_mask,velec);
698             velecsum         = _mm256_add_pd(velecsum,velec);
699
700             fscal            = felec;
701
702             fscal            = _mm256_and_pd(fscal,cutoff_mask);
703
704             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
705
706             /* Calculate temporary vectorial force */
707             tx               = _mm256_mul_pd(fscal,dx10);
708             ty               = _mm256_mul_pd(fscal,dy10);
709             tz               = _mm256_mul_pd(fscal,dz10);
710
711             /* Update vectorial force */
712             fix1             = _mm256_add_pd(fix1,tx);
713             fiy1             = _mm256_add_pd(fiy1,ty);
714             fiz1             = _mm256_add_pd(fiz1,tz);
715
716             fjx0             = _mm256_add_pd(fjx0,tx);
717             fjy0             = _mm256_add_pd(fjy0,ty);
718             fjz0             = _mm256_add_pd(fjz0,tz);
719
720             }
721
722             /**************************
723              * CALCULATE INTERACTIONS *
724              **************************/
725
726             if (gmx_mm256_any_lt(rsq20,rcutoff2))
727             {
728
729             r20              = _mm256_mul_pd(rsq20,rinv20);
730             r20              = _mm256_andnot_pd(dummy_mask,r20);
731
732             /* Compute parameters for interactions between i and j atoms */
733             qq20             = _mm256_mul_pd(iq2,jq0);
734
735             /* EWALD ELECTROSTATICS */
736
737             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
738             ewrt             = _mm256_mul_pd(r20,ewtabscale);
739             ewitab           = _mm256_cvttpd_epi32(ewrt);
740             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
741             ewitab           = _mm_slli_epi32(ewitab,2);
742             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
743             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
744             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
745             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
746             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
747             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
748             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
749             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
750             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
751
752             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
753
754             /* Update potential sum for this i atom from the interaction with this j atom. */
755             velec            = _mm256_and_pd(velec,cutoff_mask);
756             velec            = _mm256_andnot_pd(dummy_mask,velec);
757             velecsum         = _mm256_add_pd(velecsum,velec);
758
759             fscal            = felec;
760
761             fscal            = _mm256_and_pd(fscal,cutoff_mask);
762
763             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
764
765             /* Calculate temporary vectorial force */
766             tx               = _mm256_mul_pd(fscal,dx20);
767             ty               = _mm256_mul_pd(fscal,dy20);
768             tz               = _mm256_mul_pd(fscal,dz20);
769
770             /* Update vectorial force */
771             fix2             = _mm256_add_pd(fix2,tx);
772             fiy2             = _mm256_add_pd(fiy2,ty);
773             fiz2             = _mm256_add_pd(fiz2,tz);
774
775             fjx0             = _mm256_add_pd(fjx0,tx);
776             fjy0             = _mm256_add_pd(fjy0,ty);
777             fjz0             = _mm256_add_pd(fjz0,tz);
778
779             }
780
781             /**************************
782              * CALCULATE INTERACTIONS *
783              **************************/
784
785             if (gmx_mm256_any_lt(rsq30,rcutoff2))
786             {
787
788             r30              = _mm256_mul_pd(rsq30,rinv30);
789             r30              = _mm256_andnot_pd(dummy_mask,r30);
790
791             /* Compute parameters for interactions between i and j atoms */
792             qq30             = _mm256_mul_pd(iq3,jq0);
793
794             /* EWALD ELECTROSTATICS */
795
796             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
797             ewrt             = _mm256_mul_pd(r30,ewtabscale);
798             ewitab           = _mm256_cvttpd_epi32(ewrt);
799             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
800             ewitab           = _mm_slli_epi32(ewitab,2);
801             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
802             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
803             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
804             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
805             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
806             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
807             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
808             velec            = _mm256_mul_pd(qq30,_mm256_sub_pd(_mm256_sub_pd(rinv30,sh_ewald),velec));
809             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
810
811             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
812
813             /* Update potential sum for this i atom from the interaction with this j atom. */
814             velec            = _mm256_and_pd(velec,cutoff_mask);
815             velec            = _mm256_andnot_pd(dummy_mask,velec);
816             velecsum         = _mm256_add_pd(velecsum,velec);
817
818             fscal            = felec;
819
820             fscal            = _mm256_and_pd(fscal,cutoff_mask);
821
822             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
823
824             /* Calculate temporary vectorial force */
825             tx               = _mm256_mul_pd(fscal,dx30);
826             ty               = _mm256_mul_pd(fscal,dy30);
827             tz               = _mm256_mul_pd(fscal,dz30);
828
829             /* Update vectorial force */
830             fix3             = _mm256_add_pd(fix3,tx);
831             fiy3             = _mm256_add_pd(fiy3,ty);
832             fiz3             = _mm256_add_pd(fiz3,tz);
833
834             fjx0             = _mm256_add_pd(fjx0,tx);
835             fjy0             = _mm256_add_pd(fjy0,ty);
836             fjz0             = _mm256_add_pd(fjz0,tz);
837
838             }
839
840             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
841             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
842             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
843             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
844
845             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
846
847             /* Inner loop uses 207 flops */
848         }
849
850         /* End of innermost loop */
851
852         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
853                                                  f+i_coord_offset,fshift+i_shift_offset);
854
855         ggid                        = gid[iidx];
856         /* Update potential energies */
857         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
858         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
859
860         /* Increment number of inner iterations */
861         inneriter                  += j_index_end - j_index_start;
862
863         /* Outer loop uses 26 flops */
864     }
865
866     /* Increment number of outer iterations */
867     outeriter        += nri;
868
869     /* Update outer/inner flops */
870
871     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*207);
872 }
873 /*
874  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_F_avx_256_double
875  * Electrostatics interaction: Ewald
876  * VdW interaction:            LJEwald
877  * Geometry:                   Water4-Particle
878  * Calculate force/pot:        Force
879  */
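/*
 * Force-only variant: relative to the VF kernel above it omits the velecsum/vvdwsum
 * energy accumulation and reads the force-only Coulomb table (fr->ic->tabq_coul_F)
 * instead of the combined FDV0 table.
 */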
880 void
881 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_F_avx_256_double
882                     (t_nblist                    * gmx_restrict       nlist,
883                      rvec                        * gmx_restrict          xx,
884                      rvec                        * gmx_restrict          ff,
885                      t_forcerec                  * gmx_restrict          fr,
886                      t_mdatoms                   * gmx_restrict     mdatoms,
887                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
888                      t_nrnb                      * gmx_restrict        nrnb)
889 {
890     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
891      * just 0 for non-waters.
892      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
893      * jnr indices corresponding to data put in the four positions in the SIMD register.
894      */
895     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
896     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
897     int              jnrA,jnrB,jnrC,jnrD;
898     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
899     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
900     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
901     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
902     real             rcutoff_scalar;
903     real             *shiftvec,*fshift,*x,*f;
904     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
905     real             scratch[4*DIM];
906     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
907     real *           vdwioffsetptr0;
908     real *           vdwgridioffsetptr0;
909     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
910     real *           vdwioffsetptr1;
911     real *           vdwgridioffsetptr1;
912     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
913     real *           vdwioffsetptr2;
914     real *           vdwgridioffsetptr2;
915     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
916     real *           vdwioffsetptr3;
917     real *           vdwgridioffsetptr3;
918     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
919     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
920     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
921     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
922     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
923     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
924     __m256d          dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
925     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
926     real             *charge;
927     int              nvdwtype;
928     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
929     int              *vdwtype;
930     real             *vdwparam;
931     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
932     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
933     __m256d           c6grid_00;
934     __m256d           c6grid_10;
935     __m256d           c6grid_20;
936     __m256d           c6grid_30;
937     real             *vdwgridparam;
938     __m256d           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
939     __m256d           one_half  = _mm256_set1_pd(0.5);
940     __m256d           minus_one = _mm256_set1_pd(-1.0);
941     __m128i          ewitab;
942     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
943     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
944     real             *ewtab;
945     __m256d          dummy_mask,cutoff_mask;
946     __m128           tmpmask0,tmpmask1;
947     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
948     __m256d          one     = _mm256_set1_pd(1.0);
949     __m256d          two     = _mm256_set1_pd(2.0);
950     x                = xx[0];
951     f                = ff[0];
952
953     nri              = nlist->nri;
954     iinr             = nlist->iinr;
955     jindex           = nlist->jindex;
956     jjnr             = nlist->jjnr;
957     shiftidx         = nlist->shift;
958     gid              = nlist->gid;
959     shiftvec         = fr->shift_vec[0];
960     fshift           = fr->fshift[0];
961     facel            = _mm256_set1_pd(fr->epsfac);
962     charge           = mdatoms->chargeA;
963     nvdwtype         = fr->ntype;
964     vdwparam         = fr->nbfp;
965     vdwtype          = mdatoms->typeA;
966     vdwgridparam     = fr->ljpme_c6grid;
967     sh_lj_ewald      = _mm256_set1_pd(fr->ic->sh_lj_ewald);
968     ewclj            = _mm256_set1_pd(fr->ewaldcoeff_lj);
969     ewclj2           = _mm256_mul_pd(minus_one,_mm256_mul_pd(ewclj,ewclj));
970
971     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
972     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
973     beta2            = _mm256_mul_pd(beta,beta);
974     beta3            = _mm256_mul_pd(beta,beta2);
975
976     ewtab            = fr->ic->tabq_coul_F;
977     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
978     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
979
980     /* Setup water-specific parameters */
981     inr              = nlist->iinr[0];
982     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
983     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
984     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
985     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
986     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
987
988     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
989     rcutoff_scalar   = fr->rcoulomb;
990     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
991     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
992
993     sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
994     rvdw             = _mm256_set1_pd(fr->rvdw);
995
996     /* Avoid stupid compiler warnings */
997     jnrA = jnrB = jnrC = jnrD = 0;
998     j_coord_offsetA = 0;
999     j_coord_offsetB = 0;
1000     j_coord_offsetC = 0;
1001     j_coord_offsetD = 0;
1002
1003     outeriter        = 0;
1004     inneriter        = 0;
1005
1006     for(iidx=0;iidx<4*DIM;iidx++)
1007     {
1008         scratch[iidx] = 0.0;
1009     }
1010
1011     /* Start outer loop over neighborlists */
1012     for(iidx=0; iidx<nri; iidx++)
1013     {
1014         /* Load shift vector for this list */
1015         i_shift_offset   = DIM*shiftidx[iidx];
1016
1017         /* Load limits for loop over neighbors */
1018         j_index_start    = jindex[iidx];
1019         j_index_end      = jindex[iidx+1];
1020
1021         /* Get outer coordinate index */
1022         inr              = iinr[iidx];
1023         i_coord_offset   = DIM*inr;
1024
1025         /* Load i particle coords and add shift vector */
1026         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1027                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1028
1029         fix0             = _mm256_setzero_pd();
1030         fiy0             = _mm256_setzero_pd();
1031         fiz0             = _mm256_setzero_pd();
1032         fix1             = _mm256_setzero_pd();
1033         fiy1             = _mm256_setzero_pd();
1034         fiz1             = _mm256_setzero_pd();
1035         fix2             = _mm256_setzero_pd();
1036         fiy2             = _mm256_setzero_pd();
1037         fiz2             = _mm256_setzero_pd();
1038         fix3             = _mm256_setzero_pd();
1039         fiy3             = _mm256_setzero_pd();
1040         fiz3             = _mm256_setzero_pd();
1041
1042         /* Start inner kernel loop */
1043         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1044         {
1045
1046             /* Get j neighbor index, and coordinate index */
1047             jnrA             = jjnr[jidx];
1048             jnrB             = jjnr[jidx+1];
1049             jnrC             = jjnr[jidx+2];
1050             jnrD             = jjnr[jidx+3];
1051             j_coord_offsetA  = DIM*jnrA;
1052             j_coord_offsetB  = DIM*jnrB;
1053             j_coord_offsetC  = DIM*jnrC;
1054             j_coord_offsetD  = DIM*jnrD;
1055
1056             /* load j atom coordinates */
1057             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1058                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1059                                                  &jx0,&jy0,&jz0);
1060
1061             /* Calculate displacement vector */
1062             dx00             = _mm256_sub_pd(ix0,jx0);
1063             dy00             = _mm256_sub_pd(iy0,jy0);
1064             dz00             = _mm256_sub_pd(iz0,jz0);
1065             dx10             = _mm256_sub_pd(ix1,jx0);
1066             dy10             = _mm256_sub_pd(iy1,jy0);
1067             dz10             = _mm256_sub_pd(iz1,jz0);
1068             dx20             = _mm256_sub_pd(ix2,jx0);
1069             dy20             = _mm256_sub_pd(iy2,jy0);
1070             dz20             = _mm256_sub_pd(iz2,jz0);
1071             dx30             = _mm256_sub_pd(ix3,jx0);
1072             dy30             = _mm256_sub_pd(iy3,jy0);
1073             dz30             = _mm256_sub_pd(iz3,jz0);
1074
1075             /* Calculate squared distance and things based on it */
1076             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1077             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1078             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1079             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
1080
1081             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1082             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1083             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1084             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
1085
1086             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1087             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1088             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1089             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
1090
1091             /* Load parameters for j particles */
1092             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
1093                                                                  charge+jnrC+0,charge+jnrD+0);
1094             vdwjidx0A        = 2*vdwtype[jnrA+0];
1095             vdwjidx0B        = 2*vdwtype[jnrB+0];
1096             vdwjidx0C        = 2*vdwtype[jnrC+0];
1097             vdwjidx0D        = 2*vdwtype[jnrD+0];
1098
1099             fjx0             = _mm256_setzero_pd();
1100             fjy0             = _mm256_setzero_pd();
1101             fjz0             = _mm256_setzero_pd();
1102
1103             /**************************
1104              * CALCULATE INTERACTIONS *
1105              **************************/
1106
1107             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1108             {
1109
1110             r00              = _mm256_mul_pd(rsq00,rinv00);
1111
1112             /* Compute parameters for interactions between i and j atoms */
1113             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
1114                                             vdwioffsetptr0+vdwjidx0B,
1115                                             vdwioffsetptr0+vdwjidx0C,
1116                                             vdwioffsetptr0+vdwjidx0D,
1117                                             &c6_00,&c12_00);
1118
1119             c6grid_00       = gmx_mm256_load_4real_swizzle_pd(vdwgridioffsetptr0+vdwjidx0A,
1120                                                                   vdwgridioffsetptr0+vdwjidx0B,
1121                                                                   vdwgridioffsetptr0+vdwjidx0C,
1122                                                                   vdwgridioffsetptr0+vdwjidx0D);
1123
1124             /* Analytical LJ-PME */
1125             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1126             ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
1127             ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
1128             exponent         = gmx_simd_exp_d(ewcljrsq);
1129             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4/2) */
1130             poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
1131             /* f6A = 6 * C6grid * (1 - poly) */
1132             f6A              = _mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly));
1133             /* f6B = C6grid * exponent * beta^6 */
1134             f6B              = _mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6));
1135             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1136             fvdw              = _mm256_mul_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),_mm256_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
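                 /* fvdw combines the plain Lennard-Jones term from c6_00/c12_00 with the
                  * LJ-PME grid correction built from c6grid_00, exponent and poly above.
                  * Like the other interaction scalars in this kernel it is the force
                  * divided by r, since it is multiplied by the raw displacement components
                  * dx00/dy00/dz00 below to give the Cartesian force contributions.
                  */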
1137
1138             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1139
1140             fscal            = fvdw;
1141
1142             fscal            = _mm256_and_pd(fscal,cutoff_mask);
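                 /* gmx_mm256_any_lt() above only guarantees that at least one of the four
                  * lanes is inside the cutoff, so fscal is AND-ed with the per-lane
                  * comparison mask to zero the contributions from lanes with
                  * rsq >= rcutoff2.
                  */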
1143
1144             /* Calculate temporary vectorial force */
1145             tx               = _mm256_mul_pd(fscal,dx00);
1146             ty               = _mm256_mul_pd(fscal,dy00);
1147             tz               = _mm256_mul_pd(fscal,dz00);
1148
1149             /* Update vectorial force */
1150             fix0             = _mm256_add_pd(fix0,tx);
1151             fiy0             = _mm256_add_pd(fiy0,ty);
1152             fiz0             = _mm256_add_pd(fiz0,tz);
1153
1154             fjx0             = _mm256_add_pd(fjx0,tx);
1155             fjy0             = _mm256_add_pd(fjy0,ty);
1156             fjz0             = _mm256_add_pd(fjz0,tz);
1157
1158             }
1159
1160             /**************************
1161              * CALCULATE INTERACTIONS *
1162              **************************/
1163
1164             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1165             {
1166
1167             r10              = _mm256_mul_pd(rsq10,rinv10);
1168
1169             /* Compute parameters for interactions between i and j atoms */
1170             qq10             = _mm256_mul_pd(iq1,jq0);
1171
1172             /* EWALD ELECTROSTATICS */
1173
1174             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1175             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1176             ewitab           = _mm256_cvttpd_epi32(ewrt);
1177             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1178             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1179                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1180                                             &ewtabF,&ewtabFn);
1181             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1182             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
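                 /* ewitab holds the integer table index and eweps the fractional part of
                  * r*ewtabscale. The tabulated Ewald force correction is interpolated
                  * linearly, F_tab ~= (1-eweps)*ewtabF + eweps*ewtabFn, and felec becomes
                  * qq10*rinv10*(rinvsq10 - F_tab), i.e. again the scalar force divided by r.
                  */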
1183
1184             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1185
1186             fscal            = felec;
1187
1188             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1189
1190             /* Calculate temporary vectorial force */
1191             tx               = _mm256_mul_pd(fscal,dx10);
1192             ty               = _mm256_mul_pd(fscal,dy10);
1193             tz               = _mm256_mul_pd(fscal,dz10);
1194
1195             /* Update vectorial force */
1196             fix1             = _mm256_add_pd(fix1,tx);
1197             fiy1             = _mm256_add_pd(fiy1,ty);
1198             fiz1             = _mm256_add_pd(fiz1,tz);
1199
1200             fjx0             = _mm256_add_pd(fjx0,tx);
1201             fjy0             = _mm256_add_pd(fjy0,ty);
1202             fjz0             = _mm256_add_pd(fjz0,tz);
1203
1204             }
1205
1206             /**************************
1207              * CALCULATE INTERACTIONS *
1208              **************************/
1209
1210             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1211             {
1212
1213             r20              = _mm256_mul_pd(rsq20,rinv20);
1214
1215             /* Compute parameters for interactions between i and j atoms */
1216             qq20             = _mm256_mul_pd(iq2,jq0);
1217
1218             /* EWALD ELECTROSTATICS */
1219
1220             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1221             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1222             ewitab           = _mm256_cvttpd_epi32(ewrt);
1223             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1224             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1225                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1226                                             &ewtabF,&ewtabFn);
1227             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1228             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1229
1230             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1231
1232             fscal            = felec;
1233
1234             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1235
1236             /* Calculate temporary vectorial force */
1237             tx               = _mm256_mul_pd(fscal,dx20);
1238             ty               = _mm256_mul_pd(fscal,dy20);
1239             tz               = _mm256_mul_pd(fscal,dz20);
1240
1241             /* Update vectorial force */
1242             fix2             = _mm256_add_pd(fix2,tx);
1243             fiy2             = _mm256_add_pd(fiy2,ty);
1244             fiz2             = _mm256_add_pd(fiz2,tz);
1245
1246             fjx0             = _mm256_add_pd(fjx0,tx);
1247             fjy0             = _mm256_add_pd(fjy0,ty);
1248             fjz0             = _mm256_add_pd(fjz0,tz);
1249
1250             }
1251
1252             /**************************
1253              * CALCULATE INTERACTIONS *
1254              **************************/
1255
1256             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1257             {
1258
1259             r30              = _mm256_mul_pd(rsq30,rinv30);
1260
1261             /* Compute parameters for interactions between i and j atoms */
1262             qq30             = _mm256_mul_pd(iq3,jq0);
1263
1264             /* EWALD ELECTROSTATICS */
1265
1266             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1267             ewrt             = _mm256_mul_pd(r30,ewtabscale);
1268             ewitab           = _mm256_cvttpd_epi32(ewrt);
1269             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1270             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1271                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1272                                             &ewtabF,&ewtabFn);
1273             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1274             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
1275
1276             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
1277
1278             fscal            = felec;
1279
1280             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1281
1282             /* Calculate temporary vectorial force */
1283             tx               = _mm256_mul_pd(fscal,dx30);
1284             ty               = _mm256_mul_pd(fscal,dy30);
1285             tz               = _mm256_mul_pd(fscal,dz30);
1286
1287             /* Update vectorial force */
1288             fix3             = _mm256_add_pd(fix3,tx);
1289             fiy3             = _mm256_add_pd(fiy3,ty);
1290             fiz3             = _mm256_add_pd(fiz3,tz);
1291
1292             fjx0             = _mm256_add_pd(fjx0,tx);
1293             fjy0             = _mm256_add_pd(fjy0,ty);
1294             fjz0             = _mm256_add_pd(fjz0,tz);
1295
1296             }
1297
1298             fjptrA             = f+j_coord_offsetA;
1299             fjptrB             = f+j_coord_offsetB;
1300             fjptrC             = f+j_coord_offsetC;
1301             fjptrD             = f+j_coord_offsetD;
1302
1303             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
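                 /* The j forces accumulated in fjx0/fjy0/fjz0 are the same contributions
                  * that were added to the i-atom accumulators, so subtracting them from
                  * the four j atoms here (the "decrement" routine) implements Newton's
                  * third law.
                  */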
1304
1305             /* Inner loop uses 169 flops */
1306         }
1307
1308         if(jidx<j_index_end)
1309         {
1310
1311             /* Get j neighbor indices and the corresponding coordinate offsets */
1312             jnrlistA         = jjnr[jidx];
1313             jnrlistB         = jjnr[jidx+1];
1314             jnrlistC         = jjnr[jidx+2];
1315             jnrlistD         = jjnr[jidx+3];
1316             /* The sign of each element will be negative for non-real (padding) atoms.
1317              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1318              * so it can be used as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
1319              */
1320             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1321
1322             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1323             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1324             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
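                 /* The 32-bit sign mask from the comparison is widened to 64-bit lanes:
                  * the two permutes duplicate each 32-bit element of tmpmask0 so that
                  * every double lane is covered, and gmx_mm256_set_m128 joins the halves
                  * into the 256-bit dummy_mask used below.
                  */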
1325
1326             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1327             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1328             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1329             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
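                 /* Negative (padding) indices are clamped to 0 so that the coordinate and
                  * parameter loads below stay within valid memory; their contributions are
                  * removed again through dummy_mask.
                  */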
1330             j_coord_offsetA  = DIM*jnrA;
1331             j_coord_offsetB  = DIM*jnrB;
1332             j_coord_offsetC  = DIM*jnrC;
1333             j_coord_offsetD  = DIM*jnrD;
1334
1335             /* load j atom coordinates */
1336             gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1337                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1338                                                  &jx0,&jy0,&jz0);
1339
1340             /* Calculate displacement vector */
1341             dx00             = _mm256_sub_pd(ix0,jx0);
1342             dy00             = _mm256_sub_pd(iy0,jy0);
1343             dz00             = _mm256_sub_pd(iz0,jz0);
1344             dx10             = _mm256_sub_pd(ix1,jx0);
1345             dy10             = _mm256_sub_pd(iy1,jy0);
1346             dz10             = _mm256_sub_pd(iz1,jz0);
1347             dx20             = _mm256_sub_pd(ix2,jx0);
1348             dy20             = _mm256_sub_pd(iy2,jy0);
1349             dz20             = _mm256_sub_pd(iz2,jz0);
1350             dx30             = _mm256_sub_pd(ix3,jx0);
1351             dy30             = _mm256_sub_pd(iy3,jy0);
1352             dz30             = _mm256_sub_pd(iz3,jz0);
1353
1354             /* Calculate squared distances and the quantities derived from them */
1355             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1356             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1357             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1358             rsq30            = gmx_mm256_calc_rsq_pd(dx30,dy30,dz30);
1359
1360             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1361             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1362             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1363             rinv30           = gmx_mm256_invsqrt_pd(rsq30);
1364
1365             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1366             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1367             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1368             rinvsq30         = _mm256_mul_pd(rinv30,rinv30);
1369
1370             /* Load parameters for j particles */
1371             jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
1372                                                                  charge+jnrC+0,charge+jnrD+0);
1373             vdwjidx0A        = 2*vdwtype[jnrA+0];
1374             vdwjidx0B        = 2*vdwtype[jnrB+0];
1375             vdwjidx0C        = 2*vdwtype[jnrC+0];
1376             vdwjidx0D        = 2*vdwtype[jnrD+0];
1377
1378             fjx0             = _mm256_setzero_pd();
1379             fjy0             = _mm256_setzero_pd();
1380             fjz0             = _mm256_setzero_pd();
1381
1382             /**************************
1383              * CALCULATE INTERACTIONS *
1384              **************************/
1385
1386             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1387             {
1388
1389             r00              = _mm256_mul_pd(rsq00,rinv00);
1390             r00              = _mm256_andnot_pd(dummy_mask,r00);
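                 /* Values in the dummy lanes are cleared so that data loaded through the
                  * clamped indices cannot propagate into the interaction math (in the
                  * electrostatics blocks below this also keeps the table index in range);
                  * fscal is masked with dummy_mask as well, so padded neighbors add no force.
                  */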
1391
1392             /* Compute parameters for interactions between i and j atoms */
1393             gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
1394                                             vdwioffsetptr0+vdwjidx0B,
1395                                             vdwioffsetptr0+vdwjidx0C,
1396                                             vdwioffsetptr0+vdwjidx0D,
1397                                             &c6_00,&c12_00);
1398
1399             c6grid_00       = gmx_mm256_load_4real_swizzle_pd(vdwgridioffsetptr0+vdwjidx0A,
1400                                                                   vdwgridioffsetptr0+vdwjidx0B,
1401                                                                   vdwgridioffsetptr0+vdwjidx0C,
1402                                                                   vdwgridioffsetptr0+vdwjidx0D);
1403
1404             /* Analytical LJ-PME */
1405             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1406             ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
1407             ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
1408             exponent         = gmx_simd_exp_d(ewcljrsq);
1409             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4/2) */
1410             poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
1411             /* f6A = 6 * C6grid * (1 - poly) */
1412             f6A              = _mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly));
1413             /* f6B = C6grid * exponent * beta^6 */
1414             f6B              = _mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6));
1415             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1416             fvdw              = _mm256_mul_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),_mm256_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1417
1418             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1419
1420             fscal            = fvdw;
1421
1422             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1423
1424             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1425
1426             /* Calculate temporary vectorial force */
1427             tx               = _mm256_mul_pd(fscal,dx00);
1428             ty               = _mm256_mul_pd(fscal,dy00);
1429             tz               = _mm256_mul_pd(fscal,dz00);
1430
1431             /* Update vectorial force */
1432             fix0             = _mm256_add_pd(fix0,tx);
1433             fiy0             = _mm256_add_pd(fiy0,ty);
1434             fiz0             = _mm256_add_pd(fiz0,tz);
1435
1436             fjx0             = _mm256_add_pd(fjx0,tx);
1437             fjy0             = _mm256_add_pd(fjy0,ty);
1438             fjz0             = _mm256_add_pd(fjz0,tz);
1439
1440             }
1441
1442             /**************************
1443              * CALCULATE INTERACTIONS *
1444              **************************/
1445
1446             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1447             {
1448
1449             r10              = _mm256_mul_pd(rsq10,rinv10);
1450             r10              = _mm256_andnot_pd(dummy_mask,r10);
1451
1452             /* Compute parameters for interactions between i and j atoms */
1453             qq10             = _mm256_mul_pd(iq1,jq0);
1454
1455             /* EWALD ELECTROSTATICS */
1456
1457             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1458             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1459             ewitab           = _mm256_cvttpd_epi32(ewrt);
1460             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1461             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1462                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1463                                             &ewtabF,&ewtabFn);
1464             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1465             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1466
1467             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1468
1469             fscal            = felec;
1470
1471             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1472
1473             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1474
1475             /* Calculate temporary vectorial force */
1476             tx               = _mm256_mul_pd(fscal,dx10);
1477             ty               = _mm256_mul_pd(fscal,dy10);
1478             tz               = _mm256_mul_pd(fscal,dz10);
1479
1480             /* Update vectorial force */
1481             fix1             = _mm256_add_pd(fix1,tx);
1482             fiy1             = _mm256_add_pd(fiy1,ty);
1483             fiz1             = _mm256_add_pd(fiz1,tz);
1484
1485             fjx0             = _mm256_add_pd(fjx0,tx);
1486             fjy0             = _mm256_add_pd(fjy0,ty);
1487             fjz0             = _mm256_add_pd(fjz0,tz);
1488
1489             }
1490
1491             /**************************
1492              * CALCULATE INTERACTIONS *
1493              **************************/
1494
1495             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1496             {
1497
1498             r20              = _mm256_mul_pd(rsq20,rinv20);
1499             r20              = _mm256_andnot_pd(dummy_mask,r20);
1500
1501             /* Compute parameters for interactions between i and j atoms */
1502             qq20             = _mm256_mul_pd(iq2,jq0);
1503
1504             /* EWALD ELECTROSTATICS */
1505
1506             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1507             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1508             ewitab           = _mm256_cvttpd_epi32(ewrt);
1509             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1510             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1511                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1512                                             &ewtabF,&ewtabFn);
1513             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1514             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1515
1516             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1517
1518             fscal            = felec;
1519
1520             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1521
1522             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1523
1524             /* Calculate temporary vectorial force */
1525             tx               = _mm256_mul_pd(fscal,dx20);
1526             ty               = _mm256_mul_pd(fscal,dy20);
1527             tz               = _mm256_mul_pd(fscal,dz20);
1528
1529             /* Update vectorial force */
1530             fix2             = _mm256_add_pd(fix2,tx);
1531             fiy2             = _mm256_add_pd(fiy2,ty);
1532             fiz2             = _mm256_add_pd(fiz2,tz);
1533
1534             fjx0             = _mm256_add_pd(fjx0,tx);
1535             fjy0             = _mm256_add_pd(fjy0,ty);
1536             fjz0             = _mm256_add_pd(fjz0,tz);
1537
1538             }
1539
1540             /**************************
1541              * CALCULATE INTERACTIONS *
1542              **************************/
1543
1544             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1545             {
1546
1547             r30              = _mm256_mul_pd(rsq30,rinv30);
1548             r30              = _mm256_andnot_pd(dummy_mask,r30);
1549
1550             /* Compute parameters for interactions between i and j atoms */
1551             qq30             = _mm256_mul_pd(iq3,jq0);
1552
1553             /* EWALD ELECTROSTATICS */
1554
1555             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1556             ewrt             = _mm256_mul_pd(r30,ewtabscale);
1557             ewitab           = _mm256_cvttpd_epi32(ewrt);
1558             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1559             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1560                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1561                                             &ewtabF,&ewtabFn);
1562             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1563             felec            = _mm256_mul_pd(_mm256_mul_pd(qq30,rinv30),_mm256_sub_pd(rinvsq30,felec));
1564
1565             cutoff_mask      = _mm256_cmp_pd(rsq30,rcutoff2,_CMP_LT_OQ);
1566
1567             fscal            = felec;
1568
1569             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1570
1571             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1572
1573             /* Calculate temporary vectorial force */
1574             tx               = _mm256_mul_pd(fscal,dx30);
1575             ty               = _mm256_mul_pd(fscal,dy30);
1576             tz               = _mm256_mul_pd(fscal,dz30);
1577
1578             /* Update vectorial force */
1579             fix3             = _mm256_add_pd(fix3,tx);
1580             fiy3             = _mm256_add_pd(fiy3,ty);
1581             fiz3             = _mm256_add_pd(fiz3,tz);
1582
1583             fjx0             = _mm256_add_pd(fjx0,tx);
1584             fjy0             = _mm256_add_pd(fjy0,ty);
1585             fjz0             = _mm256_add_pd(fjz0,tz);
1586
1587             }
1588
1589             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1590             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1591             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1592             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
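                 /* For padded entries the j-force pointer is redirected to a scratch
                  * buffer, so the (already zeroed) dummy-lane forces are never written to
                  * a real atom.
                  */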
1593
1594             gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
1595
1596             /* Inner loop uses 173 flops */
1597         }
1598
1599         /* End of innermost loop */
1600
1601         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1602                                                  f+i_coord_offset,fshift+i_shift_offset);
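             /* The call above reduces the four lanes of each i-atom force accumulator and
              * adds the results to the force array and to the shift force for this i entry.
              */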
1603
1604         /* Increment number of inner iterations */
1605         inneriter                  += j_index_end - j_index_start;
1606
1607         /* Outer loop uses 24 flops */
1608     }
1609
1610     /* Increment number of outer iterations */
1611     outeriter        += nri;
1612
1613     /* Update outer/inner flops */
1614
1615     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*173);
1616 }