/* src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_avx_256_double.c */
/*
 * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 * Copyright (c) 2001-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <math.h>

#include "../nb_kernel.h"
#include "types/simple.h"
#include "vec.h"
#include "nrnb.h"

#include "gmx_math_x86_avx_256_double.h"
#include "kernelutil_x86_avx_256_double.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_double
 * Electrostatics interaction: Coulomb
 * VdW interaction:            CubicSplineTable
 * Geometry:                   Particle-Particle
 * Calculate force/pot:        PotentialAndForce
 */
void
nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_double
                    (t_nblist * gmx_restrict                nlist,
                     rvec * gmx_restrict                    xx,
                     rvec * gmx_restrict                    ff,
                     t_forcerec * gmx_restrict              fr,
                     t_mdatoms * gmx_restrict               mdatoms,
                     nb_kernel_data_t * gmx_restrict        kernel_data,
                     t_nrnb * gmx_restrict                  nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m128i          vfitab;
    __m128i          ifour       = _mm_set1_epi32(4);
    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
    real             *vftab;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    /* Sign-bit mask for doubles: only bit 63 of each 64-bit lane is set */
    __m256d          signbit = _mm256_castsi256_pd( _mm256_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000) );
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    vftab            = kernel_data->table_vdw->data;
    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();

        /* Load parameters for i particles */
        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

        /* Reset potential sums */
        velecsum         = _mm256_setzero_pd();
        vvdwsum          = _mm256_setzero_pd();

        /* Start inner kernel loop */
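        /* The j list is padded with negative indices up to a multiple of the SIMD
         * width (4), so this loop only runs over complete quads of real atoms;
         * a padded final quad is handled by the masked epilogue below.
         */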
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);
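            /* Each table point holds 8 doubles: Y,F,G,H for dispersion followed by
             * Y,F,G,H for repulsion, hence the shift by 3 (index*8) here and the
             * offset of 4 added below for the repulsion part. vfeps is the fractional
             * position of r within its table interval.
             */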

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
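            /* Cubic spline in the fractional offset eps:
             *   V(eps)  = Y + eps*(F + eps*(G + eps*H)) = Y + eps*Fp
             *   V'(eps) = F + eps*(2*G + 3*eps*H)       = Fp + eps*(G + 2*Heps)
             * VV is the potential and FF the derivative with respect to eps.
             */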
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw6            = _mm256_mul_pd(c6_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw12           = _mm256_mul_pd(c12_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
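            /* FF is dV/deps; with eps = r*tabscale, dV/dr = FF*tabscale. The scalar
             * force -(dV/dr)/r is obtained by scaling with tabscale*rinv and flipping
             * the sign via the xor with signbit.
             */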

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum         = _mm256_add_pd(velecsum,velec);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = _mm256_add_pd(felec,fvdw);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 62 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

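            /* Widen the four 32-bit comparison results to the four 64-bit lanes of the
             * 256-bit double mask by duplicating each element and joining the halves.
             */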
            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);
            r00              = _mm256_andnot_pd(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw6            = _mm256_mul_pd(c6_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw12           = _mm256_mul_pd(c12_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);
            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 63 flops */
        }

        /* End of innermost loop */

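        /* Reduce the four accumulated lanes of the i-atom force and add them to f
         * and to the per-shift force array fshift, which is later used for the virial.
         */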
        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 9 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*63);
}
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_double
 * Electrostatics interaction: Coulomb
 * VdW interaction:            CubicSplineTable
 * Geometry:                   Particle-Particle
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_double
                    (t_nblist * gmx_restrict                nlist,
                     rvec * gmx_restrict                    xx,
                     rvec * gmx_restrict                    ff,
                     t_forcerec * gmx_restrict              fr,
                     t_mdatoms * gmx_restrict               mdatoms,
                     nb_kernel_data_t * gmx_restrict        kernel_data,
                     t_nrnb * gmx_restrict                  nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m128i          vfitab;
    __m128i          ifour       = _mm_set1_epi32(4);
    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
    real             *vftab;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    /* Sign-bit mask for doubles: only bit 63 of each 64-bit lane is set */
    __m256d          signbit = _mm256_castsi256_pd( _mm256_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000) );
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    vftab            = kernel_data->table_vdw->data;
    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();

        /* Load parameters for i particles */
        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
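            /* Force-only kernel: only the spline derivative FF is evaluated below;
             * the potential VV is not needed here.
             */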
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            fscal            = _mm256_add_pd(felec,fvdw);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 53 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);
            r00              = _mm256_andnot_pd(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 54 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 7 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*54);
}