1 /*
2  * Note: this file was generated by the Gromacs avx_256_single kernel generator.
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  * Copyright (c) 2001-2012, The GROMACS Development Team
9  *
10  * Gromacs is a library for molecular simulation and trajectory analysis,
11  * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
12  * a full list of developers and information, check out http://www.gromacs.org
13  *
14  * This program is free software; you can redistribute it and/or modify it under
15  * the terms of the GNU Lesser General Public License as published by the Free
16  * Software Foundation; either version 2 of the License, or (at your option) any
17  * later version.
18  *
19  * To help fund GROMACS development, we humbly ask that you cite
20  * the papers people have written on it - you can find them on the website.
21  */
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <math.h>
27
28 #include "../nb_kernel.h"
29 #include "types/simple.h"
30 #include "vec.h"
31 #include "nrnb.h"
32
33 #include "gmx_math_x86_avx_256_single.h"
34 #include "kernelutil_x86_avx_256_single.h"
35
36 /*
37  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_single
38  * Electrostatics interaction: ReactionField
39  * VdW interaction:            CubicSplineTable
40  * Geometry:                   Particle-Particle
41  * Calculate force/pot:        PotentialAndForce
42  */
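/*
 * Reference note (added, not part of the generated code): with qq = epsfac*q_i*q_j
 * (iq0 below is pre-scaled by facel) and the reaction-field constants k_rf and c_rf
 * taken from fr->ic, the per-pair scalar forms evaluated inside the cutoff are
 *     V_elec    = qq*(1/r + k_rf*r^2 - c_rf)
 *     F_elec/r  = qq*(1/r^3 - 2*k_rf)
 * while the Lennard-Jones contribution is interpolated from a cubic-spline table
 * instead of being evaluated analytically.
 */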
43 void
44 nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_single
45                     (t_nblist * gmx_restrict                nlist,
46                      rvec * gmx_restrict                    xx,
47                      rvec * gmx_restrict                    ff,
48                      t_forcerec * gmx_restrict              fr,
49                      t_mdatoms * gmx_restrict               mdatoms,
50                      nb_kernel_data_t * gmx_restrict        kernel_data,
51                      t_nrnb * gmx_restrict                  nrnb)
52 {
53     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
54      * just 0 for non-waters.
55      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
56      * jnr indices corresponding to data put in the eight positions in the SIMD register.
57      */
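    /* Note (added): each __m256 holds eight single-precision lanes, so one iteration of the
     * main inner loop below processes eight j atoms (A..H) at once; the trailing, partially
     * filled chunk of the list is handled by the masked epilogue loop further down.
     */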
58     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
59     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
60     int              jnrA,jnrB,jnrC,jnrD;
61     int              jnrE,jnrF,jnrG,jnrH;
62     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
63     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
64     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
65     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
66     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
67     real             rcutoff_scalar;
68     real             *shiftvec,*fshift,*x,*f;
69     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
70     real             scratch[4*DIM];
71     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
72     real *           vdwioffsetptr0;
73     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
74     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
75     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
76     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
77     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
78     real             *charge;
79     int              nvdwtype;
80     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
81     int              *vdwtype;
82     real             *vdwparam;
83     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
84     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
85     __m256i          vfitab;
86     __m128i          vfitab_lo,vfitab_hi;
87     __m128i          ifour       = _mm_set1_epi32(4);
88     __m256           rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
89     real             *vftab;
90     __m256           dummy_mask,cutoff_mask;
91     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
92     __m256           one     = _mm256_set1_ps(1.0);
93     __m256           two     = _mm256_set1_ps(2.0);
94     x                = xx[0];
95     f                = ff[0];
96
97     nri              = nlist->nri;
98     iinr             = nlist->iinr;
99     jindex           = nlist->jindex;
100     jjnr             = nlist->jjnr;
101     shiftidx         = nlist->shift;
102     gid              = nlist->gid;
103     shiftvec         = fr->shift_vec[0];
104     fshift           = fr->fshift[0];
105     facel            = _mm256_set1_ps(fr->epsfac);
106     charge           = mdatoms->chargeA;
107     krf              = _mm256_set1_ps(fr->ic->k_rf);
108     krf2             = _mm256_set1_ps(fr->ic->k_rf*2.0);
109     crf              = _mm256_set1_ps(fr->ic->c_rf);
110     nvdwtype         = fr->ntype;
111     vdwparam         = fr->nbfp;
112     vdwtype          = mdatoms->typeA;
113
114     vftab            = kernel_data->table_vdw->data;
115     vftabscale       = _mm256_set1_ps(kernel_data->table_vdw->scale);
116
117     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
118     rcutoff_scalar   = fr->rcoulomb;
119     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
120     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
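    /* Note (added): the cutoff tests below compare squared distances (rsq00 against rcutoff2),
     * so the cutoff check itself never needs a square root.
     */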
121
122     /* Avoid stupid compiler warnings */
123     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
124     j_coord_offsetA = 0;
125     j_coord_offsetB = 0;
126     j_coord_offsetC = 0;
127     j_coord_offsetD = 0;
128     j_coord_offsetE = 0;
129     j_coord_offsetF = 0;
130     j_coord_offsetG = 0;
131     j_coord_offsetH = 0;
132
133     outeriter        = 0;
134     inneriter        = 0;
135
136     for(iidx=0;iidx<4*DIM;iidx++)
137     {
138         scratch[iidx] = 0.0;
139     }
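    /* Note (added): scratch is a dummy force buffer; in the masked epilogue loop the force
     * pointers of padded (negative) j entries are redirected here so their contributions
     * are discarded without branching.
     */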
140
141     /* Start outer loop over neighborlists */
142     for(iidx=0; iidx<nri; iidx++)
143     {
144         /* Load shift vector for this list */
145         i_shift_offset   = DIM*shiftidx[iidx];
146
147         /* Load limits for loop over neighbors */
148         j_index_start    = jindex[iidx];
149         j_index_end      = jindex[iidx+1];
150
151         /* Get outer coordinate index */
152         inr              = iinr[iidx];
153         i_coord_offset   = DIM*inr;
154
155         /* Load i particle coords and add shift vector */
156         gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
157
158         fix0             = _mm256_setzero_ps();
159         fiy0             = _mm256_setzero_ps();
160         fiz0             = _mm256_setzero_ps();
161
162         /* Load parameters for i particles */
163         iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
164         vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
165
166         /* Reset potential sums */
167         velecsum         = _mm256_setzero_ps();
168         vvdwsum          = _mm256_setzero_ps();
169
170         /* Start inner kernel loop */
171         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
172         {
173
174             /* Get j neighbor index, and coordinate index */
175             jnrA             = jjnr[jidx];
176             jnrB             = jjnr[jidx+1];
177             jnrC             = jjnr[jidx+2];
178             jnrD             = jjnr[jidx+3];
179             jnrE             = jjnr[jidx+4];
180             jnrF             = jjnr[jidx+5];
181             jnrG             = jjnr[jidx+6];
182             jnrH             = jjnr[jidx+7];
183             j_coord_offsetA  = DIM*jnrA;
184             j_coord_offsetB  = DIM*jnrB;
185             j_coord_offsetC  = DIM*jnrC;
186             j_coord_offsetD  = DIM*jnrD;
187             j_coord_offsetE  = DIM*jnrE;
188             j_coord_offsetF  = DIM*jnrF;
189             j_coord_offsetG  = DIM*jnrG;
190             j_coord_offsetH  = DIM*jnrH;
191
192             /* load j atom coordinates */
193             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
194                                                  x+j_coord_offsetC,x+j_coord_offsetD,
195                                                  x+j_coord_offsetE,x+j_coord_offsetF,
196                                                  x+j_coord_offsetG,x+j_coord_offsetH,
197                                                  &jx0,&jy0,&jz0);
198
199             /* Calculate displacement vector */
200             dx00             = _mm256_sub_ps(ix0,jx0);
201             dy00             = _mm256_sub_ps(iy0,jy0);
202             dz00             = _mm256_sub_ps(iz0,jz0);
203
204             /* Calculate squared distance and things based on it */
205             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
206
207             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
208
209             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
210
211             /* Load parameters for j particles */
212             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
213                                                                  charge+jnrC+0,charge+jnrD+0,
214                                                                  charge+jnrE+0,charge+jnrF+0,
215                                                                  charge+jnrG+0,charge+jnrH+0);
216             vdwjidx0A        = 2*vdwtype[jnrA+0];
217             vdwjidx0B        = 2*vdwtype[jnrB+0];
218             vdwjidx0C        = 2*vdwtype[jnrC+0];
219             vdwjidx0D        = 2*vdwtype[jnrD+0];
220             vdwjidx0E        = 2*vdwtype[jnrE+0];
221             vdwjidx0F        = 2*vdwtype[jnrF+0];
222             vdwjidx0G        = 2*vdwtype[jnrG+0];
223             vdwjidx0H        = 2*vdwtype[jnrH+0];
224
225             /**************************
226              * CALCULATE INTERACTIONS *
227              **************************/
228
229             if (gmx_mm256_any_lt(rsq00,rcutoff2))
230             {
231
232             r00              = _mm256_mul_ps(rsq00,rinv00);
233
234             /* Compute parameters for interactions between i and j atoms */
235             qq00             = _mm256_mul_ps(iq0,jq0);
236             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
237                                             vdwioffsetptr0+vdwjidx0B,
238                                             vdwioffsetptr0+vdwjidx0C,
239                                             vdwioffsetptr0+vdwjidx0D,
240                                             vdwioffsetptr0+vdwjidx0E,
241                                             vdwioffsetptr0+vdwjidx0F,
242                                             vdwioffsetptr0+vdwjidx0G,
243                                             vdwioffsetptr0+vdwjidx0H,
244                                             &c6_00,&c12_00);
245
246             /* Calculate table index by multiplying r with table scale and truncate to integer */
247             rt               = _mm256_mul_ps(r00,vftabscale);
248             vfitab           = _mm256_cvttps_epi32(rt);
249             vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
250             /*         AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
251             vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
252             vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
253             vfitab_lo        = _mm_slli_epi32(vfitab_lo,3);
254             vfitab_hi        = _mm_slli_epi32(vfitab_hi,3);
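            /* Note (added): the shift by 3 scales the index by the table stride of eight reals
             * per point, i.e. four cubic-spline coefficients (Y,F,G,H) for dispersion followed
             * by four for repulsion, which is why ifour is added before the repulsion lookup
             * further down.
             */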
255
256             /* REACTION-FIELD ELECTROSTATICS */
257             velec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
258             felec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
259
260             /* CUBIC SPLINE TABLE DISPERSION */
261             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
262                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
263             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
264                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
265             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
266                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
267             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
268                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
269             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
270             Heps             = _mm256_mul_ps(vfeps,H);
271             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
272             VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
273             vvdw6            = _mm256_mul_ps(c6_00,VV);
274             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
275             fvdw6            = _mm256_mul_ps(c6_00,FF);
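            /* Note (added): the spline is evaluated in Horner form; with eps the fractional
             * table offset this amounts to
             *     VV = Y + eps*(F + eps*(G + eps*H))      (interpolated potential)
             *     FF = F + eps*(2*G + 3*eps*H)            (its derivative dVV/deps)
             * and the c6/c12 prefactors then give the dispersion/repulsion contributions.
             */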
276
277             /* CUBIC SPLINE TABLE REPULSION */
278             vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
279             vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
280             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
281                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
282             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
283                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
284             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
285                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
286             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
287                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
288             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
289             Heps             = _mm256_mul_ps(vfeps,H);
290             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
291             VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
292             vvdw12           = _mm256_mul_ps(c12_00,VV);
293             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
294             fvdw12           = _mm256_mul_ps(c12_00,FF);
295             vvdw             = _mm256_add_ps(vvdw12,vvdw6);
296             fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
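            /* Note (added): FF is a derivative with respect to the table coordinate, so it is
             * rescaled by tabscale/r and negated (via the sign-bit xor) to give fvdw as a force
             * divided by r; felec has the same form, so multiplying fscal by dx,dy,dz below
             * yields the Cartesian force components directly.
             */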
297
298             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
299
300             /* Update potential sum for this i atom from the interaction with this j atom. */
301             velec            = _mm256_and_ps(velec,cutoff_mask);
302             velecsum         = _mm256_add_ps(velecsum,velec);
303             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
304             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
305
306             fscal            = _mm256_add_ps(felec,fvdw);
307
308             fscal            = _mm256_and_ps(fscal,cutoff_mask);
309
310             /* Calculate temporary vectorial force */
311             tx               = _mm256_mul_ps(fscal,dx00);
312             ty               = _mm256_mul_ps(fscal,dy00);
313             tz               = _mm256_mul_ps(fscal,dz00);
314
315             /* Update vectorial force */
316             fix0             = _mm256_add_ps(fix0,tx);
317             fiy0             = _mm256_add_ps(fiy0,ty);
318             fiz0             = _mm256_add_ps(fiz0,tz);
319
320             fjptrA             = f+j_coord_offsetA;
321             fjptrB             = f+j_coord_offsetB;
322             fjptrC             = f+j_coord_offsetC;
323             fjptrD             = f+j_coord_offsetD;
324             fjptrE             = f+j_coord_offsetE;
325             fjptrF             = f+j_coord_offsetF;
326             fjptrG             = f+j_coord_offsetG;
327             fjptrH             = f+j_coord_offsetH;
328             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
329
330             }
331
332             /* Inner loop uses 72 flops */
333         }
334
335         if(jidx<j_index_end)
336         {
337
338             /* Get j neighbor index, and coordinate index */
339             jnrlistA         = jjnr[jidx];
340             jnrlistB         = jjnr[jidx+1];
341             jnrlistC         = jjnr[jidx+2];
342             jnrlistD         = jjnr[jidx+3];
343             jnrlistE         = jjnr[jidx+4];
344             jnrlistF         = jjnr[jidx+5];
345             jnrlistG         = jjnr[jidx+6];
346             jnrlistH         = jjnr[jidx+7];
347             /* Sign of each element will be negative for non-real atoms.
348              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
349              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
350              */
351             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
352                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
353                                             
354             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
355             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
356             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
357             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
358             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
359             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
360             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
361             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
362             j_coord_offsetA  = DIM*jnrA;
363             j_coord_offsetB  = DIM*jnrB;
364             j_coord_offsetC  = DIM*jnrC;
365             j_coord_offsetD  = DIM*jnrD;
366             j_coord_offsetE  = DIM*jnrE;
367             j_coord_offsetF  = DIM*jnrF;
368             j_coord_offsetG  = DIM*jnrG;
369             j_coord_offsetH  = DIM*jnrH;
370
371             /* load j atom coordinates */
372             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
373                                                  x+j_coord_offsetC,x+j_coord_offsetD,
374                                                  x+j_coord_offsetE,x+j_coord_offsetF,
375                                                  x+j_coord_offsetG,x+j_coord_offsetH,
376                                                  &jx0,&jy0,&jz0);
377
378             /* Calculate displacement vector */
379             dx00             = _mm256_sub_ps(ix0,jx0);
380             dy00             = _mm256_sub_ps(iy0,jy0);
381             dz00             = _mm256_sub_ps(iz0,jz0);
382
383             /* Calculate squared distance and things based on it */
384             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
385
386             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
387
388             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
389
390             /* Load parameters for j particles */
391             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
392                                                                  charge+jnrC+0,charge+jnrD+0,
393                                                                  charge+jnrE+0,charge+jnrF+0,
394                                                                  charge+jnrG+0,charge+jnrH+0);
395             vdwjidx0A        = 2*vdwtype[jnrA+0];
396             vdwjidx0B        = 2*vdwtype[jnrB+0];
397             vdwjidx0C        = 2*vdwtype[jnrC+0];
398             vdwjidx0D        = 2*vdwtype[jnrD+0];
399             vdwjidx0E        = 2*vdwtype[jnrE+0];
400             vdwjidx0F        = 2*vdwtype[jnrF+0];
401             vdwjidx0G        = 2*vdwtype[jnrG+0];
402             vdwjidx0H        = 2*vdwtype[jnrH+0];
403
404             /**************************
405              * CALCULATE INTERACTIONS *
406              **************************/
407
408             if (gmx_mm256_any_lt(rsq00,rcutoff2))
409             {
410
411             r00              = _mm256_mul_ps(rsq00,rinv00);
412             r00              = _mm256_andnot_ps(dummy_mask,r00);
413
414             /* Compute parameters for interactions between i and j atoms */
415             qq00             = _mm256_mul_ps(iq0,jq0);
416             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
417                                             vdwioffsetptr0+vdwjidx0B,
418                                             vdwioffsetptr0+vdwjidx0C,
419                                             vdwioffsetptr0+vdwjidx0D,
420                                             vdwioffsetptr0+vdwjidx0E,
421                                             vdwioffsetptr0+vdwjidx0F,
422                                             vdwioffsetptr0+vdwjidx0G,
423                                             vdwioffsetptr0+vdwjidx0H,
424                                             &c6_00,&c12_00);
425
426             /* Calculate table index by multiplying r with table scale and truncate to integer */
427             rt               = _mm256_mul_ps(r00,vftabscale);
428             vfitab           = _mm256_cvttps_epi32(rt);
429             vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
430             /*         AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
431             vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
432             vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
433             vfitab_lo        = _mm_slli_epi32(vfitab_lo,3);
434             vfitab_hi        = _mm_slli_epi32(vfitab_hi,3);
435
436             /* REACTION-FIELD ELECTROSTATICS */
437             velec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
438             felec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
439
440             /* CUBIC SPLINE TABLE DISPERSION */
441             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
442                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
443             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
444                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
445             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
446                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
447             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
448                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
449             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
450             Heps             = _mm256_mul_ps(vfeps,H);
451             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
452             VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
453             vvdw6            = _mm256_mul_ps(c6_00,VV);
454             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
455             fvdw6            = _mm256_mul_ps(c6_00,FF);
456
457             /* CUBIC SPLINE TABLE REPULSION */
458             vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
459             vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
460             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
461                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
462             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
463                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
464             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
465                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
466             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
467                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
468             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
469             Heps             = _mm256_mul_ps(vfeps,H);
470             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
471             VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
472             vvdw12           = _mm256_mul_ps(c12_00,VV);
473             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
474             fvdw12           = _mm256_mul_ps(c12_00,FF);
475             vvdw             = _mm256_add_ps(vvdw12,vvdw6);
476             fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
477
478             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
479
480             /* Update potential sum for this i atom from the interaction with this j atom. */
481             velec            = _mm256_and_ps(velec,cutoff_mask);
482             velec            = _mm256_andnot_ps(dummy_mask,velec);
483             velecsum         = _mm256_add_ps(velecsum,velec);
484             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
485             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
486             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
487
488             fscal            = _mm256_add_ps(felec,fvdw);
489
490             fscal            = _mm256_and_ps(fscal,cutoff_mask);
491
492             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
493
494             /* Calculate temporary vectorial force */
495             tx               = _mm256_mul_ps(fscal,dx00);
496             ty               = _mm256_mul_ps(fscal,dy00);
497             tz               = _mm256_mul_ps(fscal,dz00);
498
499             /* Update vectorial force */
500             fix0             = _mm256_add_ps(fix0,tx);
501             fiy0             = _mm256_add_ps(fiy0,ty);
502             fiz0             = _mm256_add_ps(fiz0,tz);
503
504             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
505             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
506             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
507             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
508             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
509             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
510             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
511             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
512             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
513
514             }
515
516             /* Inner loop uses 73 flops */
517         }
518
519         /* End of innermost loop */
520
521         gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
522                                                  f+i_coord_offset,fshift+i_shift_offset);
523
524         ggid                        = gid[iidx];
525         /* Update potential energies */
526         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
527         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
528
529         /* Increment number of inner iterations */
530         inneriter                  += j_index_end - j_index_start;
531
532         /* Outer loop uses 9 flops */
533     }
534
535     /* Increment number of outer iterations */
536     outeriter        += nri;
537
538     /* Update outer/inner flops */
539
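    /* Note (added): this records the flop estimate (9 flops per outer and up to 73 per inner
     * iteration, as tallied in the loop comments above) for mdrun's performance accounting.
     */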
540     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*73);
541 }
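#if 0
/* Reference sketch (added, not generated): scalar equivalent of what one SIMD lane of the
 * kernel above computes for a single i-j pair inside the cutoff. The function name and the
 * assumption that qq already includes the epsfac scaling, as well as the table layout
 * (stride of 8 reals: Y,F,G,H for dispersion then Y,F,G,H for repulsion), are for
 * illustration only.
 */
static void
reference_rfcut_cstab_pair(real qq, real c6, real c12, real r,
                           real k_rf, real c_rf, real tabscale, const real *vftab,
                           real *velec, real *vvdw, real *fscal)
{
    real rinv   = 1.0/r;
    real rt     = r*tabscale;
    int  n      = (int)rt;             /* table point index                   */
    real eps    = rt - n;              /* fractional offset within the point  */
    const real *p = vftab + 8*n;       /* assumed stride: 4 disp + 4 rep      */
    real Y,F,G,H,Fp,VV,FF,fvdw;

    /* reaction-field electrostatics */
    *velec = qq*(rinv + k_rf*r*r - c_rf);
    *fscal = qq*(rinv*rinv*rinv - 2.0*k_rf);

    /* dispersion from the cubic spline table */
    Y = p[0]; F = p[1]; G = p[2]; H = p[3];
    Fp    = F + eps*(G + eps*H);
    VV    = Y + eps*Fp;
    FF    = Fp + eps*(G + 2.0*eps*H);
    *vvdw = c6*VV;
    fvdw  = c6*FF;

    /* repulsion from the next four table entries */
    Y = p[4]; F = p[5]; G = p[6]; H = p[7];
    Fp     = F + eps*(G + eps*H);
    VV     = Y + eps*Fp;
    FF     = Fp + eps*(G + 2.0*eps*H);
    *vvdw += c12*VV;
    fvdw  += c12*FF;

    /* convert the table derivative to a force divided by r and add it in */
    *fscal += -fvdw*tabscale*rinv;
}
#endif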
542 /*
543  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_single
544  * Electrostatics interaction: ReactionField
545  * VdW interaction:            CubicSplineTable
546  * Geometry:                   Particle-Particle
547  * Calculate force/pot:        Force
548  */
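/*
 * Reference note (added, not part of the generated code): this variant is identical to the
 * PotentialAndForce kernel above except that potential energies are not accumulated
 * (no velecsum/vvdwsum updates and no VV spline evaluation), which is why its per-iteration
 * flop counts are lower.
 */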
549 void
550 nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_single
551                     (t_nblist * gmx_restrict                nlist,
552                      rvec * gmx_restrict                    xx,
553                      rvec * gmx_restrict                    ff,
554                      t_forcerec * gmx_restrict              fr,
555                      t_mdatoms * gmx_restrict               mdatoms,
556                      nb_kernel_data_t * gmx_restrict        kernel_data,
557                      t_nrnb * gmx_restrict                  nrnb)
558 {
559     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
560      * just 0 for non-waters.
561      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
562      * jnr indices corresponding to data put in the eight positions in the SIMD register.
563      */
564     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
565     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
566     int              jnrA,jnrB,jnrC,jnrD;
567     int              jnrE,jnrF,jnrG,jnrH;
568     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
569     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
570     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
571     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
572     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
573     real             rcutoff_scalar;
574     real             *shiftvec,*fshift,*x,*f;
575     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
576     real             scratch[4*DIM];
577     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
578     real *           vdwioffsetptr0;
579     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
580     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
581     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
582     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
583     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
584     real             *charge;
585     int              nvdwtype;
586     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
587     int              *vdwtype;
588     real             *vdwparam;
589     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
590     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
591     __m256i          vfitab;
592     __m128i          vfitab_lo,vfitab_hi;
593     __m128i          ifour       = _mm_set1_epi32(4);
594     __m256           rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
595     real             *vftab;
596     __m256           dummy_mask,cutoff_mask;
597     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
598     __m256           one     = _mm256_set1_ps(1.0);
599     __m256           two     = _mm256_set1_ps(2.0);
600     x                = xx[0];
601     f                = ff[0];
602
603     nri              = nlist->nri;
604     iinr             = nlist->iinr;
605     jindex           = nlist->jindex;
606     jjnr             = nlist->jjnr;
607     shiftidx         = nlist->shift;
608     gid              = nlist->gid;
609     shiftvec         = fr->shift_vec[0];
610     fshift           = fr->fshift[0];
611     facel            = _mm256_set1_ps(fr->epsfac);
612     charge           = mdatoms->chargeA;
613     krf              = _mm256_set1_ps(fr->ic->k_rf);
614     krf2             = _mm256_set1_ps(fr->ic->k_rf*2.0);
615     crf              = _mm256_set1_ps(fr->ic->c_rf);
616     nvdwtype         = fr->ntype;
617     vdwparam         = fr->nbfp;
618     vdwtype          = mdatoms->typeA;
619
620     vftab            = kernel_data->table_vdw->data;
621     vftabscale       = _mm256_set1_ps(kernel_data->table_vdw->scale);
622
623     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
624     rcutoff_scalar   = fr->rcoulomb;
625     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
626     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
627
628     /* Avoid stupid compiler warnings */
629     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
630     j_coord_offsetA = 0;
631     j_coord_offsetB = 0;
632     j_coord_offsetC = 0;
633     j_coord_offsetD = 0;
634     j_coord_offsetE = 0;
635     j_coord_offsetF = 0;
636     j_coord_offsetG = 0;
637     j_coord_offsetH = 0;
638
639     outeriter        = 0;
640     inneriter        = 0;
641
642     for(iidx=0;iidx<4*DIM;iidx++)
643     {
644         scratch[iidx] = 0.0;
645     }
646
647     /* Start outer loop over neighborlists */
648     for(iidx=0; iidx<nri; iidx++)
649     {
650         /* Load shift vector for this list */
651         i_shift_offset   = DIM*shiftidx[iidx];
652
653         /* Load limits for loop over neighbors */
654         j_index_start    = jindex[iidx];
655         j_index_end      = jindex[iidx+1];
656
657         /* Get outer coordinate index */
658         inr              = iinr[iidx];
659         i_coord_offset   = DIM*inr;
660
661         /* Load i particle coords and add shift vector */
662         gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
663
664         fix0             = _mm256_setzero_ps();
665         fiy0             = _mm256_setzero_ps();
666         fiz0             = _mm256_setzero_ps();
667
668         /* Load parameters for i particles */
669         iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
670         vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
671
672         /* Start inner kernel loop */
673         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
674         {
675
676             /* Get j neighbor index, and coordinate index */
677             jnrA             = jjnr[jidx];
678             jnrB             = jjnr[jidx+1];
679             jnrC             = jjnr[jidx+2];
680             jnrD             = jjnr[jidx+3];
681             jnrE             = jjnr[jidx+4];
682             jnrF             = jjnr[jidx+5];
683             jnrG             = jjnr[jidx+6];
684             jnrH             = jjnr[jidx+7];
685             j_coord_offsetA  = DIM*jnrA;
686             j_coord_offsetB  = DIM*jnrB;
687             j_coord_offsetC  = DIM*jnrC;
688             j_coord_offsetD  = DIM*jnrD;
689             j_coord_offsetE  = DIM*jnrE;
690             j_coord_offsetF  = DIM*jnrF;
691             j_coord_offsetG  = DIM*jnrG;
692             j_coord_offsetH  = DIM*jnrH;
693
694             /* load j atom coordinates */
695             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
696                                                  x+j_coord_offsetC,x+j_coord_offsetD,
697                                                  x+j_coord_offsetE,x+j_coord_offsetF,
698                                                  x+j_coord_offsetG,x+j_coord_offsetH,
699                                                  &jx0,&jy0,&jz0);
700
701             /* Calculate displacement vector */
702             dx00             = _mm256_sub_ps(ix0,jx0);
703             dy00             = _mm256_sub_ps(iy0,jy0);
704             dz00             = _mm256_sub_ps(iz0,jz0);
705
706             /* Calculate squared distance and things based on it */
707             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
708
709             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
710
711             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
712
713             /* Load parameters for j particles */
714             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
715                                                                  charge+jnrC+0,charge+jnrD+0,
716                                                                  charge+jnrE+0,charge+jnrF+0,
717                                                                  charge+jnrG+0,charge+jnrH+0);
718             vdwjidx0A        = 2*vdwtype[jnrA+0];
719             vdwjidx0B        = 2*vdwtype[jnrB+0];
720             vdwjidx0C        = 2*vdwtype[jnrC+0];
721             vdwjidx0D        = 2*vdwtype[jnrD+0];
722             vdwjidx0E        = 2*vdwtype[jnrE+0];
723             vdwjidx0F        = 2*vdwtype[jnrF+0];
724             vdwjidx0G        = 2*vdwtype[jnrG+0];
725             vdwjidx0H        = 2*vdwtype[jnrH+0];
726
727             /**************************
728              * CALCULATE INTERACTIONS *
729              **************************/
730
731             if (gmx_mm256_any_lt(rsq00,rcutoff2))
732             {
733
734             r00              = _mm256_mul_ps(rsq00,rinv00);
735
736             /* Compute parameters for interactions between i and j atoms */
737             qq00             = _mm256_mul_ps(iq0,jq0);
738             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
739                                             vdwioffsetptr0+vdwjidx0B,
740                                             vdwioffsetptr0+vdwjidx0C,
741                                             vdwioffsetptr0+vdwjidx0D,
742                                             vdwioffsetptr0+vdwjidx0E,
743                                             vdwioffsetptr0+vdwjidx0F,
744                                             vdwioffsetptr0+vdwjidx0G,
745                                             vdwioffsetptr0+vdwjidx0H,
746                                             &c6_00,&c12_00);
747
748             /* Calculate table index by multiplying r with table scale and truncate to integer */
749             rt               = _mm256_mul_ps(r00,vftabscale);
750             vfitab           = _mm256_cvttps_epi32(rt);
751             vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
752             /*         AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
753             vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
754             vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
755             vfitab_lo        = _mm_slli_epi32(vfitab_lo,3);
756             vfitab_hi        = _mm_slli_epi32(vfitab_hi,3);
757
758             /* REACTION-FIELD ELECTROSTATICS */
759             felec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
760
761             /* CUBIC SPLINE TABLE DISPERSION */
762             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
763                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
764             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
765                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
766             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
767                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
768             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
769                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
770             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
771             Heps             = _mm256_mul_ps(vfeps,H);
772             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
773             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
774             fvdw6            = _mm256_mul_ps(c6_00,FF);
775
776             /* CUBIC SPLINE TABLE REPULSION */
777             vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
778             vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
779             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
780                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
781             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
782                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
783             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
784                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
785             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
786                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
787             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
788             Heps             = _mm256_mul_ps(vfeps,H);
789             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
790             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
791             fvdw12           = _mm256_mul_ps(c12_00,FF);
792             fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
793
794             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
795
796             fscal            = _mm256_add_ps(felec,fvdw);
797
798             fscal            = _mm256_and_ps(fscal,cutoff_mask);
799
800             /* Calculate temporary vectorial force */
801             tx               = _mm256_mul_ps(fscal,dx00);
802             ty               = _mm256_mul_ps(fscal,dy00);
803             tz               = _mm256_mul_ps(fscal,dz00);
804
805             /* Update vectorial force */
806             fix0             = _mm256_add_ps(fix0,tx);
807             fiy0             = _mm256_add_ps(fiy0,ty);
808             fiz0             = _mm256_add_ps(fiz0,tz);
809
810             fjptrA             = f+j_coord_offsetA;
811             fjptrB             = f+j_coord_offsetB;
812             fjptrC             = f+j_coord_offsetC;
813             fjptrD             = f+j_coord_offsetD;
814             fjptrE             = f+j_coord_offsetE;
815             fjptrF             = f+j_coord_offsetF;
816             fjptrG             = f+j_coord_offsetG;
817             fjptrH             = f+j_coord_offsetH;
818             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
819
820             }
821
822             /* Inner loop uses 57 flops */
823         }
824
825         if(jidx<j_index_end)
826         {
827
828             /* Get j neighbor index, and coordinate index */
829             jnrlistA         = jjnr[jidx];
830             jnrlistB         = jjnr[jidx+1];
831             jnrlistC         = jjnr[jidx+2];
832             jnrlistD         = jjnr[jidx+3];
833             jnrlistE         = jjnr[jidx+4];
834             jnrlistF         = jjnr[jidx+5];
835             jnrlistG         = jjnr[jidx+6];
836             jnrlistH         = jjnr[jidx+7];
837             /* Sign of each element will be negative for non-real atoms.
838              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
839              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
840              */
841             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
842                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
843                                             
844             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
845             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
846             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
847             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
848             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
849             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
850             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
851             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
852             j_coord_offsetA  = DIM*jnrA;
853             j_coord_offsetB  = DIM*jnrB;
854             j_coord_offsetC  = DIM*jnrC;
855             j_coord_offsetD  = DIM*jnrD;
856             j_coord_offsetE  = DIM*jnrE;
857             j_coord_offsetF  = DIM*jnrF;
858             j_coord_offsetG  = DIM*jnrG;
859             j_coord_offsetH  = DIM*jnrH;
860
861             /* load j atom coordinates */
862             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
863                                                  x+j_coord_offsetC,x+j_coord_offsetD,
864                                                  x+j_coord_offsetE,x+j_coord_offsetF,
865                                                  x+j_coord_offsetG,x+j_coord_offsetH,
866                                                  &jx0,&jy0,&jz0);
867
868             /* Calculate displacement vector */
869             dx00             = _mm256_sub_ps(ix0,jx0);
870             dy00             = _mm256_sub_ps(iy0,jy0);
871             dz00             = _mm256_sub_ps(iz0,jz0);
872
873             /* Calculate squared distance and things based on it */
874             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
875
876             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
877
878             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
879
880             /* Load parameters for j particles */
881             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
882                                                                  charge+jnrC+0,charge+jnrD+0,
883                                                                  charge+jnrE+0,charge+jnrF+0,
884                                                                  charge+jnrG+0,charge+jnrH+0);
885             vdwjidx0A        = 2*vdwtype[jnrA+0];
886             vdwjidx0B        = 2*vdwtype[jnrB+0];
887             vdwjidx0C        = 2*vdwtype[jnrC+0];
888             vdwjidx0D        = 2*vdwtype[jnrD+0];
889             vdwjidx0E        = 2*vdwtype[jnrE+0];
890             vdwjidx0F        = 2*vdwtype[jnrF+0];
891             vdwjidx0G        = 2*vdwtype[jnrG+0];
892             vdwjidx0H        = 2*vdwtype[jnrH+0];
893
894             /**************************
895              * CALCULATE INTERACTIONS *
896              **************************/
897
898             if (gmx_mm256_any_lt(rsq00,rcutoff2))
899             {
900
901             r00              = _mm256_mul_ps(rsq00,rinv00);
902             r00              = _mm256_andnot_ps(dummy_mask,r00);
903
904             /* Compute parameters for interactions between i and j atoms */
905             qq00             = _mm256_mul_ps(iq0,jq0);
906             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
907                                             vdwioffsetptr0+vdwjidx0B,
908                                             vdwioffsetptr0+vdwjidx0C,
909                                             vdwioffsetptr0+vdwjidx0D,
910                                             vdwioffsetptr0+vdwjidx0E,
911                                             vdwioffsetptr0+vdwjidx0F,
912                                             vdwioffsetptr0+vdwjidx0G,
913                                             vdwioffsetptr0+vdwjidx0H,
914                                             &c6_00,&c12_00);
915
916             /* Calculate table index by multiplying r with table scale and truncate to integer */
917             rt               = _mm256_mul_ps(r00,vftabscale);
918             vfitab           = _mm256_cvttps_epi32(rt);
919             vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
920             /*         AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
921             vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
922             vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
923             vfitab_lo        = _mm_slli_epi32(vfitab_lo,3);
924             vfitab_hi        = _mm_slli_epi32(vfitab_hi,3);
925
926             /* REACTION-FIELD ELECTROSTATICS */
927             felec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
928
929             /* CUBIC SPLINE TABLE DISPERSION */
930             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
931                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
932             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
933                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
934             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
935                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
936             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
937                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
938             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
939             Heps             = _mm256_mul_ps(vfeps,H);
940             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
941             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
942             fvdw6            = _mm256_mul_ps(c6_00,FF);
943
944             /* CUBIC SPLINE TABLE REPULSION */
945             vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
946             vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
947             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
948                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
949             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
950                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
951             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
952                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
953             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
954                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
955             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
956             Heps             = _mm256_mul_ps(vfeps,H);
957             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
958             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
959             fvdw12           = _mm256_mul_ps(c12_00,FF);
960             fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
961
962             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
963
964             fscal            = _mm256_add_ps(felec,fvdw);
965
966             fscal            = _mm256_and_ps(fscal,cutoff_mask);
967
968             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
969
970             /* Calculate temporary vectorial force */
971             tx               = _mm256_mul_ps(fscal,dx00);
972             ty               = _mm256_mul_ps(fscal,dy00);
973             tz               = _mm256_mul_ps(fscal,dz00);
974
975             /* Update vectorial force */
976             fix0             = _mm256_add_ps(fix0,tx);
977             fiy0             = _mm256_add_ps(fiy0,ty);
978             fiz0             = _mm256_add_ps(fiz0,tz);
979
980             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
981             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
982             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
983             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
984             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
985             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
986             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
987             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
988             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
989
990             }
991
992             /* Inner loop uses 58 flops */
993         }
994
995         /* End of innermost loop */
996
997         gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
998                                                  f+i_coord_offset,fshift+i_shift_offset);
999
1000         /* Increment number of inner iterations */
1001         inneriter                  += j_index_end - j_index_start;
1002
1003         /* Outer loop uses 7 flops */
1004     }
1005
1006     /* Increment number of outer iterations */
1007     outeriter        += nri;
1008
1009     /* Update outer/inner flops */
1010
1011     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*58);
1012 }