/*
 * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 * Copyright (c) 2001-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <math.h>

#include "../nb_kernel.h"
#include "types/simple.h"
#include "vec.h"
#include "nrnb.h"

#include "gmx_math_x86_avx_256_double.h"
#include "kernelutil_x86_avx_256_double.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_double
 * Electrostatics interaction: None
 * VdW interaction:            LennardJones
 * Geometry:                   Particle-Particle
 * Calculate force/pot:        PotentialAndForce
 */
void
nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_double
                    (t_nblist * gmx_restrict                nlist,
                     rvec * gmx_restrict                    xx,
                     rvec * gmx_restrict                    ff,
                     t_forcerec * gmx_restrict              fr,
                     t_mdatoms * gmx_restrict               mdatoms,
                     nb_kernel_data_t * gmx_restrict        kernel_data,
                     t_nrnb * gmx_restrict                  nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    rcutoff_scalar   = fr->rvdw;
    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);

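    /* Note: sh_vdw_invrcut6 holds the r^-6 potential-shift constant (1/rvdw^6 for a
     * potential-shifted Lennard-Jones interaction). It only enters the energy expression
     * below; the force is unaffected by a constant shift.
     */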
    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_pd(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();

        /* Load parameters for i particles */
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

        /* Reset potential sums */
        vvdwsum          = _mm256_setzero_pd();

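        /* The main loop below processes four real j atoms per iteration and stops as soon as
         * the quad contains a padding entry (jjnr < 0); the remaining, partially filled quad
         * is handled by the masked epilogue that follows the loop.
         */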
        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinvsq00         = gmx_mm256_inv_pd(rsq00);

            /* Load parameters for j particles */
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            /* Compute parameters for interactions between i and j atoms */
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* LENNARD-JONES DISPERSION/REPULSION */

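            /* Note: the nonbonded parameter table (fr->nbfp) is assumed here to store c6 and
             * c12 pre-multiplied by 6 and 12, which is why the force below needs no extra
             * prefactors while the potential is scaled back by 1/6 and 1/12. The resulting
             * potential-shifted energy is
             *   Vvdw = (c12*rinv^12 - c12*rcut^-12)/12 - (c6*rinv^6 - c6*rcut^-6)/6,
             * i.e. the plain LJ energy shifted so that it is zero at the cutoff.
             */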
            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);

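            /* The cutoff mask is all ones for lanes with rsq < rcutoff2 and zero elsewhere,
             * so ANDing energy and force with it discards contributions from pairs beyond
             * the cutoff while the full SIMD width is still processed.
             */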
            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = fvdw;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            }

            /* Inner loop uses 41 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

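            /* Dummy (negative) indices are clamped to 0 so the coordinate and parameter loads
             * stay within bounds; their energy and force contributions are zeroed with
             * dummy_mask, and their j forces are redirected to the scratch buffer below.
             */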
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinvsq00         = gmx_mm256_inv_pd(rsq00);

            /* Load parameters for j particles */
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            /* Compute parameters for interactions between i and j atoms */
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_mul_pd(c6_00,sh_vdw_invrcut6)),one_sixth));
            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);

            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = fvdw;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            }

            /* Inner loop uses 41 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 7 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

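    /* The flop counts quoted above feed the nrnb accounting used in mdrun's performance
     * summary; they have no effect on the computed forces or energies.
     */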
    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*41);
}
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_double
 * Electrostatics interaction: None
 * VdW interaction:            LennardJones
 * Geometry:                   Particle-Particle
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_double
                    (t_nblist * gmx_restrict                nlist,
                     rvec * gmx_restrict                    xx,
                     rvec * gmx_restrict                    ff,
                     t_forcerec * gmx_restrict              fr,
                     t_mdatoms * gmx_restrict               mdatoms,
                     nb_kernel_data_t * gmx_restrict        kernel_data,
                     t_nrnb * gmx_restrict                  nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    rcutoff_scalar   = fr->rvdw;
    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_pd(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();

        /* Load parameters for i particles */
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

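        /* As in the VF kernel above, the main loop handles full quads of real j atoms and a
         * masked epilogue deals with the padded tail of the neighbor list.
         */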
        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinvsq00         = gmx_mm256_inv_pd(rsq00);

            /* Load parameters for j particles */
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            /* Compute parameters for interactions between i and j atoms */
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* LENNARD-JONES DISPERSION/REPULSION */

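            /* Force-only form: with the pre-scaled c6/c12 convention assumed above,
             *   fscal = (c12*rinv^12 - c6*rinv^6)*rinv^2
             * is F/r for the LJ pair. The potential shift is a constant and therefore does
             * not enter the force, so sh_vdw_invrcut6 is unused in this kernel.
             */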
            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));

            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = fvdw;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            }

            /* Inner loop uses 30 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

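            /* As above, negative (dummy) indices are clamped to 0 for the loads; their force
             * contributions are cleared with dummy_mask and written to the scratch buffer.
             */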
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinvsq00         = gmx_mm256_inv_pd(rsq00);

            /* Load parameters for j particles */
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            /* Compute parameters for interactions between i and j atoms */
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
            fvdw             = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),c6_00),_mm256_mul_pd(rinvsix,rinvsq00));

            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = fvdw;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            }

            /* Inner loop uses 30 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 6 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*30);
}