Bug Summary

File: gromacs/gmxlib/nonbonded/nb_kernel_sse4_1_single/nb_kernel_ElecNone_VdwCSTab_GeomP1P1_sse4_1_single.c
Location: line 484, column 5
Description: Value stored to 'j_coord_offsetA' is never read
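
The warning is a classic dead store: line 484 sets j_coord_offsetA to 0 under the comment "Avoid stupid compiler warnings", but every later read of the variable (lines 611 and 721) is preceded by a fresh assignment from DIM*jnrA (lines 530 and 637), so the stored 0 can never be observed. A minimal, self-contained sketch of the same pattern, using hypothetical names, is given below; removing the redundant initialization, or keeping it deliberately and suppressing the analyzer finding, resolves the report.

/* Minimal sketch (hypothetical names) of the dead-store pattern reported above:
 * the variable is zeroed only to silence an "uninitialized" warning, but every
 * read is dominated by a later assignment, so the analyzer flags the store.
 */
#include <stdio.h>

#define DIM 3

static float sum_coords(const int *jjnr, const float *x, int n)
{
    int   j_coord_offsetA;
    float sum = 0.0f;

    j_coord_offsetA = 0;                    /* dead store: this 0 is never read */

    for (int jidx = 0; jidx < n; jidx++)
    {
        j_coord_offsetA = DIM*jjnr[jidx];   /* overwrites the 0 before any read */
        sum += x[j_coord_offsetA];
    }
    return sum;
}

int main(void)
{
    const int   jjnr[2] = { 1, 0 };
    const float x[6]    = { 0.5f, 0.5f, 0.5f, 1.0f, 1.0f, 1.0f };

    printf("%g\n", sum_coords(jjnr, x, 2)); /* prints 1.5 */
    return 0;
}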

Annotated Source Code

1/*
2 * This file is part of the GROMACS molecular simulation package.
3 *
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
8 *
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
13 *
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 *
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
31 *
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
34 */
35/*
36 * Note: this file was generated by the GROMACS sse4_1_single kernel generator.
37 */
38#ifdef HAVE_CONFIG_H
39#include <config.h>
40#endif
41
42#include <math.h>
43
44#include "../nb_kernel.h"
45#include "types/simple.h"
46#include "gromacs/math/vec.h"
47#include "nrnb.h"
48
49#include "gromacs/simd/math_x86_sse4_1_single.h"
50#include "kernelutil_x86_sse4_1_single.h"
51
52/*
53 * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_sse4_1_single
54 * Electrostatics interaction: None
55 * VdW interaction: CubicSplineTable
56 * Geometry: Particle-Particle
57 * Calculate force/pot: PotentialAndForce
58 */
59void
60nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_sse4_1_single
61 (t_nblist * gmx_restrict nlist,
62 rvec * gmx_restrict xx,
63 rvec * gmx_restrict ff,
64 t_forcerec * gmx_restrict fr,
65 t_mdatoms * gmx_restrict mdatoms,
66 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67 t_nrnb * gmx_restrict nrnb)
68{
69 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
70 * just 0 for non-waters.
71 * Suffixes A,B,C,D refer to j loop unrolling done with SSE, e.g. for the four different
72 * jnr indices corresponding to data put in the four positions in the SIMD register.
73 */
74 int i_shift_offset,i_coord_offset,outeriter,inneriter;
75 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76 int jnrA,jnrB,jnrC,jnrD;
77 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
80 real rcutoff_scalar;
81 real *shiftvec,*fshift,*x,*f;
82 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
83 real scratch[4*DIM];
84 __m128 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85 int vdwioffset0;
86 __m128 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
87 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
88 __m128 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
89 __m128 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
90 int nvdwtype;
91 __m128 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
92 int *vdwtype;
93 real *vdwparam;
94 __m128 one_sixth = _mm_set1_ps(1.0/6.0);
95 __m128 one_twelfth = _mm_set1_ps(1.0/12.0);
96 __m128i vfitab;
97 __m128i ifour = _mm_set1_epi32(4);
98 __m128 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
99 real *vftab;
100 __m128 dummy_mask,cutoff_mask;
101 __m128 signbit = _mm_castsi128_ps( _mm_set1_epi32(0x80000000) );
102 __m128 one = _mm_set1_ps(1.0);
103 __m128 two = _mm_set1_ps(2.0);
104 x = xx[0];
105 f = ff[0];
106
107 nri = nlist->nri;
108 iinr = nlist->iinr;
109 jindex = nlist->jindex;
110 jjnr = nlist->jjnr;
111 shiftidx = nlist->shift;
112 gid = nlist->gid;
113 shiftvec = fr->shift_vec[0];
114 fshift = fr->fshift[0];
115 nvdwtype = fr->ntype;
116 vdwparam = fr->nbfp;
117 vdwtype = mdatoms->typeA;
118
119 vftab = kernel_data->table_vdw->data;
120 vftabscale = _mm_set1_ps(kernel_data->table_vdw->scale);
121
122 /* Avoid stupid compiler warnings */
123 jnrA = jnrB = jnrC = jnrD = 0;
124 j_coord_offsetA = 0;
125 j_coord_offsetB = 0;
126 j_coord_offsetC = 0;
127 j_coord_offsetD = 0;
128
129 outeriter = 0;
130 inneriter = 0;
131
132 for(iidx=0;iidx<4*DIM;iidx++)
133 {
134 scratch[iidx] = 0.0;
135 }
136
137 /* Start outer loop over neighborlists */
138 for(iidx=0; iidx<nri; iidx++)
139 {
140 /* Load shift vector for this list */
141 i_shift_offset = DIM*shiftidx[iidx];
142
143 /* Load limits for loop over neighbors */
144 j_index_start = jindex[iidx];
145 j_index_end = jindex[iidx+1];
146
147 /* Get outer coordinate index */
148 inr = iinr[iidx];
149 i_coord_offset = DIM*inr;
150
151 /* Load i particle coords and add shift vector */
152 gmx_mm_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
153
154 fix0 = _mm_setzero_ps();
155 fiy0 = _mm_setzero_ps();
156 fiz0 = _mm_setzero_ps();
157
158 /* Load parameters for i particles */
159 vdwioffset0 = 2*nvdwtype*vdwtype[inr+0];
160
161 /* Reset potential sums */
162 vvdwsum = _mm_setzero_ps();
163
164 /* Start inner kernel loop */
165 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
166 {
167
168 /* Get j neighbor index, and coordinate index */
169 jnrA = jjnr[jidx];
170 jnrB = jjnr[jidx+1];
171 jnrC = jjnr[jidx+2];
172 jnrD = jjnr[jidx+3];
173 j_coord_offsetA = DIM*jnrA;
174 j_coord_offsetB = DIM*jnrB;
175 j_coord_offsetC = DIM*jnrC;
176 j_coord_offsetD = DIM*jnrD;
177
178 /* load j atom coordinates */
179 gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
180 x+j_coord_offsetC,x+j_coord_offsetD,
181 &jx0,&jy0,&jz0);
182
183 /* Calculate displacement vector */
184 dx00 = _mm_sub_ps(ix0,jx0);
185 dy00 = _mm_sub_ps(iy0,jy0);
186 dz00 = _mm_sub_ps(iz0,jz0);
187
188 /* Calculate squared distance and things based on it */
189 rsq00 = gmx_mm_calc_rsq_ps(dx00,dy00,dz00);
190
191 rinv00 = gmx_mm_invsqrt_ps(rsq00);
192
193 /* Load parameters for j particles */
194 vdwjidx0A = 2*vdwtype[jnrA+0];
195 vdwjidx0B = 2*vdwtype[jnrB+0];
196 vdwjidx0C = 2*vdwtype[jnrC+0];
197 vdwjidx0D = 2*vdwtype[jnrD+0];
198
199 /**************************
200 * CALCULATE INTERACTIONS *
201 **************************/
202
203 r00 = _mm_mul_ps(rsq00,rinv00);
204
205 /* Compute parameters for interactions between i and j atoms */
206 gmx_mm_load_4pair_swizzle_ps(vdwparam+vdwioffset0+vdwjidx0A,
207 vdwparam+vdwioffset0+vdwjidx0B,
208 vdwparam+vdwioffset0+vdwjidx0C,
209 vdwparam+vdwioffset0+vdwjidx0D,
210 &c6_00,&c12_00);
211
212 /* Calculate table index by multiplying r with table scale and truncate to integer */
213 rt = _mm_mul_ps(r00,vftabscale);
214 vfitab = _mm_cvttps_epi32(rt);
215 vfeps = _mm_sub_ps(rt,_mm_round_ps(rt, _MM_FROUND_FLOOR));
216 vfitab = _mm_slli_epi32(vfitab,3);
217
218 /* CUBIC SPLINE TABLE DISPERSION */
219 Y = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,0) );
220 F = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,1) );
221 G = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,2) );
222 H = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,3) );
223 _MM_TRANSPOSE4_PS(Y,F,G,H);
224 Heps = _mm_mul_ps(vfeps,H);
225 Fp = _mm_add_ps(F,_mm_mul_ps(vfeps,_mm_add_ps(G,Heps)));
226 VV = _mm_add_ps(Y,_mm_mul_ps(vfeps,Fp));
227 vvdw6 = _mm_mul_ps(c6_00,VV);
228 FF = _mm_add_ps(Fp,_mm_mul_ps(vfeps,_mm_add_ps(G,_mm_add_ps(Heps,Heps))));
229 fvdw6 = _mm_mul_ps(c6_00,FF);
230
231 /* CUBIC SPLINE TABLE REPULSION */
232 vfitab = _mm_add_epi32(vfitab,ifour);
233 Y = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,0) );
234 F = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,1) );
235 G = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,2) );
236 H = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,3) );
237 _MM_TRANSPOSE4_PS(Y,F,G,H);
238 Heps = _mm_mul_ps(vfeps,H);
239 Fp = _mm_add_ps(F,_mm_mul_ps(vfeps,_mm_add_ps(G,Heps)));
240 VV = _mm_add_ps(Y,_mm_mul_ps(vfeps,Fp));
241 vvdw12 = _mm_mul_ps(c12_00,VV);
242 FF = _mm_add_ps(Fp,_mm_mul_ps(vfeps,_mm_add_ps(G,_mm_add_ps(Heps,Heps))));
243 fvdw12 = _mm_mul_ps(c12_00,FF);
244 vvdw = _mm_add_ps(vvdw12,vvdw6);
245 fvdw = _mm_xor_ps(signbit,_mm_mul_ps(_mm_add_ps(fvdw6,fvdw12),_mm_mul_ps(vftabscale,rinv00)));
246
247 /* Update potential sum for this i atom from the interaction with this j atom. */
248 vvdwsum = _mm_add_ps(vvdwsum,vvdw);
249
250 fscal = fvdw;
251
252 /* Calculate temporary vectorial force */
253 tx = _mm_mul_ps(fscal,dx00);
254 ty = _mm_mul_ps(fscal,dy00);
255 tz = _mm_mul_ps(fscal,dz00);
256
257 /* Update vectorial force */
258 fix0 = _mm_add_ps(fix0,tx);
259 fiy0 = _mm_add_ps(fiy0,ty);
260 fiz0 = _mm_add_ps(fiz0,tz);
261
262 fjptrA = f+j_coord_offsetA;
263 fjptrB = f+j_coord_offsetB;
264 fjptrC = f+j_coord_offsetC;
265 fjptrD = f+j_coord_offsetD;
266 gmx_mm_decrement_1rvec_4ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
267
268 /* Inner loop uses 56 flops */
269 }
270
271 if(jidx<j_index_end)
272 {
273
274 /* Get j neighbor index, and coordinate index */
275 jnrlistA = jjnr[jidx];
276 jnrlistB = jjnr[jidx+1];
277 jnrlistC = jjnr[jidx+2];
278 jnrlistD = jjnr[jidx+3];
279 /* Sign of each element will be negative for non-real atoms.
280 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
281 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
282 */
283 dummy_mask = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
284 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
285 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
286 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
287 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
288 j_coord_offsetA = DIM*jnrA;
289 j_coord_offsetB = DIM*jnrB;
290 j_coord_offsetC = DIM*jnrC;
291 j_coord_offsetD = DIM*jnrD;
292
293 /* load j atom coordinates */
294 gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
295 x+j_coord_offsetC,x+j_coord_offsetD,
296 &jx0,&jy0,&jz0);
297
298 /* Calculate displacement vector */
299 dx00 = _mm_sub_ps(ix0,jx0);
300 dy00 = _mm_sub_ps(iy0,jy0);
301 dz00 = _mm_sub_ps(iz0,jz0);
302
303 /* Calculate squared distance and things based on it */
304 rsq00 = gmx_mm_calc_rsq_ps(dx00,dy00,dz00);
305
306 rinv00 = gmx_mm_invsqrt_ps(rsq00);
307
308 /* Load parameters for j particles */
309 vdwjidx0A = 2*vdwtype[jnrA+0];
310 vdwjidx0B = 2*vdwtype[jnrB+0];
311 vdwjidx0C = 2*vdwtype[jnrC+0];
312 vdwjidx0D = 2*vdwtype[jnrD+0];
313
314 /**************************
315 * CALCULATE INTERACTIONS *
316 **************************/
317
318 r00 = _mm_mul_ps(rsq00,rinv00);
319 r00 = _mm_andnot_ps(dummy_mask,r00);
320
321 /* Compute parameters for interactions between i and j atoms */
322 gmx_mm_load_4pair_swizzle_ps(vdwparam+vdwioffset0+vdwjidx0A,
323 vdwparam+vdwioffset0+vdwjidx0B,
324 vdwparam+vdwioffset0+vdwjidx0C,
325 vdwparam+vdwioffset0+vdwjidx0D,
326 &c6_00,&c12_00);
327
328 /* Calculate table index by multiplying r with table scale and truncate to integer */
329 rt = _mm_mul_ps(r00,vftabscale);
330 vfitab = _mm_cvttps_epi32(rt);
331 vfeps = _mm_sub_ps(rt,_mm_round_ps(rt, _MM_FROUND_FLOOR));
332 vfitab = _mm_slli_epi32(vfitab,3);
333
334 /* CUBIC SPLINE TABLE DISPERSION */
335 Y = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,0) );
336 F = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,1) );
337 G = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,2) );
338 H = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,3) );
339 _MM_TRANSPOSE4_PS(Y,F,G,H);
340 Heps = _mm_mul_ps(vfeps,H);
341 Fp = _mm_add_ps(F,_mm_mul_ps(vfeps,_mm_add_ps(G,Heps)));
342 VV = _mm_add_ps(Y,_mm_mul_ps(vfeps,Fp));
343 vvdw6 = _mm_mul_ps(c6_00,VV);
344 FF = _mm_add_ps(Fp,_mm_mul_ps(vfeps,_mm_add_ps(G,_mm_add_ps(Heps,Heps))));
345 fvdw6 = _mm_mul_ps(c6_00,FF);
346
347 /* CUBIC SPLINE TABLE REPULSION */
348 vfitab = _mm_add_epi32(vfitab,ifour);
349 Y = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,0) );
350 F = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,1) );
351 G = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,2) );
352 H = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,3) );
353 _MM_TRANSPOSE4_PS(Y,F,G,H);
354 Heps = _mm_mul_ps(vfeps,H);
355 Fp = _mm_add_ps(F,_mm_mul_ps(vfeps,_mm_add_ps(G,Heps)));
356 VV = _mm_add_ps(Y,_mm_mul_ps(vfeps,Fp));
357 vvdw12 = _mm_mul_ps(c12_00,VV);
358 FF = _mm_add_ps(Fp,_mm_mul_ps(vfeps,_mm_add_ps(G,_mm_add_ps(Heps,Heps))));
359 fvdw12 = _mm_mul_ps(c12_00,FF);
360 vvdw = _mm_add_ps(vvdw12,vvdw6);
361 fvdw = _mm_xor_ps(signbit,_mm_mul_ps(_mm_add_ps(fvdw6,fvdw12),_mm_mul_ps(vftabscale,rinv00)));
362
363 /* Update potential sum for this i atom from the interaction with this j atom. */
364 vvdw = _mm_andnot_ps(dummy_mask,vvdw);
365 vvdwsum = _mm_add_ps(vvdwsum,vvdw);
366
367 fscal = fvdw;
368
369 fscal = _mm_andnot_ps(dummy_mask,fscal);
370
371 /* Calculate temporary vectorial force */
372 tx = _mm_mul_ps(fscal,dx00);
373 ty = _mm_mul_ps(fscal,dy00);
374 tz = _mm_mul_ps(fscal,dz00);
375
376 /* Update vectorial force */
377 fix0 = _mm_add_ps(fix0,tx);
378 fiy0 = _mm_add_ps(fiy0,ty);
379 fiz0 = _mm_add_ps(fiz0,tz);
380
381 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
382 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
383 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
384 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
385 gmx_mm_decrement_1rvec_4ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
386
387 /* Inner loop uses 57 flops */
388 }
389
390 /* End of innermost loop */
391
392 gmx_mm_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
393 f+i_coord_offset,fshift+i_shift_offset);
394
395 ggid = gid[iidx];
396 /* Update potential energies */
397 gmx_mm_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
398
399 /* Increment number of inner iterations */
400 inneriter += j_index_end - j_index_start;
401
402 /* Outer loop uses 7 flops */
403 }
404
405 /* Increment number of outer iterations */
406 outeriter += nri;
407
408 /* Update outer/inner flops */
409
410 inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*57);
411}
412/*
413 * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_sse4_1_single
414 * Electrostatics interaction: None
415 * VdW interaction: CubicSplineTable
416 * Geometry: Particle-Particle
417 * Calculate force/pot: Force
418 */
419void
420nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_sse4_1_single
421 (t_nblist * gmx_restrict nlist,
422 rvec * gmx_restrict xx,
423 rvec * gmx_restrict ff,
424 t_forcerec * gmx_restrict fr,
425 t_mdatoms * gmx_restrict mdatoms,
426 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
427 t_nrnb * gmx_restrict nrnb)
428{
429 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
430 * just 0 for non-waters.
431 * Suffixes A,B,C,D refer to j loop unrolling done with SSE, e.g. for the four different
432 * jnr indices corresponding to data put in the four positions in the SIMD register.
433 */
434 int i_shift_offset,i_coord_offset,outeriter,inneriter;
435 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
436 int jnrA,jnrB,jnrC,jnrD;
437 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
438 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
439 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
440 real rcutoff_scalar;
441 real *shiftvec,*fshift,*x,*f;
442 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
443 real scratch[4*DIM];
444 __m128 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
445 int vdwioffset0;
446 __m128 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
447 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
448 __m128 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
449 __m128 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
450 int nvdwtype;
451 __m128 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
452 int *vdwtype;
453 real *vdwparam;
454 __m128 one_sixth = _mm_set1_ps(1.0/6.0);
455 __m128 one_twelfth = _mm_set1_ps(1.0/12.0);
456 __m128i vfitab;
457 __m128i ifour = _mm_set1_epi32(4);
458 __m128 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
459 real *vftab;
460 __m128 dummy_mask,cutoff_mask;
461 __m128 signbit = _mm_castsi128_ps( _mm_set1_epi32(0x80000000) );
462 __m128 one = _mm_set1_ps(1.0);
463 __m128 two = _mm_set1_ps(2.0);
464 x = xx[0];
465 f = ff[0];
466
467 nri = nlist->nri;
468 iinr = nlist->iinr;
469 jindex = nlist->jindex;
470 jjnr = nlist->jjnr;
471 shiftidx = nlist->shift;
472 gid = nlist->gid;
473 shiftvec = fr->shift_vec[0];
474 fshift = fr->fshift[0];
475 nvdwtype = fr->ntype;
476 vdwparam = fr->nbfp;
477 vdwtype = mdatoms->typeA;
478
479 vftab = kernel_data->table_vdw->data;
480 vftabscale = _mm_set1_ps(kernel_data->table_vdw->scale);
481
482 /* Avoid stupid compiler warnings */
483 jnrA = jnrB = jnrC = jnrD = 0;
484 j_coord_offsetA = 0;
Value stored to 'j_coord_offsetA' is never read
485 j_coord_offsetB = 0;
486 j_coord_offsetC = 0;
487 j_coord_offsetD = 0;
488
489 outeriter = 0;
490 inneriter = 0;
491
492 for(iidx=0;iidx<4*DIM;iidx++)
493 {
494 scratch[iidx] = 0.0;
495 }
496
497 /* Start outer loop over neighborlists */
498 for(iidx=0; iidx<nri; iidx++)
499 {
500 /* Load shift vector for this list */
501 i_shift_offset = DIM*shiftidx[iidx];
502
503 /* Load limits for loop over neighbors */
504 j_index_start = jindex[iidx];
505 j_index_end = jindex[iidx+1];
506
507 /* Get outer coordinate index */
508 inr = iinr[iidx];
509 i_coord_offset = DIM*inr;
510
511 /* Load i particle coords and add shift vector */
512 gmx_mm_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
513
514 fix0 = _mm_setzero_ps();
515 fiy0 = _mm_setzero_ps();
516 fiz0 = _mm_setzero_ps();
517
518 /* Load parameters for i particles */
519 vdwioffset0 = 2*nvdwtype*vdwtype[inr+0];
520
521 /* Start inner kernel loop */
522 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
523 {
524
525 /* Get j neighbor index, and coordinate index */
526 jnrA = jjnr[jidx];
527 jnrB = jjnr[jidx+1];
528 jnrC = jjnr[jidx+2];
529 jnrD = jjnr[jidx+3];
530 j_coord_offsetA = DIM*jnrA;
531 j_coord_offsetB = DIM*jnrB;
532 j_coord_offsetC = DIM*jnrC;
533 j_coord_offsetD = DIM*jnrD;
534
535 /* load j atom coordinates */
536 gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
537 x+j_coord_offsetC,x+j_coord_offsetD,
538 &jx0,&jy0,&jz0);
539
540 /* Calculate displacement vector */
541 dx00 = _mm_sub_ps(ix0,jx0);
542 dy00 = _mm_sub_ps(iy0,jy0);
543 dz00 = _mm_sub_ps(iz0,jz0);
544
545 /* Calculate squared distance and things based on it */
546 rsq00 = gmx_mm_calc_rsq_ps(dx00,dy00,dz00);
547
548 rinv00 = gmx_mm_invsqrt_ps(rsq00);
549
550 /* Load parameters for j particles */
551 vdwjidx0A = 2*vdwtype[jnrA+0];
552 vdwjidx0B = 2*vdwtype[jnrB+0];
553 vdwjidx0C = 2*vdwtype[jnrC+0];
554 vdwjidx0D = 2*vdwtype[jnrD+0];
555
556 /**************************
557 * CALCULATE INTERACTIONS *
558 **************************/
559
560 r00 = _mm_mul_ps(rsq00,rinv00);
561
562 /* Compute parameters for interactions between i and j atoms */
563 gmx_mm_load_4pair_swizzle_ps(vdwparam+vdwioffset0+vdwjidx0A,
564 vdwparam+vdwioffset0+vdwjidx0B,
565 vdwparam+vdwioffset0+vdwjidx0C,
566 vdwparam+vdwioffset0+vdwjidx0D,
567 &c6_00,&c12_00);
568
569 /* Calculate table index by multiplying r with table scale and truncate to integer */
570 rt = _mm_mul_ps(r00,vftabscale);
571 vfitab = _mm_cvttps_epi32(rt);
572 vfeps = _mm_sub_ps(rt,_mm_round_ps(rt, _MM_FROUND_FLOOR));
573 vfitab = _mm_slli_epi32(vfitab,3);
574
575 /* CUBIC SPLINE TABLE DISPERSION */
576 Y = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,0) );
577 F = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,1) );
578 G = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,2) );
579 H = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,3) );
580 _MM_TRANSPOSE4_PS(Y,F,G,H);
581 Heps = _mm_mul_ps(vfeps,H);
582 Fp = _mm_add_ps(F,_mm_mul_ps(vfeps,_mm_add_ps(G,Heps)));
583 FF = _mm_add_ps(Fp,_mm_mul_ps(vfeps,_mm_add_ps(G,_mm_add_ps(Heps,Heps))));
584 fvdw6 = _mm_mul_ps(c6_00,FF);
585
586 /* CUBIC SPLINE TABLE REPULSION */
587 vfitab = _mm_add_epi32(vfitab,ifour);
588 Y = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,0) );
589 F = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,1) );
590 G = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,2) );
591 H = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,3) );
592 _MM_TRANSPOSE4_PS(Y,F,G,H);
593 Heps = _mm_mul_ps(vfeps,H);
594 Fp = _mm_add_ps(F,_mm_mul_ps(vfeps,_mm_add_ps(G,Heps)));
595 FF = _mm_add_ps(Fp,_mm_mul_ps(vfeps,_mm_add_ps(G,_mm_add_ps(Heps,Heps))));
596 fvdw12 = _mm_mul_ps(c12_00,FF);
597 fvdw = _mm_xor_ps(signbit,_mm_mul_ps(_mm_add_ps(fvdw6,fvdw12),_mm_mul_ps(vftabscale,rinv00)));
598
599 fscal = fvdw;
600
601 /* Calculate temporary vectorial force */
602 tx = _mm_mul_ps(fscal,dx00);
603 ty = _mm_mul_ps(fscal,dy00);
604 tz = _mm_mul_ps(fscal,dz00);
605
606 /* Update vectorial force */
607 fix0 = _mm_add_ps(fix0,tx);
608 fiy0 = _mm_add_ps(fiy0,ty);
609 fiz0 = _mm_add_ps(fiz0,tz);
610
611 fjptrA = f+j_coord_offsetA;
612 fjptrB = f+j_coord_offsetB;
613 fjptrC = f+j_coord_offsetC;
614 fjptrD = f+j_coord_offsetD;
615 gmx_mm_decrement_1rvec_4ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
616
617 /* Inner loop uses 48 flops */
618 }
619
620 if(jidx<j_index_end)
621 {
622
623 /* Get j neighbor index, and coordinate index */
624 jnrlistA = jjnr[jidx];
625 jnrlistB = jjnr[jidx+1];
626 jnrlistC = jjnr[jidx+2];
627 jnrlistD = jjnr[jidx+3];
628 /* Sign of each element will be negative for non-real atoms.
629 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
630 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
631 */
632 dummy_mask = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
633 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
634 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
635 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
636 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
637 j_coord_offsetA = DIM*jnrA;
638 j_coord_offsetB = DIM*jnrB;
639 j_coord_offsetC = DIM*jnrC;
640 j_coord_offsetD = DIM*jnrD;
641
642 /* load j atom coordinates */
643 gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
644 x+j_coord_offsetC,x+j_coord_offsetD,
645 &jx0,&jy0,&jz0);
646
647 /* Calculate displacement vector */
648 dx00 = _mm_sub_ps(ix0,jx0);
649 dy00 = _mm_sub_ps(iy0,jy0);
650 dz00 = _mm_sub_ps(iz0,jz0);
651
652 /* Calculate squared distance and things based on it */
653 rsq00 = gmx_mm_calc_rsq_ps(dx00,dy00,dz00);
654
655 rinv00 = gmx_mm_invsqrt_ps(rsq00);
656
657 /* Load parameters for j particles */
658 vdwjidx0A = 2*vdwtype[jnrA+0];
659 vdwjidx0B = 2*vdwtype[jnrB+0];
660 vdwjidx0C = 2*vdwtype[jnrC+0];
661 vdwjidx0D = 2*vdwtype[jnrD+0];
662
663 /**************************
664 * CALCULATE INTERACTIONS *
665 **************************/
666
667 r00 = _mm_mul_ps(rsq00,rinv00);
668 r00 = _mm_andnot_ps(dummy_mask,r00);
669
670 /* Compute parameters for interactions between i and j atoms */
671 gmx_mm_load_4pair_swizzle_ps(vdwparam+vdwioffset0+vdwjidx0A,
672 vdwparam+vdwioffset0+vdwjidx0B,
673 vdwparam+vdwioffset0+vdwjidx0C,
674 vdwparam+vdwioffset0+vdwjidx0D,
675 &c6_00,&c12_00);
676
677 /* Calculate table index by multiplying r with table scale and truncate to integer */
678 rt = _mm_mul_ps(r00,vftabscale);
679 vfitab = _mm_cvttps_epi32(rt);
680 vfeps = _mm_sub_ps(rt,_mm_round_ps(rt, _MM_FROUND_FLOOR));
681 vfitab = _mm_slli_epi32(vfitab,3);
682
683 /* CUBIC SPLINE TABLE DISPERSION */
684 Y = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,0) );
685 F = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,1) );
686 G = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,2) );
687 H = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,3) );
688 _MM_TRANSPOSE4_PS(Y,F,G,H);
689 Heps = _mm_mul_ps(vfeps,H);
690 Fp = _mm_add_ps(F,_mm_mul_ps(vfeps,_mm_add_ps(G,Heps)));
691 FF = _mm_add_ps(Fp,_mm_mul_ps(vfeps,_mm_add_ps(G,_mm_add_ps(Heps,Heps))));
692 fvdw6 = _mm_mul_ps(c6_00,FF);
693
694 /* CUBIC SPLINE TABLE REPULSION */
695 vfitab = _mm_add_epi32(vfitab,ifour);
696 Y = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,0) );
697 F = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,1) );
698 G = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,2) );
699 H = _mm_load_ps( vftab + gmx_mm_extract_epi32(vfitab,3) );
700 _MM_TRANSPOSE4_PS(Y,F,G,H);
701 Heps = _mm_mul_ps(vfeps,H);
702 Fp = _mm_add_ps(F,_mm_mul_ps(vfeps,_mm_add_ps(G,Heps)));
703 FF = _mm_add_ps(Fp,_mm_mul_ps(vfeps,_mm_add_ps(G,_mm_add_ps(Heps,Heps))));
704 fvdw12 = _mm_mul_ps(c12_00,FF);
705 fvdw = _mm_xor_ps(signbit,_mm_mul_ps(_mm_add_ps(fvdw6,fvdw12),_mm_mul_ps(vftabscale,rinv00)));
706
707 fscal = fvdw;
708
709 fscal = _mm_andnot_ps(dummy_mask,fscal);
710
711 /* Calculate temporary vectorial force */
712 tx = _mm_mul_ps(fscal,dx00);
713 ty = _mm_mul_ps(fscal,dy00);
714 tz = _mm_mul_ps(fscal,dz00);
715
716 /* Update vectorial force */
717 fix0 = _mm_add_ps(fix0,tx);
718 fiy0 = _mm_add_ps(fiy0,ty);
719 fiz0 = _mm_add_ps(fiz0,tz);
720
721 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
722 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
723 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
724 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
725 gmx_mm_decrement_1rvec_4ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);
726
727 /* Inner loop uses 49 flops */
728 }
729
730 /* End of innermost loop */
731
732 gmx_mm_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
733 f+i_coord_offset,fshift+i_shift_offset);
734
735 /* Increment number of inner iterations */
736 inneriter += j_index_end - j_index_start;
737
738 /* Outer loop uses 6 flops */
739 }
740
741 /* Increment number of outer iterations */
742 outeriter += nri;
743
744 /* Update outer/inner flops */
745
746 inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*49);
747}