2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
35 #ifndef _kernelutil_x86_avx_256_single_h_
36 #define _kernelutil_x86_avx_256_single_h_
38 #define gmx_mm_castsi128_ps(a) _mm_castsi128_ps(a)
40 static gmx_inline __m256
41 gmx_mm256_unpack128lo_ps(__m256 xmm1, __m256 xmm2)
43 return _mm256_permute2f128_ps(xmm1, xmm2, 0x20);
46 static gmx_inline __m256
47 gmx_mm256_unpack128hi_ps(__m256 xmm1, __m256 xmm2)
49 return _mm256_permute2f128_ps(xmm1, xmm2, 0x31);
52 static gmx_inline __m256
53 gmx_mm256_set_m128(__m128 hi, __m128 lo)
55 return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 0x1);
58 /* Work around gcc bug with wrong type for mask formal parameter to maskload/maskstore */
59 #ifdef GMX_SIMD_X86_AVX_GCC_MASKLOAD_BUG
60 # define gmx_mm_maskload_ps(mem, mask) _mm_maskload_ps((mem), _mm_castsi128_ps(mask))
61 # define gmx_mm_maskstore_ps(mem, mask, x) _mm_maskstore_ps((mem), _mm_castsi128_ps(mask), (x))
62 # define gmx_mm256_maskload_ps(mem, mask) _mm256_maskload_ps((mem), _mm256_castsi256_ps(mask))
63 # define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), _mm256_castsi256_ps(mask), (x))
65 # define gmx_mm_maskload_ps(mem, mask) _mm_maskload_ps((mem), (mask))
66 # define gmx_mm_maskstore_ps(mem, mask, x) _mm_maskstore_ps((mem), (mask), (x))
67 # define gmx_mm256_maskload_ps(mem, mask) _mm256_maskload_ps((mem), (mask))
68 # define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), (mask), (x))
71 /* Transpose lower/upper half of 256-bit registers separately */
72 #define GMX_MM256_HALFTRANSPOSE4_PS(ymm0, ymm1, ymm2, ymm3) { \
73 __m256 __tmp0, __tmp1, __tmp2, __tmp3; \
75 __tmp0 = _mm256_unpacklo_ps((ymm0), (ymm1)); \
76 __tmp1 = _mm256_unpacklo_ps((ymm2), (ymm3)); \
77 __tmp2 = _mm256_unpackhi_ps((ymm0), (ymm1)); \
78 __tmp3 = _mm256_unpackhi_ps((ymm2), (ymm3)); \
79 ymm0 = _mm256_shuffle_ps(__tmp0, __tmp1, _MM_SHUFFLE(1, 0, 1, 0)); \
80 ymm1 = _mm256_shuffle_ps(__tmp0, __tmp1, _MM_SHUFFLE(3, 2, 3, 2)); \
81 ymm2 = _mm256_shuffle_ps(__tmp2, __tmp3, _MM_SHUFFLE(1, 0, 1, 0)); \
82 ymm3 = _mm256_shuffle_ps(__tmp2, __tmp3, _MM_SHUFFLE(3, 2, 3, 2)); \
86 static gmx_inline __m256
87 gmx_mm256_calc_rsq_ps(__m256 dx, __m256 dy, __m256 dz)
89 return _mm256_add_ps( _mm256_add_ps( _mm256_mul_ps(dx, dx), _mm256_mul_ps(dy, dy) ), _mm256_mul_ps(dz, dz) );
92 /* Normal sum of four ymm registers */
93 #define gmx_mm256_sum4_ps(t0, t1, t2, t3) _mm256_add_ps(_mm256_add_ps(t0, t1), _mm256_add_ps(t2, t3))
97 gmx_mm256_any_lt(__m256 a, __m256 b)
99 return _mm256_movemask_ps(_mm256_cmp_ps(a, b, _CMP_LT_OQ));
103 static gmx_inline __m256
104 gmx_mm256_load_4real_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
105 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD)
109 t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
110 t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
111 return _mm256_castps128_ps256(_mm_unpacklo_ps(t1, t2));
115 static gmx_inline __m256
116 gmx_mm256_load_8real_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
117 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
118 const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
119 const float * gmx_restrict ptrG, const float * gmx_restrict ptrH)
123 t1 = gmx_mm256_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
124 t2 = gmx_mm256_load_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH);
126 return _mm256_permute2f128_ps(t1, t2, 0x20);
131 static gmx_inline void
132 gmx_mm256_store_4real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
133 float * gmx_restrict ptrC, float * gmx_restrict ptrD, __m256 xmm1)
137 t2 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(1, 1, 1, 1));
138 t3 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(2, 2, 2, 2));
139 t4 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(3, 3, 3, 3));
140 _mm_store_ss(ptrA, _mm256_castps256_ps128(xmm1));
141 _mm_store_ss(ptrB, _mm256_castps256_ps128(t2));
142 _mm_store_ss(ptrC, _mm256_castps256_ps128(t3));
143 _mm_store_ss(ptrD, _mm256_castps256_ps128(t4));
147 static gmx_inline void
148 gmx_mm256_store_8real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
149 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
150 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
151 float * gmx_restrict ptrG, float * gmx_restrict ptrH, __m256 xmm1)
155 t1 = _mm256_permute2f128_ps(xmm1, xmm1, 0x11);
157 gmx_mm256_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, xmm1);
158 gmx_mm256_store_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH, t1);
162 static gmx_inline void
163 gmx_mm256_increment_4real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
164 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
167 __m128 t1, t2, t3, t4;
169 t1 = _mm256_castps256_ps128(xmm1);
170 t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
171 t3 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
172 t4 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
174 t1 = _mm_add_ss(t1, _mm_load_ss(ptrA));
175 t2 = _mm_add_ss(t2, _mm_load_ss(ptrB));
176 t3 = _mm_add_ss(t3, _mm_load_ss(ptrC));
177 t4 = _mm_add_ss(t4, _mm_load_ss(ptrD));
179 _mm_store_ss(ptrA, t1);
180 _mm_store_ss(ptrB, t2);
181 _mm_store_ss(ptrC, t3);
182 _mm_store_ss(ptrD, t4);
185 static gmx_inline void
186 gmx_mm256_increment_8real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
187 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
188 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
189 float * gmx_restrict ptrG, float * gmx_restrict ptrH,
194 t1 = _mm256_permute2f128_ps(xmm1, xmm1, 0x11);
196 gmx_mm256_increment_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, xmm1);
197 gmx_mm256_increment_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH, t1);
201 static gmx_inline void
202 gmx_mm256_load_4pair_swizzle_ps(const float * gmx_restrict p1, const float * gmx_restrict p2,
203 const float * gmx_restrict p3, const float * gmx_restrict p4,
204 __m256 * gmx_restrict c6, __m256 * gmx_restrict c12)
206 __m128 t1, t2, t3, t4;
208 t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1); /* - - c12a c6a */
209 t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2); /* - - c12b c6b */
210 t3 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3); /* - - c12c c6c */
211 t4 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4); /* - - c12d c6d */
213 t1 = _mm_unpacklo_ps(t1, t2); /* c12b c12a c6b c6a */
214 t3 = _mm_unpacklo_ps(t3, t4); /* c12d c12c c6d c6c */
216 *c6 = _mm256_castps128_ps256(_mm_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)));
217 *c12 = _mm256_castps128_ps256(_mm_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)));
220 static gmx_inline void
221 gmx_mm256_load_8pair_swizzle_ps(const float * gmx_restrict p1, const float * gmx_restrict p2,
222 const float * gmx_restrict p3, const float * gmx_restrict p4,
223 const float * gmx_restrict p5, const float * gmx_restrict p6,
224 const float * gmx_restrict p7, const float * gmx_restrict p8,
225 __m256 * gmx_restrict c6, __m256 * gmx_restrict c12)
227 __m256 c6l, c6h, c12l, c12h;
229 gmx_mm256_load_4pair_swizzle_ps(p1, p2, p3, p4, &c6l, &c12l);
230 gmx_mm256_load_4pair_swizzle_ps(p5, p6, p7, p8, &c6h, &c12h);
232 *c6 = _mm256_permute2f128_ps(c6l, c6h, 0x20);
233 *c12 = _mm256_permute2f128_ps(c12l, c12h, 0x20);
237 static gmx_inline void
238 gmx_mm256_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
239 const float * gmx_restrict xyz,
240 __m256 * gmx_restrict x1,
241 __m256 * gmx_restrict y1,
242 __m256 * gmx_restrict z1)
244 __m128 t1, t2, t3, t4;
246 t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
247 t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
248 t3 = _mm_load_ss(xyz_shift+2);
249 t4 = _mm_load_ss(xyz+2);
250 t1 = _mm_add_ps(t1, t2);
251 t3 = _mm_add_ss(t3, t4);
253 t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
254 t1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
255 t3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
257 *x1 = gmx_mm256_set_m128(t1, t1);
258 *y1 = gmx_mm256_set_m128(t2, t2);
259 *z1 = gmx_mm256_set_m128(t3, t3);
263 static gmx_inline void
264 gmx_mm256_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
265 const float * gmx_restrict xyz,
266 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
267 __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
268 __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
271 __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9;
273 tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
274 tB = _mm_load_ss(xyz_shift+2);
276 t1 = _mm_loadu_ps(xyz);
277 t2 = _mm_loadu_ps(xyz+4);
278 t3 = _mm_load_ss(xyz+8);
280 tA = _mm_movelh_ps(tA, tB);
281 t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
282 t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
283 t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));
285 t1 = _mm_add_ps(t1, t4);
286 t2 = _mm_add_ps(t2, t5);
287 t3 = _mm_add_ss(t3, t6);
289 t9 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
290 t8 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
291 t7 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
292 t6 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
293 t5 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
294 t4 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
295 t3 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
296 t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
297 t1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
299 *x1 = gmx_mm256_set_m128(t1, t1);
300 *y1 = gmx_mm256_set_m128(t2, t2);
301 *z1 = gmx_mm256_set_m128(t3, t3);
302 *x2 = gmx_mm256_set_m128(t4, t4);
303 *y2 = gmx_mm256_set_m128(t5, t5);
304 *z2 = gmx_mm256_set_m128(t6, t6);
305 *x3 = gmx_mm256_set_m128(t7, t7);
306 *y3 = gmx_mm256_set_m128(t8, t8);
307 *z3 = gmx_mm256_set_m128(t9, t9);
311 static gmx_inline void
312 gmx_mm256_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
313 const float * gmx_restrict xyz,
314 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
315 __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
316 __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
317 __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
320 __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
322 tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
323 tB = _mm_load_ss(xyz_shift+2);
325 t1 = _mm_loadu_ps(xyz);
326 t2 = _mm_loadu_ps(xyz+4);
327 t3 = _mm_loadu_ps(xyz+8);
329 tA = _mm_movelh_ps(tA, tB);
330 t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
331 t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
332 t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));
334 t1 = _mm_add_ps(t1, t4);
335 t2 = _mm_add_ps(t2, t5);
336 t3 = _mm_add_ps(t3, t6);
338 t12 = _mm_permute_ps(t3, _MM_SHUFFLE(3, 3, 3, 3));
339 t11 = _mm_permute_ps(t3, _MM_SHUFFLE(2, 2, 2, 2));
340 t10 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 1, 1, 1));
341 t9 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
342 t8 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
343 t7 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
344 t6 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
345 t5 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
346 t4 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
347 t3 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
348 t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
349 t1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
351 *x1 = gmx_mm256_set_m128(t1, t1);
352 *y1 = gmx_mm256_set_m128(t2, t2);
353 *z1 = gmx_mm256_set_m128(t3, t3);
354 *x2 = gmx_mm256_set_m128(t4, t4);
355 *y2 = gmx_mm256_set_m128(t5, t5);
356 *z2 = gmx_mm256_set_m128(t6, t6);
357 *x3 = gmx_mm256_set_m128(t7, t7);
358 *y3 = gmx_mm256_set_m128(t8, t8);
359 *z3 = gmx_mm256_set_m128(t9, t9);
360 *x4 = gmx_mm256_set_m128(t10, t10);
361 *y4 = gmx_mm256_set_m128(t11, t11);
362 *z4 = gmx_mm256_set_m128(t12, t12);
367 static gmx_inline void
368 gmx_mm256_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
369 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
370 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1)
372 __m128 t1, t2, t3, t4;
373 __m128i mask = _mm_set_epi32(0, -1, -1, -1);
374 t1 = gmx_mm_maskload_ps(ptrA, mask);
375 t2 = gmx_mm_maskload_ps(ptrB, mask);
376 t3 = gmx_mm_maskload_ps(ptrC, mask);
377 t4 = gmx_mm_maskload_ps(ptrD, mask);
378 _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
379 *x1 = _mm256_castps128_ps256(t1);
380 *y1 = _mm256_castps128_ps256(t2);
381 *z1 = _mm256_castps128_ps256(t3);
385 static gmx_inline void
386 gmx_mm256_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
387 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
388 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
389 __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
390 __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
392 __m128 t1, t2, t3, t4;
393 t1 = _mm_loadu_ps(ptrA);
394 t2 = _mm_loadu_ps(ptrB);
395 t3 = _mm_loadu_ps(ptrC);
396 t4 = _mm_loadu_ps(ptrD);
397 _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
398 *x1 = _mm256_castps128_ps256(t1);
399 *y1 = _mm256_castps128_ps256(t2);
400 *z1 = _mm256_castps128_ps256(t3);
401 *x2 = _mm256_castps128_ps256(t4);
402 t1 = _mm_loadu_ps(ptrA+4);
403 t2 = _mm_loadu_ps(ptrB+4);
404 t3 = _mm_loadu_ps(ptrC+4);
405 t4 = _mm_loadu_ps(ptrD+4);
406 _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
407 *y2 = _mm256_castps128_ps256(t1);
408 *z2 = _mm256_castps128_ps256(t2);
409 *x3 = _mm256_castps128_ps256(t3);
410 *y3 = _mm256_castps128_ps256(t4);
411 t1 = _mm_load_ss(ptrA+8);
412 t2 = _mm_load_ss(ptrB+8);
413 t3 = _mm_load_ss(ptrC+8);
414 t4 = _mm_load_ss(ptrD+8);
415 t1 = _mm_unpacklo_ps(t1, t3);
416 t3 = _mm_unpacklo_ps(t2, t4);
417 *z3 = _mm256_castps128_ps256(_mm_unpacklo_ps(t1, t3));
422 static gmx_inline void
423 gmx_mm256_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
424 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
425 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
426 __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
427 __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
428 __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
430 __m128 t1, t2, t3, t4;
431 t1 = _mm_loadu_ps(ptrA);
432 t2 = _mm_loadu_ps(ptrB);
433 t3 = _mm_loadu_ps(ptrC);
434 t4 = _mm_loadu_ps(ptrD);
435 _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
436 *x1 = _mm256_castps128_ps256(t1);
437 *y1 = _mm256_castps128_ps256(t2);
438 *z1 = _mm256_castps128_ps256(t3);
439 *x2 = _mm256_castps128_ps256(t4);
440 t1 = _mm_loadu_ps(ptrA+4);
441 t2 = _mm_loadu_ps(ptrB+4);
442 t3 = _mm_loadu_ps(ptrC+4);
443 t4 = _mm_loadu_ps(ptrD+4);
444 _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
445 *y2 = _mm256_castps128_ps256(t1);
446 *z2 = _mm256_castps128_ps256(t2);
447 *x3 = _mm256_castps128_ps256(t3);
448 *y3 = _mm256_castps128_ps256(t4);
449 t1 = _mm_loadu_ps(ptrA+8);
450 t2 = _mm_loadu_ps(ptrB+8);
451 t3 = _mm_loadu_ps(ptrC+8);
452 t4 = _mm_loadu_ps(ptrD+8);
453 _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
454 *z3 = _mm256_castps128_ps256(t1);
455 *x4 = _mm256_castps128_ps256(t2);
456 *y4 = _mm256_castps128_ps256(t3);
457 *z4 = _mm256_castps128_ps256(t4);
461 static gmx_inline void
462 gmx_mm256_load_1rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
463 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
464 const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
465 const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
466 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1)
468 __m256 t1, t2, t3, t4, t5, t6, t7, t8;
469 __m128i mask = _mm_set_epi32(0, -1, -1, -1);
471 t1 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrE, mask), gmx_mm_maskload_ps(ptrA, mask)); /* - zE yE xE | - zA yA xA */
472 t2 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrF, mask), gmx_mm_maskload_ps(ptrB, mask)); /* - zF yF xF | - zB yB xB */
473 t3 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrG, mask), gmx_mm_maskload_ps(ptrC, mask)); /* - zG yG xG | - zC yC xC */
474 t4 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrH, mask), gmx_mm_maskload_ps(ptrD, mask)); /* - zH yH xH | - zD yD xD */
476 t5 = _mm256_unpacklo_ps(t1, t2); /* yF yE xF xE | yB yA xB xA */
477 t6 = _mm256_unpacklo_ps(t3, t4); /* yH yG xH xG | yD yC xD xC */
478 t7 = _mm256_unpackhi_ps(t1, t2); /* - - zF zE | - - zB zA */
479 t8 = _mm256_unpackhi_ps(t3, t4); /* - - zH zG | - - zD zC */
481 *x1 = _mm256_shuffle_ps(t5, t6, _MM_SHUFFLE(1, 0, 1, 0));
482 *y1 = _mm256_shuffle_ps(t5, t6, _MM_SHUFFLE(3, 2, 3, 2));
483 *z1 = _mm256_shuffle_ps(t7, t8, _MM_SHUFFLE(1, 0, 1, 0));
487 static gmx_inline void
488 gmx_mm256_load_3rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
489 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
490 const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
491 const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
492 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
493 __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
494 __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
496 __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
498 t1 = _mm256_loadu_ps(ptrA); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
499 t2 = _mm256_loadu_ps(ptrB); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
500 t3 = _mm256_loadu_ps(ptrC); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
501 t4 = _mm256_loadu_ps(ptrD); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
502 t5 = _mm256_loadu_ps(ptrE); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
503 t6 = _mm256_loadu_ps(ptrF); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
504 t7 = _mm256_loadu_ps(ptrG); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
505 t8 = _mm256_loadu_ps(ptrH); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
507 t9 = _mm256_unpacklo_ps(t1, t2); /* z2b z2a y2b y2a | y1b y1a x1b x1a */
508 t10 = _mm256_unpackhi_ps(t1, t2); /* y3b y3a x3b x3a | x2b x2a z1b z1a */
509 t11 = _mm256_unpacklo_ps(t3, t4); /* z2d z2c y2d y2c | y1d y1c x1d x1c */
510 t12 = _mm256_unpackhi_ps(t3, t4); /* y3d y3c x3d x3c | x2d x2c z1d z1c */
511 t1 = _mm256_unpacklo_ps(t5, t6); /* z2f z2e y2f y2e | y1f y1e x1f x1e */
512 t2 = _mm256_unpackhi_ps(t5, t6); /* y3f y3e x3f x3e | x2f x2e z1f z1e */
513 t3 = _mm256_unpacklo_ps(t7, t8); /* z2h z2g y2h y2g | y1h y1g x1h x1g */
514 t4 = _mm256_unpackhi_ps(t7, t8); /* y3h y3g x3h x3g | x2h x2g z1h z1g */
516 t5 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(1, 0, 1, 0)); /* y2d y2c y2b y2a | x1d x1c x1b x1a */
517 t6 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(3, 2, 3, 2)); /* z2d z2c z2b z2a | y1d y1c y1b y1a */
518 t7 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(1, 0, 1, 0)); /* x3d x3c x3b x3a | z1d z1c z1b z1a */
519 t8 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d y3c y3b y3a | x2d x2c x2b x2a */
521 t9 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* y2h y2g y2f y2e | x1h x1g x1f x1e */
522 t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* z2h z2g z2f z2e | y1h y1g y1f y1e */
523 t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* x3h x3g x3f x3e | z1h z1g z1f z1e */
524 t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* y3h y3g y3f y3e | x2h x2g x2f x2e */
526 *x1 = _mm256_permute2f128_ps(t5, t9, 0x20);
527 *y1 = _mm256_permute2f128_ps(t6, t10, 0x20);
528 *z1 = _mm256_permute2f128_ps(t7, t11, 0x20);
529 *x2 = _mm256_permute2f128_ps(t8, t12, 0x20);
531 *y2 = _mm256_permute2f128_ps(t5, t9, 0x31);
532 *z2 = _mm256_permute2f128_ps(t6, t10, 0x31);
533 *x3 = _mm256_permute2f128_ps(t7, t11, 0x31);
534 *y3 = _mm256_permute2f128_ps(t8, t12, 0x31);
536 t1 = gmx_mm256_set_m128(_mm_load_ss(ptrE+8), _mm_load_ss(ptrA+8));
537 t2 = gmx_mm256_set_m128(_mm_load_ss(ptrF+8), _mm_load_ss(ptrB+8));
538 t3 = gmx_mm256_set_m128(_mm_load_ss(ptrG+8), _mm_load_ss(ptrC+8));
539 t4 = gmx_mm256_set_m128(_mm_load_ss(ptrH+8), _mm_load_ss(ptrD+8));
541 t1 = _mm256_unpacklo_ps(t1, t3); /* - - z3g z3e | - - z3c z3a */
542 t2 = _mm256_unpacklo_ps(t2, t4); /* - - z3h z3f | - - z3d z3b */
544 *z3 = _mm256_unpacklo_ps(t1, t2);
549 static gmx_inline void
550 gmx_mm256_load_4rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
551 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
552 const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
553 const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
554 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
555 __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
556 __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
557 __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
559 __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
561 t1 = _mm256_loadu_ps(ptrA); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
562 t2 = _mm256_loadu_ps(ptrB); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
563 t3 = _mm256_loadu_ps(ptrC); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
564 t4 = _mm256_loadu_ps(ptrD); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
565 t5 = _mm256_loadu_ps(ptrE); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
566 t6 = _mm256_loadu_ps(ptrF); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
567 t7 = _mm256_loadu_ps(ptrG); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
568 t8 = _mm256_loadu_ps(ptrH); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
570 t9 = _mm256_unpacklo_ps(t1, t2); /* z2b z2a y2b y2a | y1b y1a x1b x1a */
571 t10 = _mm256_unpackhi_ps(t1, t2); /* y3b y3a x3b x3a | x2b x2a z1b z1a */
572 t11 = _mm256_unpacklo_ps(t3, t4); /* z2d z2c y2d y2c | y1d y1c x1d x1c */
573 t12 = _mm256_unpackhi_ps(t3, t4); /* y3d y3c x3d x3c | x2d x2c z1d z1c */
574 t1 = _mm256_unpacklo_ps(t5, t6); /* z2f z2e y2f y2e | y1f y1e x1f x1e */
575 t2 = _mm256_unpackhi_ps(t5, t6); /* y3f y3e x3f x3e | x2f x2e z1f z1e */
576 t3 = _mm256_unpacklo_ps(t7, t8); /* z2h z2g y2h y2g | y1h y1g x1h x1g */
577 t4 = _mm256_unpackhi_ps(t7, t8); /* y3h y3g x3h x3g | x2h x2g z1h z1g */
579 t5 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(1, 0, 1, 0)); /* y2d y2c y2b y2a | x1d x1c x1b x1a */
580 t6 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(3, 2, 3, 2)); /* z2d z2c z2b z2a | y1d y1c y1b y1a */
581 t7 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(1, 0, 1, 0)); /* x3d x3c x3b x3a | z1d z1c z1b z1a */
582 t8 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d y3c y3b y3a | x2d x2c x2b x2a */
583 t9 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* y2h y2g y2f y2e | x1h x1g x1f x1e */
584 t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* z2h z2g z2f z2e | y1h y1g y1f y1e */
585 t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* x3h x3g x3f x3e | z1h z1g z1f z1e */
586 t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* y3h y3g y3f y3e | x2h x2g x2f x2e */
588 *x1 = _mm256_permute2f128_ps(t5, t9, 0x20);
589 *y1 = _mm256_permute2f128_ps(t6, t10, 0x20);
590 *z1 = _mm256_permute2f128_ps(t7, t11, 0x20);
591 *x2 = _mm256_permute2f128_ps(t8, t12, 0x20);
593 *y2 = _mm256_permute2f128_ps(t5, t9, 0x31);
594 *z2 = _mm256_permute2f128_ps(t6, t10, 0x31);
595 *x3 = _mm256_permute2f128_ps(t7, t11, 0x31);
596 *y3 = _mm256_permute2f128_ps(t8, t12, 0x31);
598 t1 = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8), _mm_loadu_ps(ptrA+8)); /* z4e y4e x4e z3e | z4a y4a x4a z3a */
599 t2 = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8), _mm_loadu_ps(ptrB+8)); /* z4f y4f x4f z3f | z4b y4b x4b z3b */
600 t3 = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8), _mm_loadu_ps(ptrC+8)); /* z4g y4g x4g z3g | z4c y4c x4c z3c */
601 t4 = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8), _mm_loadu_ps(ptrD+8)); /* z4h y4h x4h z3h | z4d y4d x4d z3d */
603 t5 = _mm256_unpacklo_ps(t1, t2); /* x4f x4e z3f z3e | x4b x4a z3b z3a */
604 t6 = _mm256_unpackhi_ps(t1, t2); /* z4f z4e y4f y4e | z4b z4a y4b y4a */
605 t7 = _mm256_unpacklo_ps(t3, t4); /* x4h x4g z3h z3g | x4d x4c z3d z3c */
606 t8 = _mm256_unpackhi_ps(t3, t4); /* z4h z4g y4h y4g | z4d z4c y4d y4c */
608 *z3 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); /* z3h z3g z3f z3e | z3d z3c z3b z3a */
609 *x4 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); /* x4h x4g x4f x4e | x4d x4c x4b x4a */
610 *y4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0)); /* y4h y4g y4f y4e | y4d y4c y4b y4a */
611 *z4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2)); /* z4h z4g z4f z4e | z4d z4c z4b z4a */
615 static gmx_inline void
616 gmx_mm256_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
617 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
618 __m256 x1, __m256 y1, __m256 z1)
620 __m128 t1, t2, t3, t4, t5, t6, t7, t8;
623 /* Construct a mask without executing any data loads */
624 mask = _mm_blend_epi16(_mm_setzero_si128(), _mm_cmpeq_epi16(_mm_setzero_si128(), _mm_setzero_si128()), 0x3F);
626 t3 = _mm_unpacklo_ps(_mm256_castps256_ps128(x1), _mm256_castps256_ps128(y1)); /* y1b x1b y1a x1a */
627 t4 = _mm_unpackhi_ps(_mm256_castps256_ps128(x1), _mm256_castps256_ps128(y1)); /* y1d x1d y1c x1c */
629 t1 = _mm_shuffle_ps(t3, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 0, 1, 0)); /* - z1a y1a x1a */
630 t2 = _mm_shuffle_ps(t3, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 1, 3, 2)); /* - z1b y1b x1b */
631 t3 = _mm_shuffle_ps(t4, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 2, 1, 0)); /* - z1c y1c x1c */
632 t4 = _mm_shuffle_ps(t4, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 3, 3, 2)); /* - z1d y1d x1d */
634 t5 = gmx_mm_maskload_ps(ptrA, mask);
635 t6 = gmx_mm_maskload_ps(ptrB, mask);
636 t7 = gmx_mm_maskload_ps(ptrC, mask);
637 t8 = gmx_mm_maskload_ps(ptrD, mask);
639 t5 = _mm_sub_ps(t5, t1);
640 t6 = _mm_sub_ps(t6, t2);
641 t7 = _mm_sub_ps(t7, t3);
642 t8 = _mm_sub_ps(t8, t4);
644 gmx_mm_maskstore_ps(ptrA, mask, t5);
645 gmx_mm_maskstore_ps(ptrB, mask, t6);
646 gmx_mm_maskstore_ps(ptrC, mask, t7);
647 gmx_mm_maskstore_ps(ptrD, mask, t8);
650 #if defined (_MSC_VER) && defined(_M_IX86)
651 /* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
652 #define gmx_mm256_decrement_3rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
653 x1, y1, z1, x2, y2, z2, x3, y3, z3) \
655 __m256 _t1, _t2, _t3, _t4, _t5, _t6; \
656 __m128 _tA, _tB, _tC, _tD; \
658 _t1 = _mm256_loadu_ps(ptrA); \
659 _t2 = _mm256_loadu_ps(ptrB); \
660 _t3 = _mm256_loadu_ps(ptrC); \
661 _t4 = _mm256_loadu_ps(ptrD); \
662 _tA = _mm_load_ss(ptrA+8); \
663 _tB = _mm_load_ss(ptrB+8); \
664 _tC = _mm_load_ss(ptrC+8); \
665 _tD = _mm_load_ss(ptrD+8); \
666 _t5 = _mm256_unpacklo_ps(x1, y1); \
667 x1 = _mm256_unpackhi_ps(x1, y1); \
668 y1 = _mm256_unpacklo_ps(z1, x2); \
669 z1 = _mm256_unpackhi_ps(z1, x2); \
670 x2 = _mm256_unpacklo_ps(y2, z2); \
671 y2 = _mm256_unpackhi_ps(y2, z2); \
672 _t6 = _mm256_unpacklo_ps(x3, y3); \
673 x3 = _mm256_unpackhi_ps(x3, y3); \
674 _t5 = _mm256_insertf128_ps(_t5, _mm256_castps256_ps128(x2), 0x1); \
675 x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); \
676 y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(_t6), 0x1); \
677 z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); \
678 z2 = _mm256_shuffle_ps(_t5, y1, _MM_SHUFFLE(1, 0, 1, 0)); \
679 _t5 = _mm256_shuffle_ps(_t5, y1, _MM_SHUFFLE(3, 2, 3, 2)); \
680 y1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(1, 0, 1, 0)); \
681 x1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(3, 2, 3, 2)); \
682 _t1 = _mm256_sub_ps(_t1, z2); \
683 _t2 = _mm256_sub_ps(_t2, _t5); \
684 _t3 = _mm256_sub_ps(_t3, y1); \
685 _t4 = _mm256_sub_ps(_t4, x1); \
686 _tA = _mm_sub_ss(_tA, _mm256_castps256_ps128(z3)); \
687 _tB = _mm_sub_ss(_tB, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(1, 1, 1, 1))); \
688 _tC = _mm_sub_ss(_tC, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(2, 2, 2, 2))); \
689 _tD = _mm_sub_ss(_tD, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(3, 3, 3, 3))); \
690 _mm256_storeu_ps(ptrA, _t1); \
691 _mm256_storeu_ps(ptrB, _t2); \
692 _mm256_storeu_ps(ptrC, _t3); \
693 _mm256_storeu_ps(ptrD, _t4); \
694 _mm_store_ss(ptrA+8, _tA); \
695 _mm_store_ss(ptrB+8, _tB); \
696 _mm_store_ss(ptrC+8, _tC); \
697 _mm_store_ss(ptrD+8, _tD); \
700 /* Real function for sane compilers */
701 static gmx_inline void
702 gmx_mm256_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
703 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
704 __m256 x1, __m256 y1, __m256 z1,
705 __m256 x2, __m256 y2, __m256 z2,
706 __m256 x3, __m256 y3, __m256 z3)
708 __m256 t1, t2, t3, t4, t5, t6;
709 __m128 tA, tB, tC, tD;
711 t1 = _mm256_loadu_ps(ptrA);
712 t2 = _mm256_loadu_ps(ptrB);
713 t3 = _mm256_loadu_ps(ptrC);
714 t4 = _mm256_loadu_ps(ptrD);
715 tA = _mm_load_ss(ptrA+8);
716 tB = _mm_load_ss(ptrB+8);
717 tC = _mm_load_ss(ptrC+8);
718 tD = _mm_load_ss(ptrD+8);
720 t5 = _mm256_unpacklo_ps(x1, y1); /* - - - - | y1b x1b y1a x1a */
721 x1 = _mm256_unpackhi_ps(x1, y1); /* - - - - | y1d x1d y1c x1c */
722 y1 = _mm256_unpacklo_ps(z1, x2); /* - - - - | x2b z1b x2a z1a */
723 z1 = _mm256_unpackhi_ps(z1, x2); /* - - - - | x2d z1d x2c z1c */
725 x2 = _mm256_unpacklo_ps(y2, z2); /* - - - - | z2b y2b z2a y2a */
726 y2 = _mm256_unpackhi_ps(y2, z2); /* - - - - | z2d y2d z2c y2c */
727 t6 = _mm256_unpacklo_ps(x3, y3); /* - - - - | y3b x3b y3a x3a */
728 x3 = _mm256_unpackhi_ps(x3, y3); /* - - - - | y3d x3d y3c x3c */
730 t5 = _mm256_insertf128_ps(t5, _mm256_castps256_ps128(x2), 0x1); /* z2b y2b z2a y2a | y1b x1b y1a x1a */
731 x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); /* z2d y2d z2c y2c | y1d x1d y1c x1c */
733 y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(t6), 0x1); /* y3b x3b y3a x3a | x2b z1b x2a z1a */
734 z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); /* y3d x3d y3c x3c | x2d z1d x2c z1c */
736 z2 = _mm256_shuffle_ps(t5, y1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
737 t5 = _mm256_shuffle_ps(t5, y1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
738 y1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
739 x1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
741 t1 = _mm256_sub_ps(t1, z2);
742 t2 = _mm256_sub_ps(t2, t5);
743 t3 = _mm256_sub_ps(t3, y1);
744 t4 = _mm256_sub_ps(t4, x1);
746 tA = _mm_sub_ss(tA, _mm256_castps256_ps128(z3));
747 tB = _mm_sub_ss(tB, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(1, 1, 1, 1)));
748 tC = _mm_sub_ss(tC, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(2, 2, 2, 2)));
749 tD = _mm_sub_ss(tD, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(3, 3, 3, 3)));
751 /* Here we store a full 256-bit value and a separate 32-bit one; no overlap can happen */
752 _mm256_storeu_ps(ptrA, t1);
753 _mm256_storeu_ps(ptrB, t2);
754 _mm256_storeu_ps(ptrC, t3);
755 _mm256_storeu_ps(ptrD, t4);
756 _mm_store_ss(ptrA+8, tA);
757 _mm_store_ss(ptrB+8, tB);
758 _mm_store_ss(ptrC+8, tC);
759 _mm_store_ss(ptrD+8, tD);
765 #if defined (_MSC_VER) && defined(_M_IX86)
766 /* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
767 #define gmx_mm256_decrement_4rvec_4ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, \
768 x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4) \
770 __m256 _t1, _t2, _t3, _t4, _t5; \
771 __m128 _tA, _tB, _tC, _tD, _tE, _tF, _tG, _tH; \
773 _t1 = _mm256_loadu_ps(ptrA); \
774 _t2 = _mm256_loadu_ps(ptrB); \
775 _t3 = _mm256_loadu_ps(ptrC); \
776 _t4 = _mm256_loadu_ps(ptrD); \
777 _tA = _mm_loadu_ps(ptrA+8); \
778 _tB = _mm_loadu_ps(ptrB+8); \
779 _tC = _mm_loadu_ps(ptrC+8); \
780 _tD = _mm_loadu_ps(ptrD+8); \
781 _t5 = _mm256_unpacklo_ps(x1, y1); \
782 x1 = _mm256_unpackhi_ps(x1, y1); \
783 y1 = _mm256_unpacklo_ps(z1, x2); \
784 z1 = _mm256_unpackhi_ps(z1, x2); \
785 x2 = _mm256_unpacklo_ps(y2, z2); \
786 y2 = _mm256_unpackhi_ps(y2, z2); \
787 z2 = _mm256_unpacklo_ps(x3, y3); \
788 x3 = _mm256_unpackhi_ps(x3, y3); \
789 y3 = _mm256_unpacklo_ps(z3, x4); \
790 z3 = _mm256_unpackhi_ps(z3, x4); \
791 x4 = _mm256_unpacklo_ps(y4, z4); \
792 y4 = _mm256_unpackhi_ps(y4, z4); \
793 x2 = _mm256_insertf128_ps(_t5, _mm256_castps256_ps128(x2), 0x1); \
794 x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); \
795 y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(z2), 0x1); \
796 z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); \
797 z2 = _mm256_shuffle_ps(x2, y1, _MM_SHUFFLE(1, 0, 1, 0)); \
798 _t5 = _mm256_shuffle_ps(x2, y1, _MM_SHUFFLE(3, 2, 3, 2)); \
799 y1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(1, 0, 1, 0)); \
800 x1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(3, 2, 3, 2)); \
801 _tE = _mm_shuffle_ps(_mm256_castps256_ps128(y3), _mm256_castps256_ps128(x4), _MM_SHUFFLE(1, 0, 1, 0)); \
802 _tF = _mm_shuffle_ps(_mm256_castps256_ps128(y3), _mm256_castps256_ps128(x4), _MM_SHUFFLE(3, 2, 3, 2)); \
803 _tG = _mm_shuffle_ps(_mm256_castps256_ps128(z3), _mm256_castps256_ps128(y4), _MM_SHUFFLE(1, 0, 1, 0)); \
804 _tH = _mm_shuffle_ps(_mm256_castps256_ps128(z3), _mm256_castps256_ps128(y4), _MM_SHUFFLE(3, 2, 3, 2)); \
805 _t1 = _mm256_sub_ps(_t1, z2); \
806 _t2 = _mm256_sub_ps(_t2, _t5); \
807 _t3 = _mm256_sub_ps(_t3, y1); \
808 _t4 = _mm256_sub_ps(_t4, x1); \
809 _tA = _mm_sub_ps(_tA, _tE); \
810 _tB = _mm_sub_ps(_tB, _tF); \
811 _tC = _mm_sub_ps(_tC, _tG); \
812 _tD = _mm_sub_ps(_tD, _tH); \
813 _mm256_storeu_ps(ptrA, _t1); \
814 _mm256_storeu_ps(ptrB, _t2); \
815 _mm256_storeu_ps(ptrC, _t3); \
816 _mm256_storeu_ps(ptrD, _t4); \
817 _mm_storeu_ps(ptrA+8, _tA); \
818 _mm_storeu_ps(ptrB+8, _tB); \
819 _mm_storeu_ps(ptrC+8, _tC); \
820 _mm_storeu_ps(ptrD+8, _tD); \
823 /* Real function for sane compilers */
824 static gmx_inline void
825 gmx_mm256_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
826 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
827 __m256 x1, __m256 y1, __m256 z1,
828 __m256 x2, __m256 y2, __m256 z2,
829 __m256 x3, __m256 y3, __m256 z3,
830 __m256 x4, __m256 y4, __m256 z4)
832 __m256 t1, t2, t3, t4, t5;
833 __m128 tA, tB, tC, tD, tE, tF, tG, tH;
835 t1 = _mm256_loadu_ps(ptrA);
836 t2 = _mm256_loadu_ps(ptrB);
837 t3 = _mm256_loadu_ps(ptrC);
838 t4 = _mm256_loadu_ps(ptrD);
839 tA = _mm_loadu_ps(ptrA+8);
840 tB = _mm_loadu_ps(ptrB+8);
841 tC = _mm_loadu_ps(ptrC+8);
842 tD = _mm_loadu_ps(ptrD+8);
844 t5 = _mm256_unpacklo_ps(x1, y1); /* - - - - | y1b x1b y1a x1a */
845 x1 = _mm256_unpackhi_ps(x1, y1); /* - - - - | y1d x1d y1c x1c */
846 y1 = _mm256_unpacklo_ps(z1, x2); /* - - - - | x2b z1b x2a z1a */
847 z1 = _mm256_unpackhi_ps(z1, x2); /* - - - - | x2d z1d x2c z1c */
849 x2 = _mm256_unpacklo_ps(y2, z2); /* - - - - | z2b y2b z2a y2a */
850 y2 = _mm256_unpackhi_ps(y2, z2); /* - - - - | z2d y2d z2c y2c */
851 z2 = _mm256_unpacklo_ps(x3, y3); /* - - - - | y3b x3b y3a x3a */
852 x3 = _mm256_unpackhi_ps(x3, y3); /* - - - - | y3d x3d y3c x3c */
854 y3 = _mm256_unpacklo_ps(z3, x4); /* - - - - | x4b z3b x4a z3a */
855 z3 = _mm256_unpackhi_ps(z3, x4); /* - - - - | x4d z3d x4c z3c */
856 x4 = _mm256_unpacklo_ps(y4, z4); /* - - - - | z4b y4b z4a y4a */
857 y4 = _mm256_unpackhi_ps(y4, z4); /* - - - - | z4d y4d z4c y4c */
859 x2 = _mm256_insertf128_ps(t5, _mm256_castps256_ps128(x2), 0x1); /* z2b y2b z2a y2a | y1b x1b y1a x1a */
860 x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); /* z2d y2d z2c y2c | y1d x1d y1c x1c */
861 y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(z2), 0x1); /* y3b x3b y3a x3a | x2b z1b x2a z1a */
862 z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); /* y3d x3d y3c x3c | x2d z1d x2c z1c */
864 z2 = _mm256_shuffle_ps(x2, y1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
865 t5 = _mm256_shuffle_ps(x2, y1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
866 y1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
867 x1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
869 tE = _mm_shuffle_ps(_mm256_castps256_ps128(y3), _mm256_castps256_ps128(x4), _MM_SHUFFLE(1, 0, 1, 0)); /* z4a y4a x4a z3a */
870 tF = _mm_shuffle_ps(_mm256_castps256_ps128(y3), _mm256_castps256_ps128(x4), _MM_SHUFFLE(3, 2, 3, 2)); /* z4b y4b x4b z3b */
872 tG = _mm_shuffle_ps(_mm256_castps256_ps128(z3), _mm256_castps256_ps128(y4), _MM_SHUFFLE(1, 0, 1, 0)); /* z4c y4c x4c z3c */
873 tH = _mm_shuffle_ps(_mm256_castps256_ps128(z3), _mm256_castps256_ps128(y4), _MM_SHUFFLE(3, 2, 3, 2)); /* z4d y4d x4d z3d */
875 t1 = _mm256_sub_ps(t1, z2);
876 t2 = _mm256_sub_ps(t2, t5);
877 t3 = _mm256_sub_ps(t3, y1);
878 t4 = _mm256_sub_ps(t4, x1);
880 tA = _mm_sub_ps(tA, tE);
881 tB = _mm_sub_ps(tB, tF);
882 tC = _mm_sub_ps(tC, tG);
883 tD = _mm_sub_ps(tD, tH);
885 /* Here we store a full 256-bit value and a separate 128-bit one; no overlap can happen */
886 _mm256_storeu_ps(ptrA, t1);
887 _mm256_storeu_ps(ptrB, t2);
888 _mm256_storeu_ps(ptrC, t3);
889 _mm256_storeu_ps(ptrD, t4);
890 _mm_storeu_ps(ptrA+8, tA);
891 _mm_storeu_ps(ptrB+8, tB);
892 _mm_storeu_ps(ptrC+8, tC);
893 _mm_storeu_ps(ptrD+8, tD);
898 static gmx_inline void
899 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
900 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
901 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
902 float * gmx_restrict ptrG, float * gmx_restrict ptrH,
903 __m256 x1, __m256 y1, __m256 z1)
905 __m256 t1, t2, t3, t4, t5, t6;
906 __m256 tA, tB, tC, tD;
909 /* Construct a mask without executing any data loads */
910 mask = _mm_blend_epi16(_mm_setzero_si128(), _mm_cmpeq_epi16(_mm_setzero_si128(), _mm_setzero_si128()), 0x3F);
912 tA = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrE, mask), gmx_mm_maskload_ps(ptrA, mask));
913 tB = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrF, mask), gmx_mm_maskload_ps(ptrB, mask));
914 tC = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrG, mask), gmx_mm_maskload_ps(ptrC, mask));
915 tD = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrH, mask), gmx_mm_maskload_ps(ptrD, mask));
916 t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
917 t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */
919 t3 = _mm256_shuffle_ps(t1, z1, _MM_SHUFFLE(0, 0, 1, 0)); /* - z1e y1e x1e | - z1a y1a x1a */
920 t4 = _mm256_shuffle_ps(t1, z1, _MM_SHUFFLE(0, 1, 3, 2)); /* - z1f y1f x1f | - z1b y1b x1b */
921 t5 = _mm256_shuffle_ps(t2, z1, _MM_SHUFFLE(0, 2, 1, 0)); /* - z1g y1g x1g | - z1c y1c x1c */
922 t6 = _mm256_shuffle_ps(t2, z1, _MM_SHUFFLE(0, 3, 3, 2)); /* - z1h y1h x1h | - z1d y1d x1d */
924 tA = _mm256_sub_ps(tA, t3);
925 tB = _mm256_sub_ps(tB, t4);
926 tC = _mm256_sub_ps(tC, t5);
927 tD = _mm256_sub_ps(tD, t6);
929 gmx_mm_maskstore_ps(ptrA, mask, _mm256_castps256_ps128(tA));
930 gmx_mm_maskstore_ps(ptrB, mask, _mm256_castps256_ps128(tB));
931 gmx_mm_maskstore_ps(ptrC, mask, _mm256_castps256_ps128(tC));
932 gmx_mm_maskstore_ps(ptrD, mask, _mm256_castps256_ps128(tD));
933 gmx_mm_maskstore_ps(ptrE, mask, _mm256_extractf128_ps(tA, 0x1));
934 gmx_mm_maskstore_ps(ptrF, mask, _mm256_extractf128_ps(tB, 0x1));
935 gmx_mm_maskstore_ps(ptrG, mask, _mm256_extractf128_ps(tC, 0x1));
936 gmx_mm_maskstore_ps(ptrH, mask, _mm256_extractf128_ps(tD, 0x1));
941 #if defined (_MSC_VER) && defined(_M_IX86)
942 /* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
943 #define gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, ptrE, ptrF, ptrG, ptrH, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
945 __m256 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10, _t11, _t12; \
946 __m256 _tA, _tB, _tC, _tD, _tE, _tF, _tG, _tH, _tI, _tJ, _tK, _tL; \
948 _tA = _mm256_loadu_ps(ptrA); \
949 _tB = _mm256_loadu_ps(ptrB); \
950 _tC = _mm256_loadu_ps(ptrC); \
951 _tD = _mm256_loadu_ps(ptrD); \
952 _tE = _mm256_loadu_ps(ptrE); \
953 _tF = _mm256_loadu_ps(ptrF); \
954 _tG = _mm256_loadu_ps(ptrG); \
955 _tH = _mm256_loadu_ps(ptrH); \
956 _t1 = _mm256_unpacklo_ps(_x1, _y1); \
957 _t2 = _mm256_unpackhi_ps(_x1, _y1); \
958 _t3 = _mm256_unpacklo_ps(_z1, _x2); \
959 _t4 = _mm256_unpackhi_ps(_z1, _x2); \
960 _t5 = _mm256_unpacklo_ps(_y2, _z2); \
961 _t6 = _mm256_unpackhi_ps(_y2, _z2); \
962 _t7 = _mm256_unpacklo_ps(_x3, _y3); \
963 _t8 = _mm256_unpackhi_ps(_x3, _y3); \
964 _t9 = _mm256_shuffle_ps(_t1, _t3, _MM_SHUFFLE(1, 0, 1, 0)); \
965 _t10 = _mm256_shuffle_ps(_t1, _t3, _MM_SHUFFLE(3, 2, 3, 2)); \
966 _t11 = _mm256_shuffle_ps(_t2, _t4, _MM_SHUFFLE(1, 0, 1, 0)); \
967 _t12 = _mm256_shuffle_ps(_t2, _t4, _MM_SHUFFLE(3, 2, 3, 2)); \
968 _t1 = _mm256_shuffle_ps(_t5, _t7, _MM_SHUFFLE(1, 0, 1, 0)); \
969 _t2 = _mm256_shuffle_ps(_t5, _t7, _MM_SHUFFLE(3, 2, 3, 2)); \
970 _t3 = _mm256_shuffle_ps(_t6, _t8, _MM_SHUFFLE(1, 0, 1, 0)); \
971 _t4 = _mm256_shuffle_ps(_t6, _t8, _MM_SHUFFLE(3, 2, 3, 2)); \
972 _t5 = gmx_mm256_unpack128lo_ps(_t9, _t1); \
973 _t6 = gmx_mm256_unpack128hi_ps(_t9, _t1); \
974 _t7 = gmx_mm256_unpack128lo_ps(_t10, _t2); \
975 _t8 = gmx_mm256_unpack128hi_ps(_t10, _t2); \
976 _t1 = gmx_mm256_unpack128lo_ps(_t11, _t3); \
977 _t2 = gmx_mm256_unpack128hi_ps(_t11, _t3); \
978 _t9 = gmx_mm256_unpack128lo_ps(_t12, _t4); \
979 _t10 = gmx_mm256_unpack128hi_ps(_t12, _t4); \
980 _tA = _mm256_sub_ps(_tA, _t5); \
981 _tB = _mm256_sub_ps(_tB, _t7); \
982 _tC = _mm256_sub_ps(_tC, _t1); \
983 _tD = _mm256_sub_ps(_tD, _t9); \
984 _tE = _mm256_sub_ps(_tE, _t6); \
985 _tF = _mm256_sub_ps(_tF, _t8); \
986 _tG = _mm256_sub_ps(_tG, _t2); \
987 _tH = _mm256_sub_ps(_tH, _t10); \
988 _mm256_storeu_ps(ptrA, _tA); \
989 _mm256_storeu_ps(ptrB, _tB); \
990 _mm256_storeu_ps(ptrC, _tC); \
991 _mm256_storeu_ps(ptrD, _tD); \
992 _mm256_storeu_ps(ptrE, _tE); \
993 _mm256_storeu_ps(ptrF, _tF); \
994 _mm256_storeu_ps(ptrG, _tG); \
995 _mm256_storeu_ps(ptrH, _tH); \
996 _tI = gmx_mm256_set_m128(_mm_load_ss(ptrE+8), _mm_load_ss(ptrA+8)); \
997 _tJ = gmx_mm256_set_m128(_mm_load_ss(ptrF+8), _mm_load_ss(ptrB+8)); \
998 _tK = gmx_mm256_set_m128(_mm_load_ss(ptrG+8), _mm_load_ss(ptrC+8)); \
999 _tL = gmx_mm256_set_m128(_mm_load_ss(ptrH+8), _mm_load_ss(ptrD+8)); \
1000 _tI = _mm256_unpacklo_ps(_tI, _tK); \
1001 _tJ = _mm256_unpacklo_ps(_tJ, _tL); \
1002 _tI = _mm256_unpacklo_ps(_tI, _tJ); \
1003 _tI = _mm256_sub_ps(_tI, _z3); \
1004 _tJ = _mm256_permute_ps(_tI, _MM_SHUFFLE(1, 1, 1, 1)); \
1005 _tK = _mm256_permute_ps(_tI, _MM_SHUFFLE(2, 2, 2, 2)); \
1006 _tL = _mm256_permute_ps(_tI, _MM_SHUFFLE(3, 3, 3, 3)); \
1007 _mm_store_ss(ptrA+8, _mm256_castps256_ps128(_tI)); \
1008 _mm_store_ss(ptrB+8, _mm256_castps256_ps128(_tJ)); \
1009 _mm_store_ss(ptrC+8, _mm256_castps256_ps128(_tK)); \
1010 _mm_store_ss(ptrD+8, _mm256_castps256_ps128(_tL)); \
1011 _mm_store_ss(ptrE+8, _mm256_extractf128_ps(_tI, 0x1)); \
1012 _mm_store_ss(ptrF+8, _mm256_extractf128_ps(_tJ, 0x1)); \
1013 _mm_store_ss(ptrG+8, _mm256_extractf128_ps(_tK, 0x1)); \
1014 _mm_store_ss(ptrH+8, _mm256_extractf128_ps(_tL, 0x1)); \
1017 /* Real function for sane compilers */
1018 static gmx_inline void
1019 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
1020 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
1021 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
1022 float * gmx_restrict ptrG, float * gmx_restrict ptrH,
1023 __m256 x1, __m256 y1, __m256 z1,
1024 __m256 x2, __m256 y2, __m256 z2,
1025 __m256 x3, __m256 y3, __m256 z3)
1027 __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
1028 __m256 tA, tB, tC, tD, tE, tF, tG, tH;
1029 __m256 tI, tJ, tK, tL;
1031 tA = _mm256_loadu_ps(ptrA);
1032 tB = _mm256_loadu_ps(ptrB);
1033 tC = _mm256_loadu_ps(ptrC);
1034 tD = _mm256_loadu_ps(ptrD);
1035 tE = _mm256_loadu_ps(ptrE);
1036 tF = _mm256_loadu_ps(ptrF);
1037 tG = _mm256_loadu_ps(ptrG);
1038 tH = _mm256_loadu_ps(ptrH);
1040 t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
1041 t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */
1042 t3 = _mm256_unpacklo_ps(z1, x2); /* x2f z1f x2e z1e | x2b z1b x2a z1a */
1043 t4 = _mm256_unpackhi_ps(z1, x2); /* x2h z1h x2g z1g | x2d z1d x2c z1c */
1045 t5 = _mm256_unpacklo_ps(y2, z2); /* z2f y2f z2e y2e | z2b y2b z2a y2a */
1046 t6 = _mm256_unpackhi_ps(y2, z2); /* z2h y2h z2g y2g | z2d y2d z2c y2c */
1047 t7 = _mm256_unpacklo_ps(x3, y3); /* y3f x3f y3e x3e | y3b x3b y3a x3a */
1048 t8 = _mm256_unpackhi_ps(x3, y3); /* y3h x3h y3g x3g | y3d x3d y3c x3c */
1050 t9 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* x2e z1e y1e x1e | x2a z1a y1a x1a */
1051 t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* x2f z1f y1f x1f | x2b z1b y1b x1b */
1052 t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* x2g z1g y1g x1g | x2c z1c y1c x1c */
1053 t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* x2h z1h y1h x1h | x2d z1d y1d x1d */
1055 t1 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); /* y3e x3e z2e y2e | y3a x3a z2a y2a */
1056 t2 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); /* y3f x3f z2f y2f | y3b x3b z2b y2b */
1057 t3 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0)); /* y3g x3g z2g y2g | y3c x3c z2c y2c */
1058 t4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2)); /* y3h x3h z2h y2h | y3d x3d z2d y2d */
1060 t5 = gmx_mm256_unpack128lo_ps(t9, t1); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
1061 t6 = gmx_mm256_unpack128hi_ps(t9, t1); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
1062 t7 = gmx_mm256_unpack128lo_ps(t10, t2); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
1063 t8 = gmx_mm256_unpack128hi_ps(t10, t2); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
1064 t1 = gmx_mm256_unpack128lo_ps(t11, t3); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
1065 t2 = gmx_mm256_unpack128hi_ps(t11, t3); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
1066 t9 = gmx_mm256_unpack128lo_ps(t12, t4); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
1067 t10 = gmx_mm256_unpack128hi_ps(t12, t4); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
1069 tA = _mm256_sub_ps(tA, t5);
1070 tB = _mm256_sub_ps(tB, t7);
1071 tC = _mm256_sub_ps(tC, t1);
1072 tD = _mm256_sub_ps(tD, t9);
1073 tE = _mm256_sub_ps(tE, t6);
1074 tF = _mm256_sub_ps(tF, t8);
1075 tG = _mm256_sub_ps(tG, t2);
1076 tH = _mm256_sub_ps(tH, t10);
1078 _mm256_storeu_ps(ptrA, tA);
1079 _mm256_storeu_ps(ptrB, tB);
1080 _mm256_storeu_ps(ptrC, tC);
1081 _mm256_storeu_ps(ptrD, tD);
1082 _mm256_storeu_ps(ptrE, tE);
1083 _mm256_storeu_ps(ptrF, tF);
1084 _mm256_storeu_ps(ptrG, tG);
1085 _mm256_storeu_ps(ptrH, tH);
1087 tI = gmx_mm256_set_m128(_mm_load_ss(ptrE+8), _mm_load_ss(ptrA+8));
1088 tJ = gmx_mm256_set_m128(_mm_load_ss(ptrF+8), _mm_load_ss(ptrB+8));
1089 tK = gmx_mm256_set_m128(_mm_load_ss(ptrG+8), _mm_load_ss(ptrC+8));
1090 tL = gmx_mm256_set_m128(_mm_load_ss(ptrH+8), _mm_load_ss(ptrD+8));
1092 tI = _mm256_unpacklo_ps(tI, tK); /* - - zG zE | - - zC zA */
1093 tJ = _mm256_unpacklo_ps(tJ, tL); /* - - zH zF | - - zD zB */
1094 tI = _mm256_unpacklo_ps(tI, tJ); /* zH zG zF zE | zD zC zB zA */
1096 tI = _mm256_sub_ps(tI, z3);
1097 tJ = _mm256_permute_ps(tI, _MM_SHUFFLE(1, 1, 1, 1));
1098 tK = _mm256_permute_ps(tI, _MM_SHUFFLE(2, 2, 2, 2));
1099 tL = _mm256_permute_ps(tI, _MM_SHUFFLE(3, 3, 3, 3));
1101 _mm_store_ss(ptrA+8, _mm256_castps256_ps128(tI));
1102 _mm_store_ss(ptrB+8, _mm256_castps256_ps128(tJ));
1103 _mm_store_ss(ptrC+8, _mm256_castps256_ps128(tK));
1104 _mm_store_ss(ptrD+8, _mm256_castps256_ps128(tL));
1105 _mm_store_ss(ptrE+8, _mm256_extractf128_ps(tI, 0x1));
1106 _mm_store_ss(ptrF+8, _mm256_extractf128_ps(tJ, 0x1));
1107 _mm_store_ss(ptrG+8, _mm256_extractf128_ps(tK, 0x1));
1108 _mm_store_ss(ptrH+8, _mm256_extractf128_ps(tL, 0x1));
1114 #if defined (_MSC_VER) && defined(_M_IX86)
1115 /* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
1116 #define gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(ptrA, ptrB, ptrC, ptrD, ptrE, ptrF, ptrG, ptrH, \
1117 _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
1119 __m256 _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10, _t11, _t12; \
1120 __m256 _tA, _tB, _tC, _tD, _tE, _tF, _tG, _tH, _tI, _tJ, _tK, _tL; \
1122 _tA = _mm256_loadu_ps(ptrA); \
1123 _tB = _mm256_loadu_ps(ptrB); \
1124 _tC = _mm256_loadu_ps(ptrC); \
1125 _tD = _mm256_loadu_ps(ptrD); \
1126 _tE = _mm256_loadu_ps(ptrE); \
1127 _tF = _mm256_loadu_ps(ptrF); \
1128 _tG = _mm256_loadu_ps(ptrG); \
1129 _tH = _mm256_loadu_ps(ptrH); \
1130 _t1 = _mm256_unpacklo_ps(_x1, _y1); \
1131 _t2 = _mm256_unpackhi_ps(_x1, _y1); \
1132 _t3 = _mm256_unpacklo_ps(_z1, _x2); \
1133 _t4 = _mm256_unpackhi_ps(_z1, _x2); \
1134 _t5 = _mm256_unpacklo_ps(_y2, _z2); \
1135 _t6 = _mm256_unpackhi_ps(_y2, _z2); \
1136 _t7 = _mm256_unpacklo_ps(_x3, _y3); \
1137 _t8 = _mm256_unpackhi_ps(_x3, _y3); \
1138 _t9 = _mm256_shuffle_ps(_t1, _t3, _MM_SHUFFLE(1, 0, 1, 0)); \
1139 _t10 = _mm256_shuffle_ps(_t1, _t3, _MM_SHUFFLE(3, 2, 3, 2)); \
1140 _t11 = _mm256_shuffle_ps(_t2, _t4, _MM_SHUFFLE(1, 0, 1, 0)); \
1141 _t12 = _mm256_shuffle_ps(_t2, _t4, _MM_SHUFFLE(3, 2, 3, 2)); \
1142 _t1 = _mm256_shuffle_ps(_t5, _t7, _MM_SHUFFLE(1, 0, 1, 0)); \
1143 _t2 = _mm256_shuffle_ps(_t5, _t7, _MM_SHUFFLE(3, 2, 3, 2)); \
1144 _t3 = _mm256_shuffle_ps(_t6, _t8, _MM_SHUFFLE(1, 0, 1, 0)); \
1145 _t4 = _mm256_shuffle_ps(_t6, _t8, _MM_SHUFFLE(3, 2, 3, 2)); \
1146 _t5 = gmx_mm256_unpack128lo_ps(_t9, _t1); \
1147 _t6 = gmx_mm256_unpack128hi_ps(_t9, _t1); \
1148 _t7 = gmx_mm256_unpack128lo_ps(_t10, _t2); \
1149 _t8 = gmx_mm256_unpack128hi_ps(_t10, _t2); \
1150 _t1 = gmx_mm256_unpack128lo_ps(_t11, _t3); \
1151 _t2 = gmx_mm256_unpack128hi_ps(_t11, _t3); \
1152 _t9 = gmx_mm256_unpack128lo_ps(_t12, _t4); \
1153 _t10 = gmx_mm256_unpack128hi_ps(_t12, _t4); \
1154 _tA = _mm256_sub_ps(_tA, _t5); \
1155 _tB = _mm256_sub_ps(_tB, _t7); \
1156 _tC = _mm256_sub_ps(_tC, _t1); \
1157 _tD = _mm256_sub_ps(_tD, _t9); \
1158 _tE = _mm256_sub_ps(_tE, _t6); \
1159 _tF = _mm256_sub_ps(_tF, _t8); \
1160 _tG = _mm256_sub_ps(_tG, _t2); \
1161 _tH = _mm256_sub_ps(_tH, _t10); \
1162 _mm256_storeu_ps(ptrA, _tA); \
1163 _mm256_storeu_ps(ptrB, _tB); \
1164 _mm256_storeu_ps(ptrC, _tC); \
1165 _mm256_storeu_ps(ptrD, _tD); \
1166 _mm256_storeu_ps(ptrE, _tE); \
1167 _mm256_storeu_ps(ptrF, _tF); \
1168 _mm256_storeu_ps(ptrG, _tG); \
1169 _mm256_storeu_ps(ptrH, _tH); \
1170 _tI = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8), _mm_loadu_ps(ptrA+8)); \
1171 _tJ = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8), _mm_loadu_ps(ptrB+8)); \
1172 _tK = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8), _mm_loadu_ps(ptrC+8)); \
1173 _tL = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8), _mm_loadu_ps(ptrD+8)); \
1174 _t1 = _mm256_unpacklo_ps(_z3, _x4); \
1175 _t2 = _mm256_unpackhi_ps(_z3, _x4); \
1176 _t3 = _mm256_unpacklo_ps(_y4, _z4); \
1177 _t4 = _mm256_unpackhi_ps(_y4, _z4); \
1178 _t5 = _mm256_shuffle_ps(_t1, _t3, _MM_SHUFFLE(1, 0, 1, 0)); \
1179 _t6 = _mm256_shuffle_ps(_t1, _t3, _MM_SHUFFLE(3, 2, 3, 2)); \
1180 _t7 = _mm256_shuffle_ps(_t2, _t4, _MM_SHUFFLE(1, 0, 1, 0)); \
1181 _t8 = _mm256_shuffle_ps(_t2, _t4, _MM_SHUFFLE(3, 2, 3, 2)); \
1182 _tI = _mm256_sub_ps(_tI, _t5); \
1183 _tJ = _mm256_sub_ps(_tJ, _t6); \
1184 _tK = _mm256_sub_ps(_tK, _t7); \
1185 _tL = _mm256_sub_ps(_tL, _t8); \
1186 _mm_storeu_ps(ptrA+8, _mm256_castps256_ps128(_tI)); \
1187 _mm_storeu_ps(ptrB+8, _mm256_castps256_ps128(_tJ)); \
1188 _mm_storeu_ps(ptrC+8, _mm256_castps256_ps128(_tK)); \
1189 _mm_storeu_ps(ptrD+8, _mm256_castps256_ps128(_tL)); \
1190 _mm_storeu_ps(ptrE+8, _mm256_extractf128_ps(_tI, 0x1)); \
1191 _mm_storeu_ps(ptrF+8, _mm256_extractf128_ps(_tJ, 0x1)); \
1192 _mm_storeu_ps(ptrG+8, _mm256_extractf128_ps(_tK, 0x1)); \
1193 _mm_storeu_ps(ptrH+8, _mm256_extractf128_ps(_tL, 0x1)); \
1196 /* Real function for sane compilers */
1197 static gmx_inline void
1198 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
1199 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
1200 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
1201 float * gmx_restrict ptrG, float * gmx_restrict ptrH,
1202 __m256 x1, __m256 y1, __m256 z1,
1203 __m256 x2, __m256 y2, __m256 z2,
1204 __m256 x3, __m256 y3, __m256 z3,
1205 __m256 x4, __m256 y4, __m256 z4)
1207 __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
1208 __m256 tA, tB, tC, tD, tE, tF, tG, tH;
1209 __m256 tI, tJ, tK, tL;
1211 tA = _mm256_loadu_ps(ptrA);
1212 tB = _mm256_loadu_ps(ptrB);
1213 tC = _mm256_loadu_ps(ptrC);
1214 tD = _mm256_loadu_ps(ptrD);
1215 tE = _mm256_loadu_ps(ptrE);
1216 tF = _mm256_loadu_ps(ptrF);
1217 tG = _mm256_loadu_ps(ptrG);
1218 tH = _mm256_loadu_ps(ptrH);
1220 t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
1221 t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */
1222 t3 = _mm256_unpacklo_ps(z1, x2); /* x2f z1f x2e z1e | x2b z1b x2a z1a */
1223 t4 = _mm256_unpackhi_ps(z1, x2); /* x2h z1h x2g z1g | x2d z1d x2c z1c */
1225 t5 = _mm256_unpacklo_ps(y2, z2); /* z2f y2f z2e y2e | z2b y2b z2a y2a */
1226 t6 = _mm256_unpackhi_ps(y2, z2); /* z2h y2h z2g y2g | z2d y2d z2c y2c */
1227 t7 = _mm256_unpacklo_ps(x3, y3); /* y3f x3f y3e x3e | y3b x3b y3a x3a */
1228 t8 = _mm256_unpackhi_ps(x3, y3); /* y3h x3h y3g x3g | y3d x3d y3c x3c */
1230 t9 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* x2e z1e y1e x1e | x2a z1a y1a x1a */
1231 t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* x2f z1f y1f x1f | x2b z1b y1b x1b */
1232 t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* x2g z1g y1g x1g | x2c z1c y1c x1c */
1233 t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* x2h z1h y1h x1h | x2d z1d y1d x1d */
1235 t1 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); /* y3e x3e z2e y2e | y3a x3a z2a y2a */
1236 t2 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); /* y3f x3f z2f y2f | y3b x3b z2b y2b */
1237 t3 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0)); /* y3g x3g z2g y2g | y3c x3c z2c y2c */
1238 t4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2)); /* y3h x3h z2h y2h | y3d x3d z2d y2d */
1240 t5 = gmx_mm256_unpack128lo_ps(t9, t1); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
1241 t6 = gmx_mm256_unpack128hi_ps(t9, t1); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
1242 t7 = gmx_mm256_unpack128lo_ps(t10, t2); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
1243 t8 = gmx_mm256_unpack128hi_ps(t10, t2); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
1244 t1 = gmx_mm256_unpack128lo_ps(t11, t3); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
1245 t2 = gmx_mm256_unpack128hi_ps(t11, t3); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
1246 t9 = gmx_mm256_unpack128lo_ps(t12, t4); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
1247 t10 = gmx_mm256_unpack128hi_ps(t12, t4); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
1249 tA = _mm256_sub_ps(tA, t5);
1250 tB = _mm256_sub_ps(tB, t7);
1251 tC = _mm256_sub_ps(tC, t1);
1252 tD = _mm256_sub_ps(tD, t9);
1253 tE = _mm256_sub_ps(tE, t6);
1254 tF = _mm256_sub_ps(tF, t8);
1255 tG = _mm256_sub_ps(tG, t2);
1256 tH = _mm256_sub_ps(tH, t10);
1258 _mm256_storeu_ps(ptrA, tA);
1259 _mm256_storeu_ps(ptrB, tB);
1260 _mm256_storeu_ps(ptrC, tC);
1261 _mm256_storeu_ps(ptrD, tD);
1262 _mm256_storeu_ps(ptrE, tE);
1263 _mm256_storeu_ps(ptrF, tF);
1264 _mm256_storeu_ps(ptrG, tG);
1265 _mm256_storeu_ps(ptrH, tH);
1267 tI = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8), _mm_loadu_ps(ptrA+8));
1268 tJ = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8), _mm_loadu_ps(ptrB+8));
1269 tK = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8), _mm_loadu_ps(ptrC+8));
1270 tL = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8), _mm_loadu_ps(ptrD+8));
1272 t1 = _mm256_unpacklo_ps(z3, x4); /* x4f z3f x4e z3e | x4b z3b x4a z3a */
1273 t2 = _mm256_unpackhi_ps(z3, x4); /* x4h z3h x4g z3g | x4d z3d x4c z3c */
1274 t3 = _mm256_unpacklo_ps(y4, z4); /* z4f y4f z4e y4e | z4b y4b z4a y4a */
1275 t4 = _mm256_unpackhi_ps(y4, z4); /* z4h y4h z4g y4g | z4d y4d z4c y4c */
1277 t5 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* z4e y4e x4e z3e | z4a y4a x4a z3a */
1278 t6 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* z4f y4f x4f z3f | z4b y4b x4b z3b */
1279 t7 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* z4g y4g x4g z3g | z4c y4c x4c z3c */
1280 t8 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* z4h y4h x4h z3h | z4d y4d x4d z3d */
1282 tI = _mm256_sub_ps(tI, t5);
1283 tJ = _mm256_sub_ps(tJ, t6);
1284 tK = _mm256_sub_ps(tK, t7);
1285 tL = _mm256_sub_ps(tL, t8);
1287 _mm_storeu_ps(ptrA+8, _mm256_castps256_ps128(tI));
1288 _mm_storeu_ps(ptrB+8, _mm256_castps256_ps128(tJ));
1289 _mm_storeu_ps(ptrC+8, _mm256_castps256_ps128(tK));
1290 _mm_storeu_ps(ptrD+8, _mm256_castps256_ps128(tL));
1291 _mm_storeu_ps(ptrE+8, _mm256_extractf128_ps(tI, 0x1));
1292 _mm_storeu_ps(ptrF+8, _mm256_extractf128_ps(tJ, 0x1));
1293 _mm_storeu_ps(ptrG+8, _mm256_extractf128_ps(tK, 0x1));
1294 _mm_storeu_ps(ptrH+8, _mm256_extractf128_ps(tL, 0x1));
1295 }
1296 #endif
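
/* A minimal scalar sketch of what the AVX routine above computes; the name and
 * the pre-gathered v[lane][component] layout are illustrative only, assuming
 * lane k of every __m256 argument corresponds to pointer k (A..H) and each
 * pointer addresses 12 consecutive components x1 y1 z1 ... x4 y4 z4.
 */
static gmx_inline void
gmx_mm256_decrement_4rvec_8ptr_scalar_ref(float * ptr[8], const float v[8][12])
{
    int p, i;

    for (p = 0; p < 8; p++)
    {
        for (i = 0; i < 12; i++)
        {
            ptr[p][i] -= v[p][i];
        }
    }
}
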
1299 static gmx_inline void
1300 gmx_mm256_update_iforce_1atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
1301 float * gmx_restrict fptr,
1302 float * gmx_restrict fshiftptr)
1303 {
1304 __m128 t1, t2, t3;
1306 fix1 = _mm256_hadd_ps(fix1, fix1);
1307 fiy1 = _mm256_hadd_ps(fiy1, fiz1);
1308 fix1 = _mm256_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 (in both lanes) */
1310 /* Add across the two lanes */
1311 t1 = _mm_add_ps(_mm256_castps256_ps128(fix1), _mm256_extractf128_ps(fix1, 0x1));
1313 t2 = _mm_load_ss(fptr);
1314 t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
1315 t3 = _mm_load_ss(fshiftptr);
1316 t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));
1318 t2 = _mm_add_ps(t2, t1);
1319 t3 = _mm_add_ps(t3, t1);
1321 _mm_store_ss(fptr, t2);
1322 _mm_storeh_pi((__m64 *)(fptr+1), t2);
1323 _mm_store_ss(fshiftptr, t3);
1324 _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
1325 }
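
/* A minimal scalar sketch of the single-atom reduction above; the name and the
 * lane-array layout are illustrative only, assuming one interaction per lane.
 */
static gmx_inline void
gmx_mm256_update_iforce_1atom_scalar_ref(const float fix1[8], const float fiy1[8], const float fiz1[8],
                                         float * fptr, float * fshiftptr)
{
    float fx = 0.0f, fy = 0.0f, fz = 0.0f;
    int   k;

    for (k = 0; k < 8; k++)
    {
        fx += fix1[k];
        fy += fiy1[k];
        fz += fiz1[k];
    }
    /* The force and the shift force receive the same lane-summed contribution */
    fptr[0]      += fx;
    fptr[1]      += fy;
    fptr[2]      += fz;
    fshiftptr[0] += fx;
    fshiftptr[1] += fy;
    fshiftptr[2] += fz;
}
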
1327 #if defined (_MSC_VER) && defined(_M_IX86)
1328 /* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
1329 #define gmx_mm256_update_iforce_3atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, \
1330 fptr, fshiftptr) \
1331 { \
1332 __m256 _t1, _t2, _t3; \
1333 __m128 _tA, _tB, _tC; \
1335 fix1 = _mm256_hadd_ps(fix1, fiy1); \
1336 fiz1 = _mm256_hadd_ps(fiz1, fix2); \
1337 fiy2 = _mm256_hadd_ps(fiy2, fiz2); \
1338 fix3 = _mm256_hadd_ps(fix3, fiy3); \
1339 fiz3 = _mm256_hadd_ps(fiz3, _mm256_setzero_ps()); \
1340 fix1 = _mm256_hadd_ps(fix1, fiz1); \
1341 fiy2 = _mm256_hadd_ps(fiy2, fix3); \
1342 fiz3 = _mm256_hadd_ps(fiz3, _mm256_setzero_ps()); \
1344 _t1 = gmx_mm256_unpack128lo_ps(fix1, fiy2); \
1345 _t2 = gmx_mm256_unpack128hi_ps(fix1, fiy2); \
1346 _t1 = _mm256_add_ps(_t1, _t2); \
1347 _tA = _mm_add_ps(_mm256_castps256_ps128(fiz3), _mm256_extractf128_ps(fiz3, 0x1)); \
1348 _t3 = _mm256_loadu_ps(fptr); \
1349 _t3 = _mm256_add_ps(_t3, _t1); \
1350 _mm256_storeu_ps(fptr, _t3); \
1351 _tB = _mm_load_ss(fptr+8); \
1352 _tB = _mm_add_ss(_tB, _tA); \
1353 _mm_store_ss(fptr+8, _tB); \
1355 _tB = _mm256_extractf128_ps(_t1, 0x1); \
1356 _tC = _mm_shuffle_ps(_mm256_castps256_ps128(_t1), _tB, _MM_SHUFFLE(1, 0, 3, 3)); \
1357 _tB = _mm_shuffle_ps(_tB, _tA, _MM_SHUFFLE(1, 0, 3, 2)); \
1358 _tC = _mm_permute_ps(_tC, _MM_SHUFFLE(3, 3, 2, 0)); \
1359 _tB = _mm_add_ps(_tB, _mm256_castps256_ps128(_t1)); \
1360 _tA = _mm_add_ps(_tB, _tC); \
1361 _tA = _mm_blend_ps(_mm_setzero_ps(), _tA, 0x7); \
1362 _tC = _mm_loadu_ps(fshiftptr); \
1363 _tC = _mm_add_ps(_tC, _tA); \
1364 _mm_storeu_ps(fshiftptr, _tC); \
1365 }
1366 #else
1367 /* Real function for sane compilers */
1368 static gmx_inline void
1369 gmx_mm256_update_iforce_3atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
1370 __m256 fix2, __m256 fiy2, __m256 fiz2,
1371 __m256 fix3, __m256 fiy3, __m256 fiz3,
1372 float * gmx_restrict fptr,
1373 float * gmx_restrict fshiftptr)
1374 {
1375 __m256 t1, t2, t3;
1376 __m128 tA, tB, tC;
1378 fix1 = _mm256_hadd_ps(fix1, fiy1); /* Y1g+Y1h Y1e+Y1f X1g+X1h X1e+X1f | Y1c+Y1d Y1a+Y1b X1c+X1d X1a+X1b */
1379 fiz1 = _mm256_hadd_ps(fiz1, fix2); /* X2g+X2h X2e+X2f Z1g+Z1h Z1e+Z1f | X2c+X2d X2a+X2b Z1c+Z1d Z1a+Z1b */
1380 fiy2 = _mm256_hadd_ps(fiy2, fiz2); /* Z2g+Z2h Z2e+Z2f Y2g+Y2h Y2e+Y2f | Z2c+Z2d Z2a+Z2b Y2c+Y2d Y2a+Y2b */
1381 fix3 = _mm256_hadd_ps(fix3, fiy3); /* Y3g+Y3h Y3e+Y3f X3g+X3h X3e+X3f | Y3c+Y3d Y3a+Y3b X3c+X3d X3a+X3b */
1382 fiz3 = _mm256_hadd_ps(fiz3, _mm256_setzero_ps()); /* 0 0 Z3g+Z3h Z3e+Z3f | 0 0 Z3c+Z3d Z3a+Z3b */
1384 fix1 = _mm256_hadd_ps(fix1, fiz1); /* X2e-h Z1e-h Y1e-h X1e-h | X2a-d Z1a-d Y1a-d X1a-d */
1385 fiy2 = _mm256_hadd_ps(fiy2, fix3); /* Y3e-h X3e-h Z2e-h Y2e-h | Y3a-d X3a-d Z2a-d Y2a-d */
1386 fiz3 = _mm256_hadd_ps(fiz3, _mm256_setzero_ps()); /* 0 0 0 Z3e-h | 0 0 0 Z3a-d */
1388 /* Add across the two lanes by swapping and adding back */
1389 t1 = gmx_mm256_unpack128lo_ps(fix1, fiy2); /* Y3a-d X3a-d Z2a-d Y2a-d | X2a-d Z1a-d Y1a-d X1a-d */
1390 t2 = gmx_mm256_unpack128hi_ps(fix1, fiy2); /* Y3e-h X3e-h Z2e-h Y2e-h | X2e-h Z1e-h Y1e-h X1e-h */
1391 t1 = _mm256_add_ps(t1, t2); /* y3 x3 z2 y2 | x2 z1 y1 x1 */
1393 tA = _mm_add_ps(_mm256_castps256_ps128(fiz3), _mm256_extractf128_ps(fiz3, 0x1)); /* 0 0 0 z3 */
1395 t3 = _mm256_loadu_ps(fptr);
1396 t3 = _mm256_add_ps(t3, t1);
1397 _mm256_storeu_ps(fptr, t3);
1398 tB = _mm_load_ss(fptr+8);
1399 tB = _mm_add_ss(tB, tA);
1400 _mm_store_ss(fptr+8, tB);
1402 /* Add up shift force */
1403 tB = _mm256_extractf128_ps(t1, 0x1); /* y3 x3 z2 y2 */
1404 tC = _mm_shuffle_ps(_mm256_castps256_ps128(t1), tB, _MM_SHUFFLE(1, 0, 3, 3)); /* z2 y2 x2 x2 */
1405 tB = _mm_shuffle_ps(tB, tA, _MM_SHUFFLE(1, 0, 3, 2)); /* 0 z3 y3 x3 */
1406 tC = _mm_permute_ps(tC, _MM_SHUFFLE(3, 3, 2, 0)); /* - z2 y2 x2 */
1408 tB = _mm_add_ps(tB, _mm256_castps256_ps128(t1));
1409 tA = _mm_add_ps(tB, tC); /* - z y x */
1411 tA = _mm_blend_ps(_mm_setzero_ps(), tA, 0x7); /* 0 z y x */
1413 tC = _mm_loadu_ps(fshiftptr);
1414 tC = _mm_add_ps(tC, tA);
1415 _mm_storeu_ps(fshiftptr, tC);
1416 }
1417 #endif
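
/* A minimal scalar sketch of the three-atom reduction above; the name and the
 * fi[site][lane] layout are illustrative only. Components are ordered
 * x1 y1 z1 x2 y2 z2 x3 y3 z3, matching the fptr layout.
 */
static gmx_inline void
gmx_mm256_update_iforce_3atom_scalar_ref(const float fi[9][8], float * fptr, float * fshiftptr)
{
    int   s, k;
    float sum;

    for (s = 0; s < 9; s++)
    {
        sum = 0.0f;
        for (k = 0; k < 8; k++)
        {
            sum += fi[s][k];
        }
        fptr[s]          += sum;
        /* The shift force accumulates the x/y/z totals over all three sites */
        fshiftptr[s % 3] += sum;
    }
}
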
1420 #if defined (_MSC_VER) && defined(_M_IX86)
1421 /* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
1422 #define gmx_mm256_update_iforce_4atom_swizzle_ps(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, fix4, fiy4, fiz4, \
1423 fptr, fshiftptr) \
1424 { \
1425 __m256 _t1, _t2, _t3; \
1426 __m128 _tA, _tB, _tC; \
1428 fix1 = _mm256_hadd_ps(fix1, fiy1); \
1429 fiz1 = _mm256_hadd_ps(fiz1, fix2); \
1430 fiy2 = _mm256_hadd_ps(fiy2, fiz2); \
1431 fix3 = _mm256_hadd_ps(fix3, fiy3); \
1432 fiz3 = _mm256_hadd_ps(fiz3, fix4); \
1433 fiy4 = _mm256_hadd_ps(fiy4, fiz4); \
1435 fix1 = _mm256_hadd_ps(fix1, fiz1); \
1436 fiy2 = _mm256_hadd_ps(fiy2, fix3); \
1437 fiz3 = _mm256_hadd_ps(fiz3, fiy4); \
1439 _t1 = gmx_mm256_unpack128lo_ps(fix1, fiy2); \
1440 _t2 = gmx_mm256_unpack128hi_ps(fix1, fiy2); \
1441 _t1 = _mm256_add_ps(_t1, _t2); \
1442 _tA = _mm_add_ps(_mm256_castps256_ps128(fiz3), _mm256_extractf128_ps(fiz3, 0x1)); \
1443 _t3 = _mm256_loadu_ps(fptr); \
1444 _t3 = _mm256_add_ps(_t3, _t1); \
1445 _mm256_storeu_ps(fptr, _t3); \
1446 _tB = _mm_loadu_ps(fptr+8); \
1447 _tB = _mm_add_ps(_tB, _tA); \
1448 _mm_storeu_ps(fptr+8, _tB); \
1450 _tB = _mm256_extractf128_ps(_t1, 0x1); \
1451 _tC = _mm_shuffle_ps(_mm256_castps256_ps128(_t1), _tB, _MM_SHUFFLE(1, 0, 3, 3)); \
1452 _tB = _mm_shuffle_ps(_tB, _tA, _MM_SHUFFLE(1, 0, 3, 2)); \
1453 _tC = _mm_permute_ps(_tC, _MM_SHUFFLE(3, 3, 2, 0)); \
1454 _tA = _mm_permute_ps(_tA, _MM_SHUFFLE(0, 3, 2, 1)); \
1455 _tB = _mm_add_ps(_tB, _mm256_castps256_ps128(_t1)); \
1456 _tA = _mm_add_ps(_tA, _tC); \
1457 _tA = _mm_add_ps(_tA, _tB); \
1458 _tA = _mm_blend_ps(_mm_setzero_ps(), _tA, 0x7); \
1459 _tC = _mm_loadu_ps(fshiftptr); \
1460 _tC = _mm_add_ps(_tC, _tA); \
1461 _mm_storeu_ps(fshiftptr, _tC); \
1462 }
1463 #else
1464 /* Real function for sane compilers */
1465 static gmx_inline void
1466 gmx_mm256_update_iforce_4atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
1467 __m256 fix2, __m256 fiy2, __m256 fiz2,
1468 __m256 fix3, __m256 fiy3, __m256 fiz3,
1469 __m256 fix4, __m256 fiy4, __m256 fiz4,
1470 float * gmx_restrict fptr,
1471 float * gmx_restrict fshiftptr)
1472 {
1473 __m256 t1, t2, t3;
1474 __m128 tA, tB, tC;
1476 fix1 = _mm256_hadd_ps(fix1, fiy1); /* Y1g+Y1h Y1e+Y1f X1g+X1h X1e+X1f | Y1c+Y1d Y1a+Y1b X1c+X1d X1a+X1b */
1477 fiz1 = _mm256_hadd_ps(fiz1, fix2); /* X2g+X2h X2e+X2f Z1g+Z1h Z1e+Z1f | X2c+X2d X2a+X2b Z1c+Z1d Z1a+Z1b */
1478 fiy2 = _mm256_hadd_ps(fiy2, fiz2); /* Z2g+Z2h Z2e+Z2f Y2g+Y2h Y2e+Y2f | Z2c+Z2d Z2a+Z2b Y2c+Y2d Y2a+Y2b */
1479 fix3 = _mm256_hadd_ps(fix3, fiy3); /* Y3g+Y3h Y3e+Y3f X3g+X3h X3e+X3f | Y3c+Y3d Y3a+Y3b X3c+X3d X3a+X3b */
1480 fiz3 = _mm256_hadd_ps(fiz3, fix4); /* X4g+X4h X4e+X4f Z3g+Z3h Z3e+Z3f | X4c+X4d X4a+X4b Z3c+Z3d Z3a+Z3b */
1481 fiy4 = _mm256_hadd_ps(fiy4, fiz4); /* Z4g+Z4h Z4e+Z4f Y4g+Y4h Y4e+Y4f | Z4c+Z4d Z4a+Z4b Y4c+Y4d Y4a+Y4b */
1483 fix1 = _mm256_hadd_ps(fix1, fiz1); /* X2e-h Z1e-h Y1e-h X1e-h | X2a-d Z1a-d Y1a-d X1a-d */
1484 fiy2 = _mm256_hadd_ps(fiy2, fix3); /* Y3e-h X3e-h Z2e-h Y2e-h | Y3a-d X3a-d Z2a-d Y2a-d */
1485 fiz3 = _mm256_hadd_ps(fiz3, fiy4); /* Z4e-h Y4e-h X4e-h Z3e-h | Z4a-d Y4a-d X4a-d Z3a-d */
1487 /* Add across the two lanes by swapping and adding back */
1488 t1 = gmx_mm256_unpack128lo_ps(fix1, fiy2); /* Y3a-d X3a-d Z2a-d Y2a-d | X2a-d Z1a-d Y1a-d X1a-d */
1489 t2 = gmx_mm256_unpack128hi_ps(fix1, fiy2); /* Y3e-h X3e-h Z2e-h Y2e-h | X2e-h Z1e-h Y1e-h X1e-h */
1490 t1 = _mm256_add_ps(t1, t2); /* y3 x3 z2 y2 | x2 z1 y1 x1 */
1492 tA = _mm_add_ps(_mm256_castps256_ps128(fiz3), _mm256_extractf128_ps(fiz3, 0x1)); /* z4 y4 x4 z3 */
1494 t3 = _mm256_loadu_ps(fptr);
1495 t3 = _mm256_add_ps(t3, t1);
1496 _mm256_storeu_ps(fptr, t3);
1498 tB = _mm_loadu_ps(fptr+8);
1499 tB = _mm_add_ps(tB, tA);
1500 _mm_storeu_ps(fptr+8, tB);
1502 /* Add up shift force */
1503 tB = _mm256_extractf128_ps(t1, 0x1); /* y3 x3 z2 y2 */
1504 tC = _mm_shuffle_ps(_mm256_castps256_ps128(t1), tB, _MM_SHUFFLE(1, 0, 3, 3)); /* z2 y2 x2 x2 */
1505 tB = _mm_shuffle_ps(tB, tA, _MM_SHUFFLE(1, 0, 3, 2)); /* x4 z3 y3 x3 (x4 ends up in the element zeroed by the blend below) */
1506 tC = _mm_permute_ps(tC, _MM_SHUFFLE(3, 3, 2, 0)); /* - z2 y2 x2 */
1507 tA = _mm_permute_ps(tA, _MM_SHUFFLE(0, 3, 2, 1)); /* - z4 y4 x4 */
1509 tB = _mm_add_ps(tB, _mm256_castps256_ps128(t1));
1510 tA = _mm_add_ps(tA, tC);
1511 tA = _mm_add_ps(tA, tB);
1513 tA = _mm_blend_ps(_mm_setzero_ps(), tA, 0x7); /* 0 z y x */
1515 tC = _mm_loadu_ps(fshiftptr);
1516 tC = _mm_add_ps(tC, tA);
1517 _mm_storeu_ps(fshiftptr, tC);
1518 }
1519 #endif
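
/* The same sketch for four sites (illustrative name and layout): twelve
 * components are lane-summed into fptr, and only the x/y/z totals reach the
 * shift force, matching the blend with 0x7 above.
 */
static gmx_inline void
gmx_mm256_update_iforce_4atom_scalar_ref(const float fi[12][8], float * fptr, float * fshiftptr)
{
    int   s, k;
    float sum;

    for (s = 0; s < 12; s++)
    {
        sum = 0.0f;
        for (k = 0; k < 8; k++)
        {
            sum += fi[s][k];
        }
        fptr[s]          += sum;
        fshiftptr[s % 3] += sum;
    }
}
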
1523 static gmx_inline void
1524 gmx_mm256_update_1pot_ps(__m256 pot1, float * gmx_restrict ptrA)
1525 {
1526 __m128 t1;
1528 pot1 = _mm256_hadd_ps(pot1, pot1);
1529 pot1 = _mm256_hadd_ps(pot1, pot1);
1531 t1 = _mm_add_ps(_mm256_castps256_ps128(pot1), _mm256_extractf128_ps(pot1, 0x1));
1533 _mm_store_ss(ptrA, _mm_add_ss(_mm_load_ss(ptrA), t1));
1534 }
1536 static gmx_inline void
1537 gmx_mm256_update_2pot_ps(__m256 pot1, float * gmx_restrict ptrA,
1538 __m256 pot2, float * gmx_restrict ptrB)
1539 {
1540 __m128 t1, t2;
1542 pot1 = _mm256_hadd_ps(pot1, pot2);
1543 pot1 = _mm256_hadd_ps(pot1, pot1);
1545 t1 = _mm_add_ps(_mm256_castps256_ps128(pot1), _mm256_extractf128_ps(pot1, 0x1));
1547 t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
1548 _mm_store_ss(ptrA, _mm_add_ss(_mm_load_ss(ptrA), t1));
1549 _mm_store_ss(ptrB, _mm_add_ss(_mm_load_ss(ptrB), t2));
1550 }
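
/* A minimal scalar sketch of the potential reductions above (illustrative name):
 * each __m256 accumulator is summed over its eight lanes and added to one float.
 */
static gmx_inline void
gmx_mm256_update_1pot_scalar_ref(const float pot1[8], float * ptrA)
{
    int   k;
    float sum = 0.0f;

    for (k = 0; k < 8; k++)
    {
        sum += pot1[k];
    }
    *ptrA += sum;
}
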
1553 #endif /* _kernelutil_x86_avx_256_single_h_ */