/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_avx_256_single_h_
#define _kernelutil_x86_avx_256_single_h_

#define gmx_mm_castsi128_ps(a) _mm_castsi128_ps(a)
static gmx_inline __m256 gmx_simdcall
gmx_mm256_unpack128lo_ps(__m256 xmm1, __m256 xmm2)
{
    return _mm256_permute2f128_ps(xmm1, xmm2, 0x20);
}

static gmx_inline __m256 gmx_simdcall
gmx_mm256_unpack128hi_ps(__m256 xmm1, __m256 xmm2)
{
    return _mm256_permute2f128_ps(xmm1, xmm2, 0x31);
}

static gmx_inline __m256 gmx_simdcall
gmx_mm256_set_m128(__m128 hi, __m128 lo)
{
    return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 0x1);
}
/* Work around gcc bug with wrong type for mask formal parameter to maskload/maskstore */
#ifdef GMX_SIMD_X86_AVX_GCC_MASKLOAD_BUG
#    define gmx_mm_maskload_ps(mem, mask)        _mm_maskload_ps((mem), _mm_castsi128_ps(mask))
#    define gmx_mm_maskstore_ps(mem, mask, x)    _mm_maskstore_ps((mem), _mm_castsi128_ps(mask), (x))
#    define gmx_mm256_maskload_ps(mem, mask)     _mm256_maskload_ps((mem), _mm256_castsi256_ps(mask))
#    define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), _mm256_castsi256_ps(mask), (x))
#else
#    define gmx_mm_maskload_ps(mem, mask)        _mm_maskload_ps((mem), (mask))
#    define gmx_mm_maskstore_ps(mem, mask, x)    _mm_maskstore_ps((mem), (mask), (x))
#    define gmx_mm256_maskload_ps(mem, mask)     _mm256_maskload_ps((mem), (mask))
#    define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), (mask), (x))
#endif
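/* Illustrative usage only (the pattern used by the swizzle loaders further
 * down in this file): callers always build the mask as an integer register,
 *
 *     __m128i mask = _mm_set_epi32(0, -1, -1, -1);
 *     __m128  xyz  = gmx_mm_maskload_ps(ptr, mask);
 *
 * and the workaround above merely reinterprets that mask to match the
 * float-typed prototype exposed by the affected gcc versions.
 */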
/* Transpose lower/upper half of 256-bit registers separately */
#define GMX_MM256_HALFTRANSPOSE4_PS(ymm0, ymm1, ymm2, ymm3) {                       \
        __m256 __tmp0, __tmp1, __tmp2, __tmp3;                                      \
        __tmp0 = _mm256_unpacklo_ps((ymm0), (ymm1));                                \
        __tmp1 = _mm256_unpacklo_ps((ymm2), (ymm3));                                \
        __tmp2 = _mm256_unpackhi_ps((ymm0), (ymm1));                                \
        __tmp3 = _mm256_unpackhi_ps((ymm2), (ymm3));                                \
        ymm0   = _mm256_shuffle_ps(__tmp0, __tmp1, _MM_SHUFFLE(1, 0, 1, 0));        \
        ymm1   = _mm256_shuffle_ps(__tmp0, __tmp1, _MM_SHUFFLE(3, 2, 3, 2));        \
        ymm2   = _mm256_shuffle_ps(__tmp2, __tmp3, _MM_SHUFFLE(1, 0, 1, 0));        \
        ymm3   = _mm256_shuffle_ps(__tmp2, __tmp3, _MM_SHUFFLE(3, 2, 3, 2));        \
}
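/* After the macro, element i of ymmN holds what was element N of the original
 * ymmi, independently in the low and high 128-bit lanes, i.e. two 4x4 float
 * transposes performed in parallel. */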
static gmx_inline __m256 gmx_simdcall
gmx_mm256_calc_rsq_ps(__m256 dx, __m256 dy, __m256 dz)
{
    return _mm256_add_ps( _mm256_add_ps( _mm256_mul_ps(dx, dx), _mm256_mul_ps(dy, dy) ), _mm256_mul_ps(dz, dz) );
}

/* Normal sum of four ymm registers */
#define gmx_mm256_sum4_ps(t0, t1, t2, t3) _mm256_add_ps(_mm256_add_ps(t0, t1), _mm256_add_ps(t2, t3))

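/* Returns a non-zero bitmask if any element of a is smaller than the
 * corresponding element of b, and zero otherwise. */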
static gmx_inline int gmx_simdcall
gmx_mm256_any_lt(__m256 a, __m256 b)
{
    return _mm256_movemask_ps(_mm256_cmp_ps(a, b, _CMP_LT_OQ));
}

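/* Gather one float from each of four (or eight) pointers. The 4-pointer
 * version returns the values as the four elements of the low 128-bit lane;
 * the 8-pointer version fills the full register, with pointers A-D in the
 * low lane and E-H in the high lane. */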
static gmx_inline __m256 gmx_simdcall
gmx_mm256_load_4real_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                const float * gmx_restrict ptrC, const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm256_castps128_ps256(_mm_unpacklo_ps(t1, t2));
}

static gmx_inline __m256 gmx_simdcall
gmx_mm256_load_8real_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
                                const float * gmx_restrict ptrG, const float * gmx_restrict ptrH)
{
    __m256 t1, t2;

    t1 = gmx_mm256_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    t2 = gmx_mm256_load_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH);

    return _mm256_permute2f128_ps(t1, t2, 0x20);
}

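/* Scatter the four (or eight) elements of a swizzled register back to the
 * individual pointers: element 0 goes to ptrA, element 1 to ptrB, and so on. */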
static gmx_inline void gmx_simdcall
gmx_mm256_store_4real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                 float * gmx_restrict ptrC, float * gmx_restrict ptrD, __m256 xmm1)
{
    __m256 t2, t3, t4;

    t2 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t3 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(2, 2, 2, 2));
    t4 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(3, 3, 3, 3));
    _mm_store_ss(ptrA, _mm256_castps256_ps128(xmm1));
    _mm_store_ss(ptrB, _mm256_castps256_ps128(t2));
    _mm_store_ss(ptrC, _mm256_castps256_ps128(t3));
    _mm_store_ss(ptrD, _mm256_castps256_ps128(t4));
}

static gmx_inline void gmx_simdcall
gmx_mm256_store_8real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                 float * gmx_restrict ptrG, float * gmx_restrict ptrH, __m256 xmm1)
{
    __m256 t1;

    t1 = _mm256_permute2f128_ps(xmm1, xmm1, 0x11);

    gmx_mm256_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, xmm1);
    gmx_mm256_store_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH, t1);
}

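/* Same layout as the stores above, but the swizzled values are added to the
 * contents already present at the four (or eight) destination addresses. */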
static gmx_inline void gmx_simdcall
gmx_mm256_increment_4real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                     float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                     __m256 xmm1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm256_castps256_ps128(xmm1);
    t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    t3 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    t4 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));

    t1 = _mm_add_ss(t1, _mm_load_ss(ptrA));
    t2 = _mm_add_ss(t2, _mm_load_ss(ptrB));
    t3 = _mm_add_ss(t3, _mm_load_ss(ptrC));
    t4 = _mm_add_ss(t4, _mm_load_ss(ptrD));

    _mm_store_ss(ptrA, t1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}

static gmx_inline void gmx_simdcall
gmx_mm256_increment_8real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                     float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                     float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                     float * gmx_restrict ptrG, float * gmx_restrict ptrH,
                                     __m256 xmm1)
{
    __m256 t1;

    t1 = _mm256_permute2f128_ps(xmm1, xmm1, 0x11);

    gmx_mm256_increment_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, xmm1);
    gmx_mm256_increment_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH, t1);
}

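/* Each pointer below refers to a {c6, c12} Lennard-Jones parameter pair.
 * The pairs are transposed so that *c6 collects all c6 values and *c12 all
 * c12 values (low 128-bit lane only for the 4-pair version, full register
 * for the 8-pair version). */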
static gmx_inline void gmx_simdcall
gmx_mm256_load_4pair_swizzle_ps(const float * gmx_restrict p1, const float * gmx_restrict p2,
                                const float * gmx_restrict p3, const float * gmx_restrict p4,
                                __m256 * gmx_restrict c6, __m256 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1); /* - - c12a c6a */
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2); /* - - c12b c6b */
    t3 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3); /* - - c12c c6c */
    t4 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4); /* - - c12d c6d */

    t1 = _mm_unpacklo_ps(t1, t2); /* c12b c12a c6b c6a */
    t3 = _mm_unpacklo_ps(t3, t4); /* c12d c12c c6d c6c */

    *c6  = _mm256_castps128_ps256(_mm_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)));
    *c12 = _mm256_castps128_ps256(_mm_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)));
}

static gmx_inline void gmx_simdcall
gmx_mm256_load_8pair_swizzle_ps(const float * gmx_restrict p1, const float * gmx_restrict p2,
                                const float * gmx_restrict p3, const float * gmx_restrict p4,
                                const float * gmx_restrict p5, const float * gmx_restrict p6,
                                const float * gmx_restrict p7, const float * gmx_restrict p8,
                                __m256 * gmx_restrict c6, __m256 * gmx_restrict c12)
{
    __m256 c6l, c6h, c12l, c12h;

    gmx_mm256_load_4pair_swizzle_ps(p1, p2, p3, p4, &c6l, &c12l);
    gmx_mm256_load_4pair_swizzle_ps(p5, p6, p7, p8, &c6h, &c12h);

    *c6  = _mm256_permute2f128_ps(c6l, c6h, 0x20);
    *c12 = _mm256_permute2f128_ps(c12l, c12h, 0x20);
}

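/* The following loaders add the shift vector xyz_shift to one, three or four
 * coordinate triplets read from xyz, and broadcast each resulting component
 * to all eight elements of its output register. */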
static gmx_inline void gmx_simdcall
gmx_mm256_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                            const float * gmx_restrict xyz,
                                            __m256 * gmx_restrict x1,
                                            __m256 * gmx_restrict y1,
                                            __m256 * gmx_restrict z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    t1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    t3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));

    *x1 = gmx_mm256_set_m128(t1, t1);
    *y1 = gmx_mm256_set_m128(t2, t2);
    *z1 = gmx_mm256_set_m128(t3, t3);
}

static gmx_inline void gmx_simdcall
gmx_mm256_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                            const float * gmx_restrict xyz,
                                            __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                            __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                            __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    t9 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
    t8 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    t7 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    t6 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    t5 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    t4 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    t3 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    t1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));

    *x1 = gmx_mm256_set_m128(t1, t1);
    *y1 = gmx_mm256_set_m128(t2, t2);
    *z1 = gmx_mm256_set_m128(t3, t3);
    *x2 = gmx_mm256_set_m128(t4, t4);
    *y2 = gmx_mm256_set_m128(t5, t5);
    *z2 = gmx_mm256_set_m128(t6, t6);
    *x3 = gmx_mm256_set_m128(t7, t7);
    *y3 = gmx_mm256_set_m128(t8, t8);
    *z3 = gmx_mm256_set_m128(t9, t9);
}

static gmx_inline void gmx_simdcall
gmx_mm256_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                            const float * gmx_restrict xyz,
                                            __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                            __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                            __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
                                            __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    t12 = _mm_permute_ps(t3, _MM_SHUFFLE(3, 3, 3, 3));
    t11 = _mm_permute_ps(t3, _MM_SHUFFLE(2, 2, 2, 2));
    t10 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 1, 1, 1));
    t9  = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
    t8  = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    t7  = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    t6  = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    t5  = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    t4  = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    t3  = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    t2  = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    t1  = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));

    *x1 = gmx_mm256_set_m128(t1, t1);
    *y1 = gmx_mm256_set_m128(t2, t2);
    *z1 = gmx_mm256_set_m128(t3, t3);
    *x2 = gmx_mm256_set_m128(t4, t4);
    *y2 = gmx_mm256_set_m128(t5, t5);
    *z2 = gmx_mm256_set_m128(t6, t6);
    *x3 = gmx_mm256_set_m128(t7, t7);
    *y3 = gmx_mm256_set_m128(t8, t8);
    *z3 = gmx_mm256_set_m128(t9, t9);
    *x4 = gmx_mm256_set_m128(t10, t10);
    *y4 = gmx_mm256_set_m128(t11, t11);
    *z4 = gmx_mm256_set_m128(t12, t12);
}

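/* The *rvec_4ptr / *rvec_8ptr loaders below transpose coordinates stored as
 * x/y/z triplets at four or eight pointers into one register per component.
 * The 4-pointer variants leave their results in the low 128-bit lane only. */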
static gmx_inline void gmx_simdcall
gmx_mm256_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1)
{
    __m128  t1, t2, t3, t4;
    __m128i mask = _mm_set_epi32(0, -1, -1, -1);
    t1 = gmx_mm_maskload_ps(ptrA, mask);
    t2 = gmx_mm_maskload_ps(ptrB, mask);
    t3 = gmx_mm_maskload_ps(ptrC, mask);
    t4 = gmx_mm_maskload_ps(ptrD, mask);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = _mm256_castps128_ps256(t1);
    *y1 = _mm256_castps128_ps256(t2);
    *z1 = _mm256_castps128_ps256(t3);
}

static gmx_inline void gmx_simdcall
gmx_mm256_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                     __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                     __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = _mm256_castps128_ps256(t1);
    *y1 = _mm256_castps128_ps256(t2);
    *z1 = _mm256_castps128_ps256(t3);
    *x2 = _mm256_castps128_ps256(t4);
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = _mm256_castps128_ps256(t1);
    *z2 = _mm256_castps128_ps256(t2);
    *x3 = _mm256_castps128_ps256(t3);
    *y3 = _mm256_castps128_ps256(t4);
    t1  = _mm_load_ss(ptrA+8);
    t2  = _mm_load_ss(ptrB+8);
    t3  = _mm_load_ss(ptrC+8);
    t4  = _mm_load_ss(ptrD+8);
    t1  = _mm_unpacklo_ps(t1, t3);
    t3  = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm256_castps128_ps256(_mm_unpacklo_ps(t1, t3));
}

static gmx_inline void gmx_simdcall
gmx_mm256_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                     __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                     __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
                                     __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;
    t1  = _mm_loadu_ps(ptrA);
    t2  = _mm_loadu_ps(ptrB);
    t3  = _mm_loadu_ps(ptrC);
    t4  = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = _mm256_castps128_ps256(t1);
    *y1 = _mm256_castps128_ps256(t2);
    *z1 = _mm256_castps128_ps256(t3);
    *x2 = _mm256_castps128_ps256(t4);
    t1  = _mm_loadu_ps(ptrA+4);
    t2  = _mm_loadu_ps(ptrB+4);
    t3  = _mm_loadu_ps(ptrC+4);
    t4  = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = _mm256_castps128_ps256(t1);
    *z2 = _mm256_castps128_ps256(t2);
    *x3 = _mm256_castps128_ps256(t3);
    *y3 = _mm256_castps128_ps256(t4);
    t1  = _mm_loadu_ps(ptrA+8);
    t2  = _mm_loadu_ps(ptrB+8);
    t3  = _mm_loadu_ps(ptrC+8);
    t4  = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = _mm256_castps128_ps256(t1);
    *x4 = _mm256_castps128_ps256(t2);
    *y4 = _mm256_castps128_ps256(t3);
    *z4 = _mm256_castps128_ps256(t4);
}

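/* The 8-pointer loaders below fill all eight elements per component: data
 * from ptrA-ptrD ends up in the low 128-bit lane and data from ptrE-ptrH
 * in the high lane. */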
463 static gmx_inline void gmx_simdcall
464 gmx_mm256_load_1rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
465 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
466 const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
467 const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
468 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1)
470 __m256 t1, t2, t3, t4, t5, t6, t7, t8;
471 __m128i mask = _mm_set_epi32(0, -1, -1, -1);
473 t1 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrE, mask), gmx_mm_maskload_ps(ptrA, mask)); /* - zE yE xE | - zA yA xA */
474 t2 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrF, mask), gmx_mm_maskload_ps(ptrB, mask)); /* - zF yF xF | - zB yB xB */
475 t3 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrG, mask), gmx_mm_maskload_ps(ptrC, mask)); /* - zG yG xG | - zC yC xC */
476 t4 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrH, mask), gmx_mm_maskload_ps(ptrD, mask)); /* - zH yH xH | - zD yD xD */
478 t5 = _mm256_unpacklo_ps(t1, t2); /* yF yE xF xE | yB yA xB xA */
479 t6 = _mm256_unpacklo_ps(t3, t4); /* yH yG xH xG | yD yC xD xC */
480 t7 = _mm256_unpackhi_ps(t1, t2); /* - - zF zE | - - zB zA */
481 t8 = _mm256_unpackhi_ps(t3, t4); /* - - zH zG | - - zD zC */
483 *x1 = _mm256_shuffle_ps(t5, t6, _MM_SHUFFLE(1, 0, 1, 0));
484 *y1 = _mm256_shuffle_ps(t5, t6, _MM_SHUFFLE(3, 2, 3, 2));
485 *z1 = _mm256_shuffle_ps(t7, t8, _MM_SHUFFLE(1, 0, 1, 0));
489 static gmx_inline void gmx_simdcall
490 gmx_mm256_load_3rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
491 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
492 const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
493 const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
494 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
495 __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
496 __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
498 __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
500 t1 = _mm256_loadu_ps(ptrA); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
501 t2 = _mm256_loadu_ps(ptrB); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
502 t3 = _mm256_loadu_ps(ptrC); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
503 t4 = _mm256_loadu_ps(ptrD); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
504 t5 = _mm256_loadu_ps(ptrE); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
505 t6 = _mm256_loadu_ps(ptrF); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
506 t7 = _mm256_loadu_ps(ptrG); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
507 t8 = _mm256_loadu_ps(ptrH); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
509 t9 = _mm256_unpacklo_ps(t1, t2); /* z2b z2a y2b y2a | y1b y1a x1b x1a */
510 t10 = _mm256_unpackhi_ps(t1, t2); /* y3b y3a x3b x3a | x2b x2a z1b z1a */
511 t11 = _mm256_unpacklo_ps(t3, t4); /* z2d z2c y2d y2c | y1d y1c x1d x1c */
512 t12 = _mm256_unpackhi_ps(t3, t4); /* y3d y3c x3d x3c | x2d x2c z1d z1c */
513 t1 = _mm256_unpacklo_ps(t5, t6); /* z2f z2e y2f y2e | y1f y1e x1f x1e */
514 t2 = _mm256_unpackhi_ps(t5, t6); /* y3f y3e x3f x3e | x2f x2e z1f z1e */
515 t3 = _mm256_unpacklo_ps(t7, t8); /* z2h z2g y2h y2g | y1h y1g x1h x1g */
516 t4 = _mm256_unpackhi_ps(t7, t8); /* y3h y3g x3h x3g | x2h x2g z1h z1g */
518 t5 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(1, 0, 1, 0)); /* y2d y2c y2b y2a | x1d x1c x1b x1a */
519 t6 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(3, 2, 3, 2)); /* z2d z2c z2b z2a | y1d y1c y1b y1a */
520 t7 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(1, 0, 1, 0)); /* x3d x3c x3b x3a | z1d z1c z1b z1a */
521 t8 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d y3c y3b y3a | x2d x2c x2b x2a */
523 t9 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* y2h y2g y2f y2e | x1h x1g x1f x1e */
524 t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* z2h z2g z2f z2e | y1h y1g y1f y1e */
525 t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* x3h x3g x3f x3e | z1h z1g z1f z1e */
526 t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* y3h y3g y3f y3e | x2h x2g x2f x2e */
528 *x1 = _mm256_permute2f128_ps(t5, t9, 0x20);
529 *y1 = _mm256_permute2f128_ps(t6, t10, 0x20);
530 *z1 = _mm256_permute2f128_ps(t7, t11, 0x20);
531 *x2 = _mm256_permute2f128_ps(t8, t12, 0x20);
533 *y2 = _mm256_permute2f128_ps(t5, t9, 0x31);
534 *z2 = _mm256_permute2f128_ps(t6, t10, 0x31);
535 *x3 = _mm256_permute2f128_ps(t7, t11, 0x31);
536 *y3 = _mm256_permute2f128_ps(t8, t12, 0x31);
538 t1 = gmx_mm256_set_m128(_mm_load_ss(ptrE+8), _mm_load_ss(ptrA+8));
539 t2 = gmx_mm256_set_m128(_mm_load_ss(ptrF+8), _mm_load_ss(ptrB+8));
540 t3 = gmx_mm256_set_m128(_mm_load_ss(ptrG+8), _mm_load_ss(ptrC+8));
541 t4 = gmx_mm256_set_m128(_mm_load_ss(ptrH+8), _mm_load_ss(ptrD+8));
543 t1 = _mm256_unpacklo_ps(t1, t3); /* - - z3g z3e | - - z3c z3a */
544 t2 = _mm256_unpacklo_ps(t2, t4); /* - - z3h z3f | - - z3d z3b */
546 *z3 = _mm256_unpacklo_ps(t1, t2);
551 static gmx_inline void gmx_simdcall
552 gmx_mm256_load_4rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
553 const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
554 const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
555 const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
556 __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
557 __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
558 __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
559 __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
561 __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
563 t1 = _mm256_loadu_ps(ptrA); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
564 t2 = _mm256_loadu_ps(ptrB); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
565 t3 = _mm256_loadu_ps(ptrC); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
566 t4 = _mm256_loadu_ps(ptrD); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
567 t5 = _mm256_loadu_ps(ptrE); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
568 t6 = _mm256_loadu_ps(ptrF); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
569 t7 = _mm256_loadu_ps(ptrG); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
570 t8 = _mm256_loadu_ps(ptrH); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
572 t9 = _mm256_unpacklo_ps(t1, t2); /* z2b z2a y2b y2a | y1b y1a x1b x1a */
573 t10 = _mm256_unpackhi_ps(t1, t2); /* y3b y3a x3b x3a | x2b x2a z1b z1a */
574 t11 = _mm256_unpacklo_ps(t3, t4); /* z2d z2c y2d y2c | y1d y1c x1d x1c */
575 t12 = _mm256_unpackhi_ps(t3, t4); /* y3d y3c x3d x3c | x2d x2c z1d z1c */
576 t1 = _mm256_unpacklo_ps(t5, t6); /* z2f z2e y2f y2e | y1f y1e x1f x1e */
577 t2 = _mm256_unpackhi_ps(t5, t6); /* y3f y3e x3f x3e | x2f x2e z1f z1e */
578 t3 = _mm256_unpacklo_ps(t7, t8); /* z2h z2g y2h y2g | y1h y1g x1h x1g */
579 t4 = _mm256_unpackhi_ps(t7, t8); /* y3h y3g x3h x3g | x2h x2g z1h z1g */
581 t5 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(1, 0, 1, 0)); /* y2d y2c y2b y2a | x1d x1c x1b x1a */
582 t6 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(3, 2, 3, 2)); /* z2d z2c z2b z2a | y1d y1c y1b y1a */
583 t7 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(1, 0, 1, 0)); /* x3d x3c x3b x3a | z1d z1c z1b z1a */
584 t8 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d y3c y3b y3a | x2d x2c x2b x2a */
585 t9 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* y2h y2g y2f y2e | x1h x1g x1f x1e */
586 t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* z2h z2g z2f z2e | y1h y1g y1f y1e */
587 t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* x3h x3g x3f x3e | z1h z1g z1f z1e */
588 t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* y3h y3g y3f y3e | x2h x2g x2f x2e */
590 *x1 = _mm256_permute2f128_ps(t5, t9, 0x20);
591 *y1 = _mm256_permute2f128_ps(t6, t10, 0x20);
592 *z1 = _mm256_permute2f128_ps(t7, t11, 0x20);
593 *x2 = _mm256_permute2f128_ps(t8, t12, 0x20);
595 *y2 = _mm256_permute2f128_ps(t5, t9, 0x31);
596 *z2 = _mm256_permute2f128_ps(t6, t10, 0x31);
597 *x3 = _mm256_permute2f128_ps(t7, t11, 0x31);
598 *y3 = _mm256_permute2f128_ps(t8, t12, 0x31);
600 t1 = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8), _mm_loadu_ps(ptrA+8)); /* z4e y4e x4e z3e | z4a y4a x4a z3a */
601 t2 = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8), _mm_loadu_ps(ptrB+8)); /* z4f y4f x4f z3f | z4b y4b x4b z3b */
602 t3 = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8), _mm_loadu_ps(ptrC+8)); /* z4g y4g x4g z3g | z4c y4c x4c z3c */
603 t4 = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8), _mm_loadu_ps(ptrD+8)); /* z4h y4h x4h z3h | z4d y4d x4d z3d */
605 t5 = _mm256_unpacklo_ps(t1, t2); /* x4f x4e z3f z3e | x4b x4a z3b z3a */
606 t6 = _mm256_unpackhi_ps(t1, t2); /* z4f z4e y4f y4e | z4b z4a y4b y4a */
607 t7 = _mm256_unpacklo_ps(t3, t4); /* x4h x4g z3h z3g | x4d x4c z3d z3c */
608 t8 = _mm256_unpackhi_ps(t3, t4); /* z4h z4g y4h y4g | z4d z4c y4d y4c */
610 *z3 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); /* z3h z3g z3f z3e | z3d z3c z3b z3a */
611 *x4 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); /* x4h x4g x4f x4e | x4d x4c x4b x4a */
612 *y4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0)); /* y4h y4g y4f y4e | y4d y4c y4b y4a */
613 *z4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2)); /* z4h z4g z4f z4e | z4d z4c z4b z4a */
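/* The decrement functions below perform the reverse operation for the force
 * arrays: the per-interaction forces held in SIMD registers are transposed
 * back to x/y/z triplet order and subtracted from the values in memory at
 * the four or eight pointers. */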
617 static gmx_inline void gmx_simdcall
618 gmx_mm256_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
619 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
620 __m256 x1, __m256 y1, __m256 z1)
622 __m128 t1, t2, t3, t4, t5, t6, t7, t8;
625 /* Construct a mask without executing any data loads */
626 mask = _mm_blend_epi16(_mm_setzero_si128(), _mm_cmpeq_epi16(_mm_setzero_si128(), _mm_setzero_si128()), 0x3F);
628 t3 = _mm_unpacklo_ps(_mm256_castps256_ps128(x1), _mm256_castps256_ps128(y1)); /* y1b x1b y1a x1a */
629 t4 = _mm_unpackhi_ps(_mm256_castps256_ps128(x1), _mm256_castps256_ps128(y1)); /* y1d x1d y1c x1c */
631 t1 = _mm_shuffle_ps(t3, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 0, 1, 0)); /* - z1a y1a x1a */
632 t2 = _mm_shuffle_ps(t3, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 1, 3, 2)); /* - z1b y1b x1b */
633 t3 = _mm_shuffle_ps(t4, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 2, 1, 0)); /* - z1c y1c x1c */
634 t4 = _mm_shuffle_ps(t4, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 3, 3, 2)); /* - z1d y1d x1d */
636 t5 = gmx_mm_maskload_ps(ptrA, mask);
637 t6 = gmx_mm_maskload_ps(ptrB, mask);
638 t7 = gmx_mm_maskload_ps(ptrC, mask);
639 t8 = gmx_mm_maskload_ps(ptrD, mask);
641 t5 = _mm_sub_ps(t5, t1);
642 t6 = _mm_sub_ps(t6, t2);
643 t7 = _mm_sub_ps(t7, t3);
644 t8 = _mm_sub_ps(t8, t4);
646 gmx_mm_maskstore_ps(ptrA, mask, t5);
647 gmx_mm_maskstore_ps(ptrB, mask, t6);
648 gmx_mm_maskstore_ps(ptrC, mask, t7);
649 gmx_mm_maskstore_ps(ptrD, mask, t8);
653 static gmx_inline void gmx_simdcall
654 gmx_mm256_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
655 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
656 __m256 x1, __m256 y1, __m256 z1,
657 __m256 x2, __m256 y2, __m256 z2,
658 __m256 x3, __m256 y3, __m256 z3)
660 __m256 t1, t2, t3, t4, t5, t6;
661 __m128 tA, tB, tC, tD;
663 t1 = _mm256_loadu_ps(ptrA);
664 t2 = _mm256_loadu_ps(ptrB);
665 t3 = _mm256_loadu_ps(ptrC);
666 t4 = _mm256_loadu_ps(ptrD);
667 tA = _mm_load_ss(ptrA+8);
668 tB = _mm_load_ss(ptrB+8);
669 tC = _mm_load_ss(ptrC+8);
670 tD = _mm_load_ss(ptrD+8);
672 t5 = _mm256_unpacklo_ps(x1, y1); /* - - - - | y1b x1b y1a x1a */
673 x1 = _mm256_unpackhi_ps(x1, y1); /* - - - - | y1d x1d y1c x1c */
674 y1 = _mm256_unpacklo_ps(z1, x2); /* - - - - | x2b z1b x2a z1a */
675 z1 = _mm256_unpackhi_ps(z1, x2); /* - - - - | x2d z1d x2c z1c */
677 x2 = _mm256_unpacklo_ps(y2, z2); /* - - - - | z2b y2b z2a y2a */
678 y2 = _mm256_unpackhi_ps(y2, z2); /* - - - - | z2d y2d z2c y2c */
679 t6 = _mm256_unpacklo_ps(x3, y3); /* - - - - | y3b x3b y3a x3a */
680 x3 = _mm256_unpackhi_ps(x3, y3); /* - - - - | y3d x3d y3c x3c */
682 t5 = _mm256_insertf128_ps(t5, _mm256_castps256_ps128(x2), 0x1); /* z2b y2b z2a y2a | y1b x1b y1a x1a */
683 x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); /* z2d y2d z2c y2c | y1d x1d y1c x1c */
685 y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(t6), 0x1); /* y3b x3b y3a x3a | x2b z1b x2a z1a */
686 z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); /* y3d x3d y3c x3c | x2d z1d x2c z1c */
688 z2 = _mm256_shuffle_ps(t5, y1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
689 t5 = _mm256_shuffle_ps(t5, y1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
690 y1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
691 x1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
693 t1 = _mm256_sub_ps(t1, z2);
694 t2 = _mm256_sub_ps(t2, t5);
695 t3 = _mm256_sub_ps(t3, y1);
696 t4 = _mm256_sub_ps(t4, x1);
698 tA = _mm_sub_ss(tA, _mm256_castps256_ps128(z3));
699 tB = _mm_sub_ss(tB, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(1, 1, 1, 1)));
700 tC = _mm_sub_ss(tC, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(2, 2, 2, 2)));
701 tD = _mm_sub_ss(tD, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(3, 3, 3, 3)));
703 /* Here we store a full 256-bit value and a separate 32-bit one; no overlap can happen */
704 _mm256_storeu_ps(ptrA, t1);
705 _mm256_storeu_ps(ptrB, t2);
706 _mm256_storeu_ps(ptrC, t3);
707 _mm256_storeu_ps(ptrD, t4);
708 _mm_store_ss(ptrA+8, tA);
709 _mm_store_ss(ptrB+8, tB);
710 _mm_store_ss(ptrC+8, tC);
711 _mm_store_ss(ptrD+8, tD);
715 static gmx_inline void gmx_simdcall
716 gmx_mm256_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
717 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
718 __m256 x1, __m256 y1, __m256 z1,
719 __m256 x2, __m256 y2, __m256 z2,
720 __m256 x3, __m256 y3, __m256 z3,
721 __m256 x4, __m256 y4, __m256 z4)
723 __m256 t1, t2, t3, t4, t5;
724 __m128 tA, tB, tC, tD, tE, tF, tG, tH;
726 t1 = _mm256_loadu_ps(ptrA);
727 t2 = _mm256_loadu_ps(ptrB);
728 t3 = _mm256_loadu_ps(ptrC);
729 t4 = _mm256_loadu_ps(ptrD);
730 tA = _mm_loadu_ps(ptrA+8);
731 tB = _mm_loadu_ps(ptrB+8);
732 tC = _mm_loadu_ps(ptrC+8);
733 tD = _mm_loadu_ps(ptrD+8);
735 t5 = _mm256_unpacklo_ps(x1, y1); /* - - - - | y1b x1b y1a x1a */
736 x1 = _mm256_unpackhi_ps(x1, y1); /* - - - - | y1d x1d y1c x1c */
737 y1 = _mm256_unpacklo_ps(z1, x2); /* - - - - | x2b z1b x2a z1a */
738 z1 = _mm256_unpackhi_ps(z1, x2); /* - - - - | x2d z1d x2c z1c */
740 x2 = _mm256_unpacklo_ps(y2, z2); /* - - - - | z2b y2b z2a y2a */
741 y2 = _mm256_unpackhi_ps(y2, z2); /* - - - - | z2d y2d z2c y2c */
742 z2 = _mm256_unpacklo_ps(x3, y3); /* - - - - | y3b x3b y3a x3a */
743 x3 = _mm256_unpackhi_ps(x3, y3); /* - - - - | y3d x3d y3c x3c */
745 y3 = _mm256_unpacklo_ps(z3, x4); /* - - - - | x4b z3b x4a z3a */
746 z3 = _mm256_unpackhi_ps(z3, x4); /* - - - - | x4d z3d x4c z3c */
747 x4 = _mm256_unpacklo_ps(y4, z4); /* - - - - | z4b y4b z4a y4a */
748 y4 = _mm256_unpackhi_ps(y4, z4); /* - - - - | z4d y4d z4c y4c */
750 x2 = _mm256_insertf128_ps(t5, _mm256_castps256_ps128(x2), 0x1); /* z2b y2b z2a y2a | y1b x1b y1a x1a */
751 x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); /* z2d y2d z2c y2c | y1d x1d y1c x1c */
752 y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(z2), 0x1); /* y3b x3b y3a x3a | x2b z1b x2a z1a */
753 z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); /* y3d x3d y3c x3c | x2d z1d x2c z1c */
755 z2 = _mm256_shuffle_ps(x2, y1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
756 t5 = _mm256_shuffle_ps(x2, y1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
757 y1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
758 x1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
760 tE = _mm_shuffle_ps(_mm256_castps256_ps128(y3), _mm256_castps256_ps128(x4), _MM_SHUFFLE(1, 0, 1, 0)); /* z4a y4a x4a z3a */
761 tF = _mm_shuffle_ps(_mm256_castps256_ps128(y3), _mm256_castps256_ps128(x4), _MM_SHUFFLE(3, 2, 3, 2)); /* z4b y4b x4b z3b */
763 tG = _mm_shuffle_ps(_mm256_castps256_ps128(z3), _mm256_castps256_ps128(y4), _MM_SHUFFLE(1, 0, 1, 0)); /* z4c y4c x4c z3c */
764 tH = _mm_shuffle_ps(_mm256_castps256_ps128(z3), _mm256_castps256_ps128(y4), _MM_SHUFFLE(3, 2, 3, 2)); /* z4d y4d x4d z3d */
766 t1 = _mm256_sub_ps(t1, z2);
767 t2 = _mm256_sub_ps(t2, t5);
768 t3 = _mm256_sub_ps(t3, y1);
769 t4 = _mm256_sub_ps(t4, x1);
771 tA = _mm_sub_ps(tA, tE);
772 tB = _mm_sub_ps(tB, tF);
773 tC = _mm_sub_ps(tC, tG);
774 tD = _mm_sub_ps(tD, tH);
776 /* Here we store a full 256-bit value and a separate 128-bit one; no overlap can happen */
777 _mm256_storeu_ps(ptrA, t1);
778 _mm256_storeu_ps(ptrB, t2);
779 _mm256_storeu_ps(ptrC, t3);
780 _mm256_storeu_ps(ptrD, t4);
781 _mm_storeu_ps(ptrA+8, tA);
782 _mm_storeu_ps(ptrB+8, tB);
783 _mm_storeu_ps(ptrC+8, tC);
784 _mm_storeu_ps(ptrD+8, tD);
788 static gmx_inline void gmx_simdcall
789 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
790 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
791 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
792 float * gmx_restrict ptrG, float * gmx_restrict ptrH,
793 __m256 x1, __m256 y1, __m256 z1)
795 __m256 t1, t2, t3, t4, t5, t6;
796 __m256 tA, tB, tC, tD;
799 /* Construct a mask without executing any data loads */
800 mask = _mm_blend_epi16(_mm_setzero_si128(), _mm_cmpeq_epi16(_mm_setzero_si128(), _mm_setzero_si128()), 0x3F);
802 tA = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrE, mask), gmx_mm_maskload_ps(ptrA, mask));
803 tB = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrF, mask), gmx_mm_maskload_ps(ptrB, mask));
804 tC = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrG, mask), gmx_mm_maskload_ps(ptrC, mask));
805 tD = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrH, mask), gmx_mm_maskload_ps(ptrD, mask));
806 t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
807 t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */
809 t3 = _mm256_shuffle_ps(t1, z1, _MM_SHUFFLE(0, 0, 1, 0)); /* - z1e y1e x1e | - z1a y1a x1a */
810 t4 = _mm256_shuffle_ps(t1, z1, _MM_SHUFFLE(0, 1, 3, 2)); /* - z1f y1f x1f | - z1b y1b x1b */
811 t5 = _mm256_shuffle_ps(t2, z1, _MM_SHUFFLE(0, 2, 1, 0)); /* - z1g y1g x1g | - z1c y1c x1c */
812 t6 = _mm256_shuffle_ps(t2, z1, _MM_SHUFFLE(0, 3, 3, 2)); /* - z1h y1h x1h | - z1d y1d x1d */
814 tA = _mm256_sub_ps(tA, t3);
815 tB = _mm256_sub_ps(tB, t4);
816 tC = _mm256_sub_ps(tC, t5);
817 tD = _mm256_sub_ps(tD, t6);
819 gmx_mm_maskstore_ps(ptrA, mask, _mm256_castps256_ps128(tA));
820 gmx_mm_maskstore_ps(ptrB, mask, _mm256_castps256_ps128(tB));
821 gmx_mm_maskstore_ps(ptrC, mask, _mm256_castps256_ps128(tC));
822 gmx_mm_maskstore_ps(ptrD, mask, _mm256_castps256_ps128(tD));
823 gmx_mm_maskstore_ps(ptrE, mask, _mm256_extractf128_ps(tA, 0x1));
824 gmx_mm_maskstore_ps(ptrF, mask, _mm256_extractf128_ps(tB, 0x1));
825 gmx_mm_maskstore_ps(ptrG, mask, _mm256_extractf128_ps(tC, 0x1));
826 gmx_mm_maskstore_ps(ptrH, mask, _mm256_extractf128_ps(tD, 0x1));
831 static gmx_inline void gmx_simdcall
832 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
833 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
834 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
835 float * gmx_restrict ptrG, float * gmx_restrict ptrH,
836 __m256 x1, __m256 y1, __m256 z1,
837 __m256 x2, __m256 y2, __m256 z2,
838 __m256 x3, __m256 y3, __m256 z3)
840 __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
841 __m256 tA, tB, tC, tD, tE, tF, tG, tH;
842 __m256 tI, tJ, tK, tL;
844 tA = _mm256_loadu_ps(ptrA);
845 tB = _mm256_loadu_ps(ptrB);
846 tC = _mm256_loadu_ps(ptrC);
847 tD = _mm256_loadu_ps(ptrD);
848 tE = _mm256_loadu_ps(ptrE);
849 tF = _mm256_loadu_ps(ptrF);
850 tG = _mm256_loadu_ps(ptrG);
851 tH = _mm256_loadu_ps(ptrH);
853 t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
854 t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */
855 t3 = _mm256_unpacklo_ps(z1, x2); /* x2f z1f x2e z1e | x2b z1b x2a z1a */
856 t4 = _mm256_unpackhi_ps(z1, x2); /* x2h z1h x2g z1g | x2d z1d x2c z1c */
858 t5 = _mm256_unpacklo_ps(y2, z2); /* z2f y2f z2e y2e | z2b y2b z2a y2a */
859 t6 = _mm256_unpackhi_ps(y2, z2); /* z2h y2h z2g y2g | z2d y2d z2c y2c */
860 t7 = _mm256_unpacklo_ps(x3, y3); /* y3f x3f y3e x3e | y3b x3b y3a x3a */
861 t8 = _mm256_unpackhi_ps(x3, y3); /* y3h x3h y3g x3g | y3d x3d y3c x3c */
863 t9 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* x2e z1e y1e x1e | x2a z1a y1a x1a */
864 t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* x2f z1f y1f x1f | x2b z1b y1b x1b */
865 t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* x2g z1g y1g x1g | x2c z1c y1c x1c */
866 t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* x2h z1h y1h x1h | x2d z1d y1d x1d */
868 t1 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); /* y3e x3e z2e y2e | y3a x3a z2a y2a */
869 t2 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); /* y3f x3f z2f y2f | y3b x3b z2b y2b */
870 t3 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0)); /* y3g x3g z2g y2g | y3c x3c z2c y2c */
871 t4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2)); /* y3h x3h z2h y2h | y3d x3d z2d y2d */
873 t5 = gmx_mm256_unpack128lo_ps(t9, t1); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
874 t6 = gmx_mm256_unpack128hi_ps(t9, t1); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
875 t7 = gmx_mm256_unpack128lo_ps(t10, t2); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
876 t8 = gmx_mm256_unpack128hi_ps(t10, t2); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
877 t1 = gmx_mm256_unpack128lo_ps(t11, t3); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
878 t2 = gmx_mm256_unpack128hi_ps(t11, t3); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
879 t9 = gmx_mm256_unpack128lo_ps(t12, t4); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
880 t10 = gmx_mm256_unpack128hi_ps(t12, t4); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
882 tA = _mm256_sub_ps(tA, t5);
883 tB = _mm256_sub_ps(tB, t7);
884 tC = _mm256_sub_ps(tC, t1);
885 tD = _mm256_sub_ps(tD, t9);
886 tE = _mm256_sub_ps(tE, t6);
887 tF = _mm256_sub_ps(tF, t8);
888 tG = _mm256_sub_ps(tG, t2);
889 tH = _mm256_sub_ps(tH, t10);
891 _mm256_storeu_ps(ptrA, tA);
892 _mm256_storeu_ps(ptrB, tB);
893 _mm256_storeu_ps(ptrC, tC);
894 _mm256_storeu_ps(ptrD, tD);
895 _mm256_storeu_ps(ptrE, tE);
896 _mm256_storeu_ps(ptrF, tF);
897 _mm256_storeu_ps(ptrG, tG);
898 _mm256_storeu_ps(ptrH, tH);
900 tI = gmx_mm256_set_m128(_mm_load_ss(ptrE+8), _mm_load_ss(ptrA+8));
901 tJ = gmx_mm256_set_m128(_mm_load_ss(ptrF+8), _mm_load_ss(ptrB+8));
902 tK = gmx_mm256_set_m128(_mm_load_ss(ptrG+8), _mm_load_ss(ptrC+8));
903 tL = gmx_mm256_set_m128(_mm_load_ss(ptrH+8), _mm_load_ss(ptrD+8));
905 tI = _mm256_unpacklo_ps(tI, tK); /* - - zG zE | - - zC zA */
906 tJ = _mm256_unpacklo_ps(tJ, tL); /* - - zH zF | - - zD zB */
907 tI = _mm256_unpacklo_ps(tI, tJ); /* zH zG zF zE | zD zC zB zA */
909 tI = _mm256_sub_ps(tI, z3);
910 tJ = _mm256_permute_ps(tI, _MM_SHUFFLE(1, 1, 1, 1));
911 tK = _mm256_permute_ps(tI, _MM_SHUFFLE(2, 2, 2, 2));
912 tL = _mm256_permute_ps(tI, _MM_SHUFFLE(3, 3, 3, 3));
914 _mm_store_ss(ptrA+8, _mm256_castps256_ps128(tI));
915 _mm_store_ss(ptrB+8, _mm256_castps256_ps128(tJ));
916 _mm_store_ss(ptrC+8, _mm256_castps256_ps128(tK));
917 _mm_store_ss(ptrD+8, _mm256_castps256_ps128(tL));
918 _mm_store_ss(ptrE+8, _mm256_extractf128_ps(tI, 0x1));
919 _mm_store_ss(ptrF+8, _mm256_extractf128_ps(tJ, 0x1));
920 _mm_store_ss(ptrG+8, _mm256_extractf128_ps(tK, 0x1));
921 _mm_store_ss(ptrH+8, _mm256_extractf128_ps(tL, 0x1));
925 static gmx_inline void gmx_simdcall
926 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
927 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
928 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
929 float * gmx_restrict ptrG, float * gmx_restrict ptrH,
930 __m256 x1, __m256 y1, __m256 z1,
931 __m256 x2, __m256 y2, __m256 z2,
932 __m256 x3, __m256 y3, __m256 z3,
933 __m256 x4, __m256 y4, __m256 z4)
935 __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
936 __m256 tA, tB, tC, tD, tE, tF, tG, tH;
937 __m256 tI, tJ, tK, tL;
939 tA = _mm256_loadu_ps(ptrA);
940 tB = _mm256_loadu_ps(ptrB);
941 tC = _mm256_loadu_ps(ptrC);
942 tD = _mm256_loadu_ps(ptrD);
943 tE = _mm256_loadu_ps(ptrE);
944 tF = _mm256_loadu_ps(ptrF);
945 tG = _mm256_loadu_ps(ptrG);
946 tH = _mm256_loadu_ps(ptrH);
948 t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
949 t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */
950 t3 = _mm256_unpacklo_ps(z1, x2); /* x2f z1f x2e z1e | x2b z1b x2a z1a */
951 t4 = _mm256_unpackhi_ps(z1, x2); /* x2h z1h x2g z1g | x2d z1d x2c z1c */
953 t5 = _mm256_unpacklo_ps(y2, z2); /* z2f y2f z2e y2e | z2b y2b z2a y2a */
954 t6 = _mm256_unpackhi_ps(y2, z2); /* z2h y2h z2g y2g | z2d y2d z2c y2c */
955 t7 = _mm256_unpacklo_ps(x3, y3); /* y3f x3f y3e x3e | y3b x3b y3a x3a */
956 t8 = _mm256_unpackhi_ps(x3, y3); /* y3h x3h y3g x3g | y3d x3d y3c x3c */
958 t9 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* x2e z1e y1e x1e | x2a z1a y1a x1a */
959 t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* x2f z1f y1f x1f | x2b z1b y1b x1b */
960 t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* x2g z1g y1g x1g | x2c z1c y1c x1c */
961 t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* x2h z1h y1h x1h | x2d z1d y1d x1d */
963 t1 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); /* y3e x3e z2e y2e | y3a x3a z2a y2a */
964 t2 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); /* y3f x3f z2f y2f | y3b x3b z2b y2b */
965 t3 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0)); /* y3g x3g z2g y2g | y3c x3c z2c y2c */
966 t4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2)); /* y3h x3h z2h y2h | y3d x3d z2d y2d */
968 t5 = gmx_mm256_unpack128lo_ps(t9, t1); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
969 t6 = gmx_mm256_unpack128hi_ps(t9, t1); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
970 t7 = gmx_mm256_unpack128lo_ps(t10, t2); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
971 t8 = gmx_mm256_unpack128hi_ps(t10, t2); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
972 t1 = gmx_mm256_unpack128lo_ps(t11, t3); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
973 t2 = gmx_mm256_unpack128hi_ps(t11, t3); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
974 t9 = gmx_mm256_unpack128lo_ps(t12, t4); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
975 t10 = gmx_mm256_unpack128hi_ps(t12, t4); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
977 tA = _mm256_sub_ps(tA, t5);
978 tB = _mm256_sub_ps(tB, t7);
979 tC = _mm256_sub_ps(tC, t1);
980 tD = _mm256_sub_ps(tD, t9);
981 tE = _mm256_sub_ps(tE, t6);
982 tF = _mm256_sub_ps(tF, t8);
983 tG = _mm256_sub_ps(tG, t2);
984 tH = _mm256_sub_ps(tH, t10);
986 _mm256_storeu_ps(ptrA, tA);
987 _mm256_storeu_ps(ptrB, tB);
988 _mm256_storeu_ps(ptrC, tC);
989 _mm256_storeu_ps(ptrD, tD);
990 _mm256_storeu_ps(ptrE, tE);
991 _mm256_storeu_ps(ptrF, tF);
992 _mm256_storeu_ps(ptrG, tG);
993 _mm256_storeu_ps(ptrH, tH);
995 tI = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8), _mm_loadu_ps(ptrA+8));
996 tJ = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8), _mm_loadu_ps(ptrB+8));
997 tK = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8), _mm_loadu_ps(ptrC+8));
998 tL = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8), _mm_loadu_ps(ptrD+8));
1000 t1 = _mm256_unpacklo_ps(z3, x4); /* x4f z3f x4e z3e | x4b z3b x4a z3a */
1001 t2 = _mm256_unpackhi_ps(z3, x4); /* x4h z3h x4g z3g | x4d z3d x4c z3c */
1002 t3 = _mm256_unpacklo_ps(y4, z4); /* z4f y4f z4e y4e | z4b y4b z4a y4a */
1003 t4 = _mm256_unpackhi_ps(y4, z4); /* z4h y4h z4g y4g | z4d y4d z4c y4c */
1005 t5 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* z4e y4e x4e z3e | z4a y4a x4a z3a */
1006 t6 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* z4f y4f x4f z3f | z4b y4b x4b z3b */
1007 t7 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* z4g y4g x4g z3g | z4c y4c x4c z3c */
1008 t8 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* z4h y4h x4h z3h | z4d y4d x4d z3d */
1010 tI = _mm256_sub_ps(tI, t5);
1011 tJ = _mm256_sub_ps(tJ, t6);
1012 tK = _mm256_sub_ps(tK, t7);
1013 tL = _mm256_sub_ps(tL, t8);
1015 _mm_storeu_ps(ptrA+8, _mm256_castps256_ps128(tI));
1016 _mm_storeu_ps(ptrB+8, _mm256_castps256_ps128(tJ));
1017 _mm_storeu_ps(ptrC+8, _mm256_castps256_ps128(tK));
1018 _mm_storeu_ps(ptrD+8, _mm256_castps256_ps128(tL));
1019 _mm_storeu_ps(ptrE+8, _mm256_extractf128_ps(tI, 0x1));
1020 _mm_storeu_ps(ptrF+8, _mm256_extractf128_ps(tJ, 0x1));
1021 _mm_storeu_ps(ptrG+8, _mm256_extractf128_ps(tK, 0x1));
1022 _mm_storeu_ps(ptrH+8, _mm256_extractf128_ps(tL, 0x1));
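/* The update_iforce functions reduce the per-element force accumulators
 * across the full register width and add the totals both to the i-particle
 * forces in fptr and to the shift force in fshiftptr. */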
1026 static gmx_inline void gmx_simdcall
1027 gmx_mm256_update_iforce_1atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
1028 float * gmx_restrict fptr,
1029 float * gmx_restrict fshiftptr)
1033 fix1 = _mm256_hadd_ps(fix1, fix1);
1034 fiy1 = _mm256_hadd_ps(fiy1, fiz1);
1035 fix1 = _mm256_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 (in both lanes) */
1037 /* Add across the two lanes */
1038 t1 = _mm_add_ps(_mm256_castps256_ps128(fix1), _mm256_extractf128_ps(fix1, 0x1));
1040 t2 = _mm_load_ss(fptr);
1041 t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
1042 t3 = _mm_load_ss(fshiftptr);
1043 t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));
1045 t2 = _mm_add_ps(t2, t1);
1046 t3 = _mm_add_ps(t3, t1);
1048 _mm_store_ss(fptr, t2);
1049 _mm_storeh_pi((__m64 *)(fptr+1), t2);
1050 _mm_store_ss(fshiftptr, t3);
1051 _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
1055 static gmx_inline void gmx_simdcall
1056 gmx_mm256_update_iforce_3atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
1057 __m256 fix2, __m256 fiy2, __m256 fiz2,
1058 __m256 fix3, __m256 fiy3, __m256 fiz3,
1059 float * gmx_restrict fptr,
1060 float * gmx_restrict fshiftptr)
1065 fix1 = _mm256_hadd_ps(fix1, fiy1); /* Y1g+Y1h Y1e+Y1f X1g+X1h X1e+X1f | Y1c+Y1d Y1a+Y1b X1c+X1d X1a+X1b */
1066 fiz1 = _mm256_hadd_ps(fiz1, fix2); /* X2g+X2h X2e+X2f Z1g+Z1h Z1e+Z1f | X2c+X2d X2a+X2b Z1c+Z1d Z1a+Z1b */
1067 fiy2 = _mm256_hadd_ps(fiy2, fiz2); /* Z2g+Z2h Z2e+Z2f Y2g+Y2h Y2e+Y2f | Z2c+Z2d Z2a+Z2b Y2c+Y2d Y2a+Y2b */
1068 fix3 = _mm256_hadd_ps(fix3, fiy3); /* Y3g+Y3h Y3e+Y3f X3g+X3h X3e+X3f | Y3c+Y3d Y3a+Y3b X3c+X3d X3a+X3b */
1069 fiz3 = _mm256_hadd_ps(fiz3, _mm256_setzero_ps()); /* 0 0 Z3g+Z3h Z3e+Z3f | 0 0 Z3c+Z3d Z3a+Z3b */
1071 fix1 = _mm256_hadd_ps(fix1, fiz1); /* X2e-h Z1e-h Y1e-h X1e-h | X2a-d Z1a-d Y1a-d X1a-d */
1072 fiy2 = _mm256_hadd_ps(fiy2, fix3); /* Y3e-h X3e-h Z2e-h Y2e-h | Y3a-d X3a-d Z2a-d Y2a-d */
1073 fiz3 = _mm256_hadd_ps(fiz3, _mm256_setzero_ps()); /* 0 0 0 Z3e-h | 0 0 0 Z3a-d */
1075 /* Add across the two lanes by swapping and adding back */
1076 t1 = gmx_mm256_unpack128lo_ps(fix1, fiy2); /* Y3a-d X3a-d Z2a-d Y2a-d | X2a-d Z1a-d Y1a-d X1a-d */
1077 t2 = gmx_mm256_unpack128hi_ps(fix1, fiy2); /* Y3e-h X3e-h Z2e-h Y2e-h | X2e-h Z1e-h Y1e-h X1e-h */
1078 t1 = _mm256_add_ps(t1, t2); /* y3 x3 z2 y2 | x2 z1 y1 x1 */
1080 tA = _mm_add_ps(_mm256_castps256_ps128(fiz3), _mm256_extractf128_ps(fiz3, 0x1)); /* 0 0 0 z3 */
1082 t3 = _mm256_loadu_ps(fptr);
1083 t3 = _mm256_add_ps(t3, t1);
1084 _mm256_storeu_ps(fptr, t3);
1085 tB = _mm_load_ss(fptr+8);
1086 tB = _mm_add_ss(tB, tA);
1087 _mm_store_ss(fptr+8, tB);
1089 /* Add up shift force */
1090 tB = _mm256_extractf128_ps(t1, 0x1); /* y3 x3 z2 y2 */
1091 tC = _mm_shuffle_ps(_mm256_castps256_ps128(t1), tB, _MM_SHUFFLE(1, 0, 3, 3)); /* z2 y2 x2 x2 */
1092 tB = _mm_shuffle_ps(tB, tA, _MM_SHUFFLE(1, 0, 3, 2)); /* 0 z3 y3 x3 */
1093 tC = _mm_permute_ps(tC, _MM_SHUFFLE(3, 3, 2, 0)); /* - z2 y2 x2 */
1095 tB = _mm_add_ps(tB, _mm256_castps256_ps128(t1));
1096 tA = _mm_add_ps(tB, tC); /* - z y x */
1098 tA = _mm_blend_ps(_mm_setzero_ps(), tA, 0x7); /* 0 z y x */
1100 tC = _mm_loadu_ps(fshiftptr);
1101 tC = _mm_add_ps(tC, tA);
1102 _mm_storeu_ps(fshiftptr, tC);
1106 static gmx_inline void gmx_simdcall
1107 gmx_mm256_update_iforce_4atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
1108 __m256 fix2, __m256 fiy2, __m256 fiz2,
1109 __m256 fix3, __m256 fiy3, __m256 fiz3,
1110 __m256 fix4, __m256 fiy4, __m256 fiz4,
1111 float * gmx_restrict fptr,
1112 float * gmx_restrict fshiftptr)
1117 fix1 = _mm256_hadd_ps(fix1, fiy1); /* Y1g+Y1h Y1e+Y1f X1g+X1h X1e+X1f | Y1c+Y1d Y1a+Y1b X1c+X1d X1a+X1b */
1118 fiz1 = _mm256_hadd_ps(fiz1, fix2); /* X2g+X2h X2e+X2f Z1g+Z1h Z1e+Z1f | X2c+X2d X2a+X2b Z1c+Z1d Z1a+Z1b */
1119 fiy2 = _mm256_hadd_ps(fiy2, fiz2); /* Z2g+Z2h Z2e+Z2f Y2g+Y2h Y2e+Y2f | Z2c+Z2d Z2a+Z2b Y2c+Y2d Y2a+Y2b */
1120 fix3 = _mm256_hadd_ps(fix3, fiy3); /* Y3g+Y3h Y3e+Y3f X3g+X3h X3e+X3f | Y3c+Y3d Y3a+Y3b X3c+X3d X3a+X3b */
1121 fiz3 = _mm256_hadd_ps(fiz3, fix4); /* X4g+X4h X4e+X4f Z3g+Z3h Z3e+Z3f | X4c+X4d X4a+X4b Z3c+Z3d Z3a+Z3b */
1122 fiy4 = _mm256_hadd_ps(fiy4, fiz4); /* Z4g+Z4h Z4e+Z4f Y4g+Y4h Y4e+Y4f | Z4c+Z4d Z4a+Z4b Y4c+Y4d Y4a+Y4b */
1124 fix1 = _mm256_hadd_ps(fix1, fiz1); /* X2e-h Z1e-h Y1e-h X1e-h | X2a-d Z1a-d Y1a-d X1a-d */
1125 fiy2 = _mm256_hadd_ps(fiy2, fix3); /* Y3e-h X3e-h Z2e-h Y2e-h | Y3a-d X3a-d Z2a-d Y2a-d */
1126 fiz3 = _mm256_hadd_ps(fiz3, fiy4); /* Z4e-h Y4e-h X4e-h Z3e-h | Z4a-d Y4a-d X4a-d Z3a-d */
1128 /* Add across the two lanes by swapping and adding back */
1129 t1 = gmx_mm256_unpack128lo_ps(fix1, fiy2); /* Y3a-d X3a-d Z2a-d Y2a-d | X2a-d Z1a-d Y1a-d X1a-d */
1130 t2 = gmx_mm256_unpack128hi_ps(fix1, fiy2); /* Y3e-h X3e-h Z2e-h Y2e-h | X2e-h Z1e-h Y1e-h X1e-h */
1131 t1 = _mm256_add_ps(t1, t2); /* y3 x3 z2 y2 | x2 z1 y1 x1 */
1133 tA = _mm_add_ps(_mm256_castps256_ps128(fiz3), _mm256_extractf128_ps(fiz3, 0x1)); /* z4 y4 x4 z3 */
1135 t3 = _mm256_loadu_ps(fptr);
1136 t3 = _mm256_add_ps(t3, t1);
1137 _mm256_storeu_ps(fptr, t3);
1139 tB = _mm_loadu_ps(fptr+8);
1140 tB = _mm_add_ps(tB, tA);
1141 _mm_storeu_ps(fptr+8, tB);
1143 /* Add up shift force */
1144 tB = _mm256_extractf128_ps(t1, 0x1); /* y3 x3 z2 y2 */
1145 tC = _mm_shuffle_ps(_mm256_castps256_ps128(t1), tB, _MM_SHUFFLE(1, 0, 3, 3)); /* z2 y2 x2 x2 */
1146 tB = _mm_shuffle_ps(tB, tA, _MM_SHUFFLE(1, 0, 3, 2)); /* 0 z3 y3 x3 */
1147 tC = _mm_permute_ps(tC, _MM_SHUFFLE(3, 3, 2, 0)); /* - z2 y2 x2 */
1148 tA = _mm_permute_ps(tA, _MM_SHUFFLE(0, 3, 2, 1)); /* - z4 y4 x4 */
1150 tB = _mm_add_ps(tB, _mm256_castps256_ps128(t1));
1151 tA = _mm_add_ps(tA, tC);
1152 tA = _mm_add_ps(tA, tB);
1154 tA = _mm_blend_ps(_mm_setzero_ps(), tA, 0x7); /* 0 z y x */
1156 tC = _mm_loadu_ps(fshiftptr);
1157 tC = _mm_add_ps(tC, tA);
1158 _mm_storeu_ps(fshiftptr, tC);
}

static gmx_inline void gmx_simdcall
gmx_mm256_update_1pot_ps(__m256 pot1, float * gmx_restrict ptrA)
{
    __m128 t1;

    pot1 = _mm256_hadd_ps(pot1, pot1);
    pot1 = _mm256_hadd_ps(pot1, pot1);

    t1 = _mm_add_ps(_mm256_castps256_ps128(pot1), _mm256_extractf128_ps(pot1, 0x1));

    _mm_store_ss(ptrA, _mm_add_ss(_mm_load_ss(ptrA), t1));
}

static gmx_inline void gmx_simdcall
gmx_mm256_update_2pot_ps(__m256 pot1, float * gmx_restrict ptrA,
                         __m256 pot2, float * gmx_restrict ptrB)
{
    __m128 t1, t2;

    pot1 = _mm256_hadd_ps(pot1, pot2);
    pot1 = _mm256_hadd_ps(pot1, pot1);

    t1 = _mm_add_ps(_mm256_castps256_ps128(pot1), _mm256_extractf128_ps(pot1, 0x1));

    t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    _mm_store_ss(ptrA, _mm_add_ss(_mm_load_ss(ptrA), t1));
    _mm_store_ss(ptrB, _mm_add_ss(_mm_load_ss(ptrB), t2));
}

#endif /* _kernelutil_x86_avx_256_single_h_ */