/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_avx_256_single_h_
#define _kernelutil_x86_avx_256_single_h_

#define gmx_mm_castsi128_ps(a) _mm_castsi128_ps(a)
static gmx_inline __m256 gmx_simdcall
gmx_mm256_unpack128lo_ps(__m256 xmm1, __m256 xmm2)
{
    return _mm256_permute2f128_ps(xmm1, xmm2, 0x20);
}
static gmx_inline __m256 gmx_simdcall
gmx_mm256_unpack128hi_ps(__m256 xmm1, __m256 xmm2)
{
    return _mm256_permute2f128_ps(xmm1, xmm2, 0x31);
}
static gmx_inline __m256 gmx_simdcall
gmx_mm256_set_m128(__m128 hi, __m128 lo)
{
    return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 0x1);
}
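
/*
 * Illustrative note (not part of the original kernel API): the three
 * helpers above manipulate the two 128-bit lanes of a ymm register.
 * gmx_mm256_set_m128(hi, lo) builds { lo[0..3], hi[0..3] }, so e.g.
 *
 *     __m256 v = gmx_mm256_set_m128(_mm_set1_ps(2.0f), _mm_set1_ps(1.0f));
 *
 * yields 1.0f in elements 0-3 and 2.0f in elements 4-7.
 */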
/* Work around gcc bug with wrong type for mask formal parameter to maskload/maskstore */
#ifdef GMX_SIMD_X86_AVX_GCC_MASKLOAD_BUG
#    define gmx_mm_maskload_ps(mem, mask)        _mm_maskload_ps((mem), _mm_castsi128_ps(mask))
#    define gmx_mm_maskstore_ps(mem, mask, x)    _mm_maskstore_ps((mem), _mm_castsi128_ps(mask), (x))
#    define gmx_mm256_maskload_ps(mem, mask)     _mm256_maskload_ps((mem), _mm256_castsi256_ps(mask))
#    define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), _mm256_castsi256_ps(mask), (x))
#else
#    define gmx_mm_maskload_ps(mem, mask)        _mm_maskload_ps((mem), (mask))
#    define gmx_mm_maskstore_ps(mem, mask, x)    _mm_maskstore_ps((mem), (mask), (x))
#    define gmx_mm256_maskload_ps(mem, mask)     _mm256_maskload_ps((mem), (mask))
#    define gmx_mm256_maskstore_ps(mem, mask, x) _mm256_maskstore_ps((mem), (mask), (x))
#endif
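
/*
 * Usage sketch (illustrative only): loading the three components of an
 * rvec without reading a fourth float past the end of the array. The
 * mask selects elements whose top bit is set; the unselected element
 * is zeroed instead of being loaded from memory.
 *
 *     __m128i mask = _mm_set_epi32(0, -1, -1, -1);
 *     __m128  v    = gmx_mm_maskload_ps(xyz, mask);   // -  z  y  x
 */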
/* Transpose lower/upper half of 256-bit registers separately */
#define GMX_MM256_HALFTRANSPOSE4_PS(ymm0, ymm1, ymm2, ymm3) {                \
        __m256 __tmp0, __tmp1, __tmp2, __tmp3;                               \
                                                                             \
        __tmp0 = _mm256_unpacklo_ps((ymm0), (ymm1));                         \
        __tmp1 = _mm256_unpacklo_ps((ymm2), (ymm3));                         \
        __tmp2 = _mm256_unpackhi_ps((ymm0), (ymm1));                         \
        __tmp3 = _mm256_unpackhi_ps((ymm2), (ymm3));                         \
        ymm0   = _mm256_shuffle_ps(__tmp0, __tmp1, _MM_SHUFFLE(1, 0, 1, 0)); \
        ymm1   = _mm256_shuffle_ps(__tmp0, __tmp1, _MM_SHUFFLE(3, 2, 3, 2)); \
        ymm2   = _mm256_shuffle_ps(__tmp2, __tmp3, _MM_SHUFFLE(1, 0, 1, 0)); \
        ymm3   = _mm256_shuffle_ps(__tmp2, __tmp3, _MM_SHUFFLE(3, 2, 3, 2)); \
}
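
/*
 * Illustrative note: within each 128-bit lane this is a standard 4x4
 * transpose. If, in the low lane, ymm0..ymm3 initially hold the rows
 * (a0 a1 a2 a3) .. (d0 d1 d2 d3), then afterwards ymm0 holds the first
 * column (a0 b0 c0 d0), ymm1 the second, and so on; the high lane is
 * transposed the same way, independently of the low one.
 */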
static gmx_inline __m256 gmx_simdcall
gmx_mm256_calc_rsq_ps(__m256 dx, __m256 dy, __m256 dz)
{
    return _mm256_add_ps( _mm256_add_ps( _mm256_mul_ps(dx, dx), _mm256_mul_ps(dy, dy) ), _mm256_mul_ps(dz, dz) );
}
/* Normal sum of four ymm registers */
#define gmx_mm256_sum4_ps(t0, t1, t2, t3)  _mm256_add_ps(_mm256_add_ps(t0, t1), _mm256_add_ps(t2, t3))
static gmx_inline int gmx_simdcall
gmx_mm256_any_lt(__m256 a, __m256 b)
{
    return _mm256_movemask_ps(_mm256_cmp_ps(a, b, _CMP_LT_OQ));
}
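
/*
 * Usage sketch (illustrative; 'rcutoff2' is a hypothetical squared
 * cutoff broadcast to all eight elements): the two helpers above are
 * typically combined to skip work when all pairs are beyond the cutoff.
 *
 *     __m256 rsq = gmx_mm256_calc_rsq_ps(dx, dy, dz);
 *     if (gmx_mm256_any_lt(rsq, rcutoff2))
 *     {
 *         // at least one of the eight distances is within the cutoff
 *     }
 */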
static gmx_inline __m256 gmx_simdcall
gmx_mm256_load_4real_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                const float * gmx_restrict ptrC, const float * gmx_restrict ptrD)
{
    __m128 t1, t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA), _mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB), _mm_load_ss(ptrD));
    return _mm256_castps128_ps256(_mm_unpacklo_ps(t1, t2));
}
static gmx_inline __m256 gmx_simdcall
gmx_mm256_load_8real_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
                                const float * gmx_restrict ptrG, const float * gmx_restrict ptrH)
{
    __m256 t1, t2;

    t1 = gmx_mm256_load_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD);
    t2 = gmx_mm256_load_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH);

    return _mm256_permute2f128_ps(t1, t2, 0x20);
}
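
/*
 * Usage sketch (illustrative): gathering one scalar parameter, e.g. a
 * charge, for eight neighbor atoms scattered in memory. 'charge' and
 * the jnr* indices are hypothetical names.
 *
 *     __m256 jq = gmx_mm256_load_8real_swizzle_ps(charge+jnrA, charge+jnrB,
 *                                                 charge+jnrC, charge+jnrD,
 *                                                 charge+jnrE, charge+jnrF,
 *                                                 charge+jnrG, charge+jnrH);
 */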
static gmx_inline void gmx_simdcall
gmx_mm256_store_4real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                 float * gmx_restrict ptrC, float * gmx_restrict ptrD, __m256 xmm1)
{
    __m256 t2, t3, t4;

    t2 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(1, 1, 1, 1));
    t3 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(2, 2, 2, 2));
    t4 = _mm256_permute_ps(xmm1, _MM_SHUFFLE(3, 3, 3, 3));
    _mm_store_ss(ptrA, _mm256_castps256_ps128(xmm1));
    _mm_store_ss(ptrB, _mm256_castps256_ps128(t2));
    _mm_store_ss(ptrC, _mm256_castps256_ps128(t3));
    _mm_store_ss(ptrD, _mm256_castps256_ps128(t4));
}
static gmx_inline void gmx_simdcall
gmx_mm256_store_8real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                 float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                 float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                 float * gmx_restrict ptrG, float * gmx_restrict ptrH, __m256 xmm1)
{
    __m256 t1;

    t1 = _mm256_permute2f128_ps(xmm1, xmm1, 0x11);

    gmx_mm256_store_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, xmm1);
    gmx_mm256_store_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH, t1);
}
static gmx_inline void gmx_simdcall
gmx_mm256_increment_4real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                     float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                     __m256 xmm1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm256_castps256_ps128(xmm1);
    t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    t3 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    t4 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));

    t1 = _mm_add_ss(t1, _mm_load_ss(ptrA));
    t2 = _mm_add_ss(t2, _mm_load_ss(ptrB));
    t3 = _mm_add_ss(t3, _mm_load_ss(ptrC));
    t4 = _mm_add_ss(t4, _mm_load_ss(ptrD));

    _mm_store_ss(ptrA, t1);
    _mm_store_ss(ptrB, t2);
    _mm_store_ss(ptrC, t3);
    _mm_store_ss(ptrD, t4);
}
static gmx_inline void gmx_simdcall
gmx_mm256_increment_8real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                     float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                     float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                     float * gmx_restrict ptrG, float * gmx_restrict ptrH,
                                     __m256 xmm1)
{
    __m256 t1;

    t1 = _mm256_permute2f128_ps(xmm1, xmm1, 0x11);

    gmx_mm256_increment_4real_swizzle_ps(ptrA, ptrB, ptrC, ptrD, xmm1);
    gmx_mm256_increment_4real_swizzle_ps(ptrE, ptrF, ptrG, ptrH, t1);
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_4pair_swizzle_ps(const float * gmx_restrict p1, const float * gmx_restrict p2,
                                const float * gmx_restrict p3, const float * gmx_restrict p4,
                                __m256 * gmx_restrict c6, __m256 * gmx_restrict c12)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p1); /* - - c12a c6a */
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p2); /* - - c12b c6b */
    t3 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p3); /* - - c12c c6c */
    t4 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)p4); /* - - c12d c6d */

    t1 = _mm_unpacklo_ps(t1, t2); /* c12b c12a c6b c6a */
    t3 = _mm_unpacklo_ps(t3, t4); /* c12d c12c c6d c6c */

    *c6  = _mm256_castps128_ps256(_mm_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)));
    *c12 = _mm256_castps128_ps256(_mm_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)));
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_8pair_swizzle_ps(const float * gmx_restrict p1, const float * gmx_restrict p2,
                                const float * gmx_restrict p3, const float * gmx_restrict p4,
                                const float * gmx_restrict p5, const float * gmx_restrict p6,
                                const float * gmx_restrict p7, const float * gmx_restrict p8,
                                __m256 * gmx_restrict c6, __m256 * gmx_restrict c12)
{
    __m256 c6l, c6h, c12l, c12h;

    gmx_mm256_load_4pair_swizzle_ps(p1, p2, p3, p4, &c6l, &c12l);
    gmx_mm256_load_4pair_swizzle_ps(p5, p6, p7, p8, &c6h, &c12h);

    *c6  = _mm256_permute2f128_ps(c6l, c6h, 0x20);
    *c12 = _mm256_permute2f128_ps(c12l, c12h, 0x20);
}
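
/*
 * Usage sketch (illustrative): each pointer addresses a consecutive
 * (c6, c12) Lennard-Jones parameter pair; the swizzle separates the
 * pairs into one register of eight c6 values and one of eight c12
 * values. 'vdwparam' and the type* offsets are hypothetical names.
 *
 *     __m256 c6, c12;
 *     gmx_mm256_load_8pair_swizzle_ps(vdwparam+2*typeA, vdwparam+2*typeB,
 *                                     vdwparam+2*typeC, vdwparam+2*typeD,
 *                                     vdwparam+2*typeE, vdwparam+2*typeF,
 *                                     vdwparam+2*typeG, vdwparam+2*typeH,
 *                                     &c6, &c12);
 */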
static gmx_inline void gmx_simdcall
gmx_mm256_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                            const float * gmx_restrict xyz,
                                            __m256 * gmx_restrict      x1,
                                            __m256 * gmx_restrict      y1,
                                            __m256 * gmx_restrict      z1)
{
    __m128 t1, t2, t3, t4;

    t1 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    t2 = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz);
    t3 = _mm_load_ss(xyz_shift+2);
    t4 = _mm_load_ss(xyz+2);
    t1 = _mm_add_ps(t1, t2);
    t3 = _mm_add_ss(t3, t4);

    t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    t1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));
    t3 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));

    *x1 = gmx_mm256_set_m128(t1, t1);
    *y1 = gmx_mm256_set_m128(t2, t2);
    *z1 = gmx_mm256_set_m128(t3, t3);
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                            const float * gmx_restrict xyz,
                                            __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                            __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                            __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_load_ss(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ss(t3, t6);

    t9 = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
    t8 = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    t7 = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    t6 = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    t5 = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    t4 = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    t3 = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    t1 = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));

    *x1 = gmx_mm256_set_m128(t1, t1);
    *y1 = gmx_mm256_set_m128(t2, t2);
    *z1 = gmx_mm256_set_m128(t3, t3);
    *x2 = gmx_mm256_set_m128(t4, t4);
    *y2 = gmx_mm256_set_m128(t5, t5);
    *z2 = gmx_mm256_set_m128(t6, t6);
    *x3 = gmx_mm256_set_m128(t7, t7);
    *y3 = gmx_mm256_set_m128(t8, t8);
    *z3 = gmx_mm256_set_m128(t9, t9);
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                            const float * gmx_restrict xyz,
                                            __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                            __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                            __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
                                            __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
{
    __m128 tA, tB;
    __m128 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;

    tA = _mm_loadl_pi(_mm_setzero_ps(), (__m64 *)xyz_shift);
    tB = _mm_load_ss(xyz_shift+2);

    t1 = _mm_loadu_ps(xyz);
    t2 = _mm_loadu_ps(xyz+4);
    t3 = _mm_loadu_ps(xyz+8);

    tA = _mm_movelh_ps(tA, tB);
    t4 = _mm_permute_ps(tA, _MM_SHUFFLE(0, 2, 1, 0));
    t5 = _mm_permute_ps(tA, _MM_SHUFFLE(1, 0, 2, 1));
    t6 = _mm_permute_ps(tA, _MM_SHUFFLE(2, 1, 0, 2));

    t1 = _mm_add_ps(t1, t4);
    t2 = _mm_add_ps(t2, t5);
    t3 = _mm_add_ps(t3, t6);

    t12 = _mm_permute_ps(t3, _MM_SHUFFLE(3, 3, 3, 3));
    t11 = _mm_permute_ps(t3, _MM_SHUFFLE(2, 2, 2, 2));
    t10 = _mm_permute_ps(t3, _MM_SHUFFLE(1, 1, 1, 1));
    t9  = _mm_permute_ps(t3, _MM_SHUFFLE(0, 0, 0, 0));
    t8  = _mm_permute_ps(t2, _MM_SHUFFLE(3, 3, 3, 3));
    t7  = _mm_permute_ps(t2, _MM_SHUFFLE(2, 2, 2, 2));
    t6  = _mm_permute_ps(t2, _MM_SHUFFLE(1, 1, 1, 1));
    t5  = _mm_permute_ps(t2, _MM_SHUFFLE(0, 0, 0, 0));
    t4  = _mm_permute_ps(t1, _MM_SHUFFLE(3, 3, 3, 3));
    t3  = _mm_permute_ps(t1, _MM_SHUFFLE(2, 2, 2, 2));
    t2  = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    t1  = _mm_permute_ps(t1, _MM_SHUFFLE(0, 0, 0, 0));

    *x1 = gmx_mm256_set_m128(t1, t1);
    *y1 = gmx_mm256_set_m128(t2, t2);
    *z1 = gmx_mm256_set_m128(t3, t3);
    *x2 = gmx_mm256_set_m128(t4, t4);
    *y2 = gmx_mm256_set_m128(t5, t5);
    *z2 = gmx_mm256_set_m128(t6, t6);
    *x3 = gmx_mm256_set_m128(t7, t7);
    *y3 = gmx_mm256_set_m128(t8, t8);
    *z3 = gmx_mm256_set_m128(t9, t9);
    *x4 = gmx_mm256_set_m128(t10, t10);
    *y4 = gmx_mm256_set_m128(t11, t11);
    *z4 = gmx_mm256_set_m128(t12, t12);
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1)
{
    __m128  t1, t2, t3, t4;
    __m128i mask = _mm_set_epi32(0, -1, -1, -1);
    t1 = gmx_mm_maskload_ps(ptrA, mask);
    t2 = gmx_mm_maskload_ps(ptrB, mask);
    t3 = gmx_mm_maskload_ps(ptrC, mask);
    t4 = gmx_mm_maskload_ps(ptrD, mask);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = _mm256_castps128_ps256(t1);
    *y1 = _mm256_castps128_ps256(t2);
    *z1 = _mm256_castps128_ps256(t3);
}
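
/*
 * Illustrative note: this loader and the related N-rvec/M-pointer
 * variants below read M interleaved coordinate triplets (x, y, z per
 * atom, N atoms per molecule) and transpose them into
 * structure-of-arrays form, one register per component. The 4ptr
 * variants fill only the low 128-bit lane; the 8ptr variants fill both
 * lanes.
 */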
static gmx_inline void gmx_simdcall
gmx_mm256_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                     __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                     __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
{
    __m128 t1, t2, t3, t4;
    t1 = _mm_loadu_ps(ptrA);
    t2 = _mm_loadu_ps(ptrB);
    t3 = _mm_loadu_ps(ptrC);
    t4 = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = _mm256_castps128_ps256(t1);
    *y1 = _mm256_castps128_ps256(t2);
    *z1 = _mm256_castps128_ps256(t3);
    *x2 = _mm256_castps128_ps256(t4);
    t1 = _mm_loadu_ps(ptrA+4);
    t2 = _mm_loadu_ps(ptrB+4);
    t3 = _mm_loadu_ps(ptrC+4);
    t4 = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = _mm256_castps128_ps256(t1);
    *z2 = _mm256_castps128_ps256(t2);
    *x3 = _mm256_castps128_ps256(t3);
    *y3 = _mm256_castps128_ps256(t4);
    t1 = _mm_load_ss(ptrA+8);
    t2 = _mm_load_ss(ptrB+8);
    t3 = _mm_load_ss(ptrC+8);
    t4 = _mm_load_ss(ptrD+8);
    t1 = _mm_unpacklo_ps(t1, t3);
    t3 = _mm_unpacklo_ps(t2, t4);
    *z3 = _mm256_castps128_ps256(_mm_unpacklo_ps(t1, t3));
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                     __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                     __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
                                     __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
{
    __m128 t1, t2, t3, t4;
    t1 = _mm_loadu_ps(ptrA);
    t2 = _mm_loadu_ps(ptrB);
    t3 = _mm_loadu_ps(ptrC);
    t4 = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *x1 = _mm256_castps128_ps256(t1);
    *y1 = _mm256_castps128_ps256(t2);
    *z1 = _mm256_castps128_ps256(t3);
    *x2 = _mm256_castps128_ps256(t4);
    t1 = _mm_loadu_ps(ptrA+4);
    t2 = _mm_loadu_ps(ptrB+4);
    t3 = _mm_loadu_ps(ptrC+4);
    t4 = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *y2 = _mm256_castps128_ps256(t1);
    *z2 = _mm256_castps128_ps256(t2);
    *x3 = _mm256_castps128_ps256(t3);
    *y3 = _mm256_castps128_ps256(t4);
    t1 = _mm_loadu_ps(ptrA+8);
    t2 = _mm_loadu_ps(ptrB+8);
    t3 = _mm_loadu_ps(ptrC+8);
    t4 = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1, t2, t3, t4);
    *z3 = _mm256_castps128_ps256(t1);
    *x4 = _mm256_castps128_ps256(t2);
    *y4 = _mm256_castps128_ps256(t3);
    *z4 = _mm256_castps128_ps256(t4);
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_1rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
                                     const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1)
{
    __m256  t1, t2, t3, t4, t5, t6, t7, t8;
    __m128i mask = _mm_set_epi32(0, -1, -1, -1);

    t1 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrE, mask), gmx_mm_maskload_ps(ptrA, mask)); /* - zE yE xE | - zA yA xA */
    t2 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrF, mask), gmx_mm_maskload_ps(ptrB, mask)); /* - zF yF xF | - zB yB xB */
    t3 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrG, mask), gmx_mm_maskload_ps(ptrC, mask)); /* - zG yG xG | - zC yC xC */
    t4 = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrH, mask), gmx_mm_maskload_ps(ptrD, mask)); /* - zH yH xH | - zD yD xD */

    t5 = _mm256_unpacklo_ps(t1, t2); /* yF yE xF xE | yB yA xB xA */
    t6 = _mm256_unpacklo_ps(t3, t4); /* yH yG xH xG | yD yC xD xC */
    t7 = _mm256_unpackhi_ps(t1, t2); /* - - zF zE | - - zB zA */
    t8 = _mm256_unpackhi_ps(t3, t4); /* - - zH zG | - - zD zC */

    *x1 = _mm256_shuffle_ps(t5, t6, _MM_SHUFFLE(1, 0, 1, 0));
    *y1 = _mm256_shuffle_ps(t5, t6, _MM_SHUFFLE(3, 2, 3, 2));
    *z1 = _mm256_shuffle_ps(t7, t8, _MM_SHUFFLE(1, 0, 1, 0));
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_3rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
                                     const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                     __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                     __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
{
    __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;

    t1 = _mm256_loadu_ps(ptrA); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t2 = _mm256_loadu_ps(ptrB); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    t3 = _mm256_loadu_ps(ptrC); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    t4 = _mm256_loadu_ps(ptrD); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
    t5 = _mm256_loadu_ps(ptrE); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
    t6 = _mm256_loadu_ps(ptrF); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
    t7 = _mm256_loadu_ps(ptrG); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
    t8 = _mm256_loadu_ps(ptrH); /* y3h x3h z2h y2h | x2h z1h y1h x1h */

    t9  = _mm256_unpacklo_ps(t1, t2); /* z2b z2a y2b y2a | y1b y1a x1b x1a */
    t10 = _mm256_unpackhi_ps(t1, t2); /* y3b y3a x3b x3a | x2b x2a z1b z1a */
    t11 = _mm256_unpacklo_ps(t3, t4); /* z2d z2c y2d y2c | y1d y1c x1d x1c */
    t12 = _mm256_unpackhi_ps(t3, t4); /* y3d y3c x3d x3c | x2d x2c z1d z1c */
    t1  = _mm256_unpacklo_ps(t5, t6); /* z2f z2e y2f y2e | y1f y1e x1f x1e */
    t2  = _mm256_unpackhi_ps(t5, t6); /* y3f y3e x3f x3e | x2f x2e z1f z1e */
    t3  = _mm256_unpacklo_ps(t7, t8); /* z2h z2g y2h y2g | y1h y1g x1h x1g */
    t4  = _mm256_unpackhi_ps(t7, t8); /* y3h y3g x3h x3g | x2h x2g z1h z1g */

    t5 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(1, 0, 1, 0));   /* y2d y2c y2b y2a | x1d x1c x1b x1a */
    t6 = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(3, 2, 3, 2));   /* z2d z2c z2b z2a | y1d y1c y1b y1a */
    t7 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(1, 0, 1, 0));  /* x3d x3c x3b x3a | z1d z1c z1b z1a */
    t8 = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(3, 2, 3, 2));  /* y3d y3c y3b y3a | x2d x2c x2b x2a */

    t9  = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0));   /* y2h y2g y2f y2e | x1h x1g x1f x1e */
    t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2));   /* z2h z2g z2f z2e | y1h y1g y1f y1e */
    t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0));   /* x3h x3g x3f x3e | z1h z1g z1f z1e */
    t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2));   /* y3h y3g y3f y3e | x2h x2g x2f x2e */

    *x1 = _mm256_permute2f128_ps(t5, t9, 0x20);
    *y1 = _mm256_permute2f128_ps(t6, t10, 0x20);
    *z1 = _mm256_permute2f128_ps(t7, t11, 0x20);
    *x2 = _mm256_permute2f128_ps(t8, t12, 0x20);

    *y2 = _mm256_permute2f128_ps(t5, t9, 0x31);
    *z2 = _mm256_permute2f128_ps(t6, t10, 0x31);
    *x3 = _mm256_permute2f128_ps(t7, t11, 0x31);
    *y3 = _mm256_permute2f128_ps(t8, t12, 0x31);

    t1 = gmx_mm256_set_m128(_mm_load_ss(ptrE+8), _mm_load_ss(ptrA+8));
    t2 = gmx_mm256_set_m128(_mm_load_ss(ptrF+8), _mm_load_ss(ptrB+8));
    t3 = gmx_mm256_set_m128(_mm_load_ss(ptrG+8), _mm_load_ss(ptrC+8));
    t4 = gmx_mm256_set_m128(_mm_load_ss(ptrH+8), _mm_load_ss(ptrD+8));

    t1 = _mm256_unpacklo_ps(t1, t3); /* - - z3g z3e | - - z3c z3a */
    t2 = _mm256_unpacklo_ps(t2, t4); /* - - z3h z3f | - - z3d z3b */

    *z3 = _mm256_unpacklo_ps(t1, t2);
}
static gmx_inline void gmx_simdcall
gmx_mm256_load_4rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
                                     const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
                                     const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
                                     const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
                                     __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
                                     __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
                                     __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
                                     __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
{
    __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;

    t1 = _mm256_loadu_ps(ptrA); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t2 = _mm256_loadu_ps(ptrB); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    t3 = _mm256_loadu_ps(ptrC); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    t4 = _mm256_loadu_ps(ptrD); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
    t5 = _mm256_loadu_ps(ptrE); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
    t6 = _mm256_loadu_ps(ptrF); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
    t7 = _mm256_loadu_ps(ptrG); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
    t8 = _mm256_loadu_ps(ptrH); /* y3h x3h z2h y2h | x2h z1h y1h x1h */

    t9  = _mm256_unpacklo_ps(t1, t2); /* z2b z2a y2b y2a | y1b y1a x1b x1a */
    t10 = _mm256_unpackhi_ps(t1, t2); /* y3b y3a x3b x3a | x2b x2a z1b z1a */
    t11 = _mm256_unpacklo_ps(t3, t4); /* z2d z2c y2d y2c | y1d y1c x1d x1c */
    t12 = _mm256_unpackhi_ps(t3, t4); /* y3d y3c x3d x3c | x2d x2c z1d z1c */
    t1  = _mm256_unpacklo_ps(t5, t6); /* z2f z2e y2f y2e | y1f y1e x1f x1e */
    t2  = _mm256_unpackhi_ps(t5, t6); /* y3f y3e x3f x3e | x2f x2e z1f z1e */
    t3  = _mm256_unpacklo_ps(t7, t8); /* z2h z2g y2h y2g | y1h y1g x1h x1g */
    t4  = _mm256_unpackhi_ps(t7, t8); /* y3h y3g x3h x3g | x2h x2g z1h z1g */

    t5  = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(1, 0, 1, 0));  /* y2d y2c y2b y2a | x1d x1c x1b x1a */
    t6  = _mm256_shuffle_ps(t9, t11, _MM_SHUFFLE(3, 2, 3, 2));  /* z2d z2c z2b z2a | y1d y1c y1b y1a */
    t7  = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(1, 0, 1, 0)); /* x3d x3c x3b x3a | z1d z1c z1b z1a */
    t8  = _mm256_shuffle_ps(t10, t12, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d y3c y3b y3a | x2d x2c x2b x2a */
    t9  = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0));   /* y2h y2g y2f y2e | x1h x1g x1f x1e */
    t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2));   /* z2h z2g z2f z2e | y1h y1g y1f y1e */
    t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0));   /* x3h x3g x3f x3e | z1h z1g z1f z1e */
    t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2));   /* y3h y3g y3f y3e | x2h x2g x2f x2e */

    *x1 = _mm256_permute2f128_ps(t5, t9, 0x20);
    *y1 = _mm256_permute2f128_ps(t6, t10, 0x20);
    *z1 = _mm256_permute2f128_ps(t7, t11, 0x20);
    *x2 = _mm256_permute2f128_ps(t8, t12, 0x20);

    *y2 = _mm256_permute2f128_ps(t5, t9, 0x31);
    *z2 = _mm256_permute2f128_ps(t6, t10, 0x31);
    *x3 = _mm256_permute2f128_ps(t7, t11, 0x31);
    *y3 = _mm256_permute2f128_ps(t8, t12, 0x31);

    t1 = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8), _mm_loadu_ps(ptrA+8)); /* z4e y4e x4e z3e | z4a y4a x4a z3a */
    t2 = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8), _mm_loadu_ps(ptrB+8)); /* z4f y4f x4f z3f | z4b y4b x4b z3b */
    t3 = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8), _mm_loadu_ps(ptrC+8)); /* z4g y4g x4g z3g | z4c y4c x4c z3c */
    t4 = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8), _mm_loadu_ps(ptrD+8)); /* z4h y4h x4h z3h | z4d y4d x4d z3d */

    t5 = _mm256_unpacklo_ps(t1, t2); /* x4f x4e z3f z3e | x4b x4a z3b z3a */
    t6 = _mm256_unpackhi_ps(t1, t2); /* z4f z4e y4f y4e | z4b z4a y4b y4a */
    t7 = _mm256_unpacklo_ps(t3, t4); /* x4h x4g z3h z3g | x4d x4c z3d z3c */
    t8 = _mm256_unpackhi_ps(t3, t4); /* z4h z4g y4h y4g | z4d z4c y4d y4c */

    *z3 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0)); /* z3h z3g z3f z3e | z3d z3c z3b z3a */
    *x4 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2)); /* x4h x4g x4f x4e | x4d x4c x4b x4a */
    *y4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0)); /* y4h y4g y4f y4e | y4d y4c y4b y4a */
    *z4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2)); /* z4h z4g z4f z4e | z4d z4c z4b z4a */
}
static gmx_inline void gmx_simdcall
gmx_mm256_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          __m256 x1, __m256 y1, __m256 z1)
{
    __m128  t1, t2, t3, t4, t5, t6, t7, t8;
    __m128i mask;

    /* Construct a mask without executing any data loads */
    mask = _mm_blend_epi16(_mm_setzero_si128(), _mm_cmpeq_epi16(_mm_setzero_si128(), _mm_setzero_si128()), 0x3F);

    t3 = _mm_unpacklo_ps(_mm256_castps256_ps128(x1), _mm256_castps256_ps128(y1)); /* y1b x1b y1a x1a */
    t4 = _mm_unpackhi_ps(_mm256_castps256_ps128(x1), _mm256_castps256_ps128(y1)); /* y1d x1d y1c x1c */

    t1 = _mm_shuffle_ps(t3, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 0, 1, 0)); /* - z1a y1a x1a */
    t2 = _mm_shuffle_ps(t3, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 1, 3, 2)); /* - z1b y1b x1b */
    t3 = _mm_shuffle_ps(t4, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 2, 1, 0)); /* - z1c y1c x1c */
    t4 = _mm_shuffle_ps(t4, _mm256_castps256_ps128(z1), _MM_SHUFFLE(0, 3, 3, 2)); /* - z1d y1d x1d */

    t5 = gmx_mm_maskload_ps(ptrA, mask);
    t6 = gmx_mm_maskload_ps(ptrB, mask);
    t7 = gmx_mm_maskload_ps(ptrC, mask);
    t8 = gmx_mm_maskload_ps(ptrD, mask);

    t5 = _mm_sub_ps(t5, t1);
    t6 = _mm_sub_ps(t6, t2);
    t7 = _mm_sub_ps(t7, t3);
    t8 = _mm_sub_ps(t8, t4);

    gmx_mm_maskstore_ps(ptrA, mask, t5);
    gmx_mm_maskstore_ps(ptrB, mask, t6);
    gmx_mm_maskstore_ps(ptrC, mask, t7);
    gmx_mm_maskstore_ps(ptrD, mask, t8);
}
static gmx_inline void gmx_simdcall
gmx_mm256_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          __m256 x1, __m256 y1, __m256 z1,
                                          __m256 x2, __m256 y2, __m256 z2,
                                          __m256 x3, __m256 y3, __m256 z3)
{
    __m256 t1, t2, t3, t4, t5, t6;
    __m128 tA, tB, tC, tD;

    t1 = _mm256_loadu_ps(ptrA);
    t2 = _mm256_loadu_ps(ptrB);
    t3 = _mm256_loadu_ps(ptrC);
    t4 = _mm256_loadu_ps(ptrD);
    tA = _mm_load_ss(ptrA+8);
    tB = _mm_load_ss(ptrB+8);
    tC = _mm_load_ss(ptrC+8);
    tD = _mm_load_ss(ptrD+8);

    t5 = _mm256_unpacklo_ps(x1, y1); /* - - - - | y1b x1b y1a x1a */
    x1 = _mm256_unpackhi_ps(x1, y1); /* - - - - | y1d x1d y1c x1c */
    y1 = _mm256_unpacklo_ps(z1, x2); /* - - - - | x2b z1b x2a z1a */
    z1 = _mm256_unpackhi_ps(z1, x2); /* - - - - | x2d z1d x2c z1c */

    x2 = _mm256_unpacklo_ps(y2, z2); /* - - - - | z2b y2b z2a y2a */
    y2 = _mm256_unpackhi_ps(y2, z2); /* - - - - | z2d y2d z2c y2c */
    t6 = _mm256_unpacklo_ps(x3, y3); /* - - - - | y3b x3b y3a x3a */
    x3 = _mm256_unpackhi_ps(x3, y3); /* - - - - | y3d x3d y3c x3c */

    t5 = _mm256_insertf128_ps(t5, _mm256_castps256_ps128(x2), 0x1); /* z2b y2b z2a y2a | y1b x1b y1a x1a */
    x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); /* z2d y2d z2c y2c | y1d x1d y1c x1c */

    y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(t6), 0x1); /* y3b x3b y3a x3a | x2b z1b x2a z1a */
    z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); /* y3d x3d y3c x3c | x2d z1d x2c z1c */

    z2 = _mm256_shuffle_ps(t5, y1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t5 = _mm256_shuffle_ps(t5, y1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    y1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    x1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d x3d z2d y2d | x2d z1d y1d x1d */

    t1 = _mm256_sub_ps(t1, z2);
    t2 = _mm256_sub_ps(t2, t5);
    t3 = _mm256_sub_ps(t3, y1);
    t4 = _mm256_sub_ps(t4, x1);

    tA = _mm_sub_ss(tA, _mm256_castps256_ps128(z3));
    tB = _mm_sub_ss(tB, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(1, 1, 1, 1)));
    tC = _mm_sub_ss(tC, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(2, 2, 2, 2)));
    tD = _mm_sub_ss(tD, _mm_permute_ps(_mm256_castps256_ps128(z3), _MM_SHUFFLE(3, 3, 3, 3)));

    /* Here we store a full 256-bit value and a separate 32-bit one; no overlap can happen */
    _mm256_storeu_ps(ptrA, t1);
    _mm256_storeu_ps(ptrB, t2);
    _mm256_storeu_ps(ptrC, t3);
    _mm256_storeu_ps(ptrD, t4);
    _mm_store_ss(ptrA+8, tA);
    _mm_store_ss(ptrB+8, tB);
    _mm_store_ss(ptrC+8, tC);
    _mm_store_ss(ptrD+8, tD);
}
static gmx_inline void gmx_simdcall
gmx_mm256_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          __m256 x1, __m256 y1, __m256 z1,
                                          __m256 x2, __m256 y2, __m256 z2,
                                          __m256 x3, __m256 y3, __m256 z3,
                                          __m256 x4, __m256 y4, __m256 z4)
{
    __m256 t1, t2, t3, t4, t5;
    __m128 tA, tB, tC, tD, tE, tF, tG, tH;

    t1 = _mm256_loadu_ps(ptrA);
    t2 = _mm256_loadu_ps(ptrB);
    t3 = _mm256_loadu_ps(ptrC);
    t4 = _mm256_loadu_ps(ptrD);
    tA = _mm_loadu_ps(ptrA+8);
    tB = _mm_loadu_ps(ptrB+8);
    tC = _mm_loadu_ps(ptrC+8);
    tD = _mm_loadu_ps(ptrD+8);

    t5 = _mm256_unpacklo_ps(x1, y1); /* - - - - | y1b x1b y1a x1a */
    x1 = _mm256_unpackhi_ps(x1, y1); /* - - - - | y1d x1d y1c x1c */
    y1 = _mm256_unpacklo_ps(z1, x2); /* - - - - | x2b z1b x2a z1a */
    z1 = _mm256_unpackhi_ps(z1, x2); /* - - - - | x2d z1d x2c z1c */

    x2 = _mm256_unpacklo_ps(y2, z2); /* - - - - | z2b y2b z2a y2a */
    y2 = _mm256_unpackhi_ps(y2, z2); /* - - - - | z2d y2d z2c y2c */
    z2 = _mm256_unpacklo_ps(x3, y3); /* - - - - | y3b x3b y3a x3a */
    x3 = _mm256_unpackhi_ps(x3, y3); /* - - - - | y3d x3d y3c x3c */

    y3 = _mm256_unpacklo_ps(z3, x4); /* - - - - | x4b z3b x4a z3a */
    z3 = _mm256_unpackhi_ps(z3, x4); /* - - - - | x4d z3d x4c z3c */
    x4 = _mm256_unpacklo_ps(y4, z4); /* - - - - | z4b y4b z4a y4a */
    y4 = _mm256_unpackhi_ps(y4, z4); /* - - - - | z4d y4d z4c y4c */

    x2 = _mm256_insertf128_ps(t5, _mm256_castps256_ps128(x2), 0x1); /* z2b y2b z2a y2a | y1b x1b y1a x1a */
    x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); /* z2d y2d z2c y2c | y1d x1d y1c x1c */
    y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(z2), 0x1); /* y3b x3b y3a x3a | x2b z1b x2a z1a */
    z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); /* y3d x3d y3c x3c | x2d z1d x2c z1c */

    z2 = _mm256_shuffle_ps(x2, y1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t5 = _mm256_shuffle_ps(x2, y1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    y1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(1, 0, 1, 0)); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    x1 = _mm256_shuffle_ps(x1, z1, _MM_SHUFFLE(3, 2, 3, 2)); /* y3d x3d z2d y2d | x2d z1d y1d x1d */

    tE = _mm_shuffle_ps(_mm256_castps256_ps128(y3), _mm256_castps256_ps128(x4), _MM_SHUFFLE(1, 0, 1, 0)); /* z4a y4a x4a z3a */
    tF = _mm_shuffle_ps(_mm256_castps256_ps128(y3), _mm256_castps256_ps128(x4), _MM_SHUFFLE(3, 2, 3, 2)); /* z4b y4b x4b z3b */

    tG = _mm_shuffle_ps(_mm256_castps256_ps128(z3), _mm256_castps256_ps128(y4), _MM_SHUFFLE(1, 0, 1, 0)); /* z4c y4c x4c z3c */
    tH = _mm_shuffle_ps(_mm256_castps256_ps128(z3), _mm256_castps256_ps128(y4), _MM_SHUFFLE(3, 2, 3, 2)); /* z4d y4d x4d z3d */

    t1 = _mm256_sub_ps(t1, z2);
    t2 = _mm256_sub_ps(t2, t5);
    t3 = _mm256_sub_ps(t3, y1);
    t4 = _mm256_sub_ps(t4, x1);

    tA = _mm_sub_ps(tA, tE);
    tB = _mm_sub_ps(tB, tF);
    tC = _mm_sub_ps(tC, tG);
    tD = _mm_sub_ps(tD, tH);

    /* Here we store a full 256-bit value and a separate 128-bit one; no overlap can happen */
    _mm256_storeu_ps(ptrA, t1);
    _mm256_storeu_ps(ptrB, t2);
    _mm256_storeu_ps(ptrC, t3);
    _mm256_storeu_ps(ptrD, t4);
    _mm_storeu_ps(ptrA+8, tA);
    _mm_storeu_ps(ptrB+8, tB);
    _mm_storeu_ps(ptrC+8, tC);
    _mm_storeu_ps(ptrD+8, tD);
}
static gmx_inline void gmx_simdcall
gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                          float * gmx_restrict ptrG, float * gmx_restrict ptrH,
                                          __m256 x1, __m256 y1, __m256 z1)
{
    __m256  t1, t2, t3, t4, t5, t6;
    __m256  tA, tB, tC, tD;
    __m128i mask;

    /* Construct a mask without executing any data loads */
    mask = _mm_blend_epi16(_mm_setzero_si128(), _mm_cmpeq_epi16(_mm_setzero_si128(), _mm_setzero_si128()), 0x3F);

    tA = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrE, mask), gmx_mm_maskload_ps(ptrA, mask));
    tB = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrF, mask), gmx_mm_maskload_ps(ptrB, mask));
    tC = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrG, mask), gmx_mm_maskload_ps(ptrC, mask));
    tD = gmx_mm256_set_m128(gmx_mm_maskload_ps(ptrH, mask), gmx_mm_maskload_ps(ptrD, mask));
    t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
    t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */

    t3 = _mm256_shuffle_ps(t1, z1, _MM_SHUFFLE(0, 0, 1, 0)); /* - z1e y1e x1e | - z1a y1a x1a */
    t4 = _mm256_shuffle_ps(t1, z1, _MM_SHUFFLE(0, 1, 3, 2)); /* - z1f y1f x1f | - z1b y1b x1b */
    t5 = _mm256_shuffle_ps(t2, z1, _MM_SHUFFLE(0, 2, 1, 0)); /* - z1g y1g x1g | - z1c y1c x1c */
    t6 = _mm256_shuffle_ps(t2, z1, _MM_SHUFFLE(0, 3, 3, 2)); /* - z1h y1h x1h | - z1d y1d x1d */

    tA = _mm256_sub_ps(tA, t3);
    tB = _mm256_sub_ps(tB, t4);
    tC = _mm256_sub_ps(tC, t5);
    tD = _mm256_sub_ps(tD, t6);

    gmx_mm_maskstore_ps(ptrA, mask, _mm256_castps256_ps128(tA));
    gmx_mm_maskstore_ps(ptrB, mask, _mm256_castps256_ps128(tB));
    gmx_mm_maskstore_ps(ptrC, mask, _mm256_castps256_ps128(tC));
    gmx_mm_maskstore_ps(ptrD, mask, _mm256_castps256_ps128(tD));
    gmx_mm_maskstore_ps(ptrE, mask, _mm256_extractf128_ps(tA, 0x1));
    gmx_mm_maskstore_ps(ptrF, mask, _mm256_extractf128_ps(tB, 0x1));
    gmx_mm_maskstore_ps(ptrG, mask, _mm256_extractf128_ps(tC, 0x1));
    gmx_mm_maskstore_ps(ptrH, mask, _mm256_extractf128_ps(tD, 0x1));
}
static gmx_inline void gmx_simdcall
gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                          float * gmx_restrict ptrG, float * gmx_restrict ptrH,
                                          __m256 x1, __m256 y1, __m256 z1,
                                          __m256 x2, __m256 y2, __m256 z2,
                                          __m256 x3, __m256 y3, __m256 z3)
{
    __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
    __m256 tA, tB, tC, tD, tE, tF, tG, tH;
    __m256 tI, tJ, tK, tL;

    tA = _mm256_loadu_ps(ptrA);
    tB = _mm256_loadu_ps(ptrB);
    tC = _mm256_loadu_ps(ptrC);
    tD = _mm256_loadu_ps(ptrD);
    tE = _mm256_loadu_ps(ptrE);
    tF = _mm256_loadu_ps(ptrF);
    tG = _mm256_loadu_ps(ptrG);
    tH = _mm256_loadu_ps(ptrH);

    t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
    t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */
    t3 = _mm256_unpacklo_ps(z1, x2); /* x2f z1f x2e z1e | x2b z1b x2a z1a */
    t4 = _mm256_unpackhi_ps(z1, x2); /* x2h z1h x2g z1g | x2d z1d x2c z1c */

    t5 = _mm256_unpacklo_ps(y2, z2); /* z2f y2f z2e y2e | z2b y2b z2a y2a */
    t6 = _mm256_unpackhi_ps(y2, z2); /* z2h y2h z2g y2g | z2d y2d z2c y2c */
    t7 = _mm256_unpacklo_ps(x3, y3); /* y3f x3f y3e x3e | y3b x3b y3a x3a */
    t8 = _mm256_unpackhi_ps(x3, y3); /* y3h x3h y3g x3g | y3d x3d y3c x3c */

    t9  = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0));  /* x2e z1e y1e x1e | x2a z1a y1a x1a */
    t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2));  /* x2f z1f y1f x1f | x2b z1b y1b x1b */
    t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0));  /* x2g z1g y1g x1g | x2c z1c y1c x1c */
    t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2));  /* x2h z1h y1h x1h | x2d z1d y1d x1d */

    t1 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0));   /* y3e x3e z2e y2e | y3a x3a z2a y2a */
    t2 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2));   /* y3f x3f z2f y2f | y3b x3b z2b y2b */
    t3 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0));   /* y3g x3g z2g y2g | y3c x3c z2c y2c */
    t4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2));   /* y3h x3h z2h y2h | y3d x3d z2d y2d */

    t5  = gmx_mm256_unpack128lo_ps(t9, t1);  /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t6  = gmx_mm256_unpack128hi_ps(t9, t1);  /* y3e x3e z2e y2e | x2e z1e y1e x1e */
    t7  = gmx_mm256_unpack128lo_ps(t10, t2); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    t8  = gmx_mm256_unpack128hi_ps(t10, t2); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
    t1  = gmx_mm256_unpack128lo_ps(t11, t3); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    t2  = gmx_mm256_unpack128hi_ps(t11, t3); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
    t9  = gmx_mm256_unpack128lo_ps(t12, t4); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
    t10 = gmx_mm256_unpack128hi_ps(t12, t4); /* y3h x3h z2h y2h | x2h z1h y1h x1h */

    tA = _mm256_sub_ps(tA, t5);
    tB = _mm256_sub_ps(tB, t7);
    tC = _mm256_sub_ps(tC, t1);
    tD = _mm256_sub_ps(tD, t9);
    tE = _mm256_sub_ps(tE, t6);
    tF = _mm256_sub_ps(tF, t8);
    tG = _mm256_sub_ps(tG, t2);
    tH = _mm256_sub_ps(tH, t10);

    _mm256_storeu_ps(ptrA, tA);
    _mm256_storeu_ps(ptrB, tB);
    _mm256_storeu_ps(ptrC, tC);
    _mm256_storeu_ps(ptrD, tD);
    _mm256_storeu_ps(ptrE, tE);
    _mm256_storeu_ps(ptrF, tF);
    _mm256_storeu_ps(ptrG, tG);
    _mm256_storeu_ps(ptrH, tH);

    tI = gmx_mm256_set_m128(_mm_load_ss(ptrE+8), _mm_load_ss(ptrA+8));
    tJ = gmx_mm256_set_m128(_mm_load_ss(ptrF+8), _mm_load_ss(ptrB+8));
    tK = gmx_mm256_set_m128(_mm_load_ss(ptrG+8), _mm_load_ss(ptrC+8));
    tL = gmx_mm256_set_m128(_mm_load_ss(ptrH+8), _mm_load_ss(ptrD+8));

    tI = _mm256_unpacklo_ps(tI, tK); /* - - zG zE | - - zC zA */
    tJ = _mm256_unpacklo_ps(tJ, tL); /* - - zH zF | - - zD zB */
    tI = _mm256_unpacklo_ps(tI, tJ); /* zH zG zF zE | zD zC zB zA */

    tI = _mm256_sub_ps(tI, z3);
    tJ = _mm256_permute_ps(tI, _MM_SHUFFLE(1, 1, 1, 1));
    tK = _mm256_permute_ps(tI, _MM_SHUFFLE(2, 2, 2, 2));
    tL = _mm256_permute_ps(tI, _MM_SHUFFLE(3, 3, 3, 3));

    _mm_store_ss(ptrA+8, _mm256_castps256_ps128(tI));
    _mm_store_ss(ptrB+8, _mm256_castps256_ps128(tJ));
    _mm_store_ss(ptrC+8, _mm256_castps256_ps128(tK));
    _mm_store_ss(ptrD+8, _mm256_castps256_ps128(tL));
    _mm_store_ss(ptrE+8, _mm256_extractf128_ps(tI, 0x1));
    _mm_store_ss(ptrF+8, _mm256_extractf128_ps(tJ, 0x1));
    _mm_store_ss(ptrG+8, _mm256_extractf128_ps(tK, 0x1));
    _mm_store_ss(ptrH+8, _mm256_extractf128_ps(tL, 0x1));
}
static gmx_inline void gmx_simdcall
gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                          float * gmx_restrict ptrG, float * gmx_restrict ptrH,
                                          __m256 x1, __m256 y1, __m256 z1,
                                          __m256 x2, __m256 y2, __m256 z2,
                                          __m256 x3, __m256 y3, __m256 z3,
                                          __m256 x4, __m256 y4, __m256 z4)
{
    __m256 t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
    __m256 tA, tB, tC, tD, tE, tF, tG, tH;
    __m256 tI, tJ, tK, tL;

    tA = _mm256_loadu_ps(ptrA);
    tB = _mm256_loadu_ps(ptrB);
    tC = _mm256_loadu_ps(ptrC);
    tD = _mm256_loadu_ps(ptrD);
    tE = _mm256_loadu_ps(ptrE);
    tF = _mm256_loadu_ps(ptrF);
    tG = _mm256_loadu_ps(ptrG);
    tH = _mm256_loadu_ps(ptrH);

    t1 = _mm256_unpacklo_ps(x1, y1); /* y1f x1f y1e x1e | y1b x1b y1a x1a */
    t2 = _mm256_unpackhi_ps(x1, y1); /* y1h x1h y1g x1g | y1d x1d y1c x1c */
    t3 = _mm256_unpacklo_ps(z1, x2); /* x2f z1f x2e z1e | x2b z1b x2a z1a */
    t4 = _mm256_unpackhi_ps(z1, x2); /* x2h z1h x2g z1g | x2d z1d x2c z1c */

    t5 = _mm256_unpacklo_ps(y2, z2); /* z2f y2f z2e y2e | z2b y2b z2a y2a */
    t6 = _mm256_unpackhi_ps(y2, z2); /* z2h y2h z2g y2g | z2d y2d z2c y2c */
    t7 = _mm256_unpacklo_ps(x3, y3); /* y3f x3f y3e x3e | y3b x3b y3a x3a */
    t8 = _mm256_unpackhi_ps(x3, y3); /* y3h x3h y3g x3g | y3d x3d y3c x3c */

    t9  = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0));  /* x2e z1e y1e x1e | x2a z1a y1a x1a */
    t10 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2));  /* x2f z1f y1f x1f | x2b z1b y1b x1b */
    t11 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0));  /* x2g z1g y1g x1g | x2c z1c y1c x1c */
    t12 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2));  /* x2h z1h y1h x1h | x2d z1d y1d x1d */

    t1 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0));   /* y3e x3e z2e y2e | y3a x3a z2a y2a */
    t2 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2));   /* y3f x3f z2f y2f | y3b x3b z2b y2b */
    t3 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(1, 0, 1, 0));   /* y3g x3g z2g y2g | y3c x3c z2c y2c */
    t4 = _mm256_shuffle_ps(t6, t8, _MM_SHUFFLE(3, 2, 3, 2));   /* y3h x3h z2h y2h | y3d x3d z2d y2d */

    t5  = gmx_mm256_unpack128lo_ps(t9, t1);  /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t6  = gmx_mm256_unpack128hi_ps(t9, t1);  /* y3e x3e z2e y2e | x2e z1e y1e x1e */
    t7  = gmx_mm256_unpack128lo_ps(t10, t2); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    t8  = gmx_mm256_unpack128hi_ps(t10, t2); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
    t1  = gmx_mm256_unpack128lo_ps(t11, t3); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    t2  = gmx_mm256_unpack128hi_ps(t11, t3); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
    t9  = gmx_mm256_unpack128lo_ps(t12, t4); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
    t10 = gmx_mm256_unpack128hi_ps(t12, t4); /* y3h x3h z2h y2h | x2h z1h y1h x1h */

    tA = _mm256_sub_ps(tA, t5);
    tB = _mm256_sub_ps(tB, t7);
    tC = _mm256_sub_ps(tC, t1);
    tD = _mm256_sub_ps(tD, t9);
    tE = _mm256_sub_ps(tE, t6);
    tF = _mm256_sub_ps(tF, t8);
    tG = _mm256_sub_ps(tG, t2);
    tH = _mm256_sub_ps(tH, t10);

    _mm256_storeu_ps(ptrA, tA);
    _mm256_storeu_ps(ptrB, tB);
    _mm256_storeu_ps(ptrC, tC);
    _mm256_storeu_ps(ptrD, tD);
    _mm256_storeu_ps(ptrE, tE);
    _mm256_storeu_ps(ptrF, tF);
    _mm256_storeu_ps(ptrG, tG);
    _mm256_storeu_ps(ptrH, tH);

    tI = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8), _mm_loadu_ps(ptrA+8));
    tJ = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8), _mm_loadu_ps(ptrB+8));
    tK = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8), _mm_loadu_ps(ptrC+8));
    tL = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8), _mm_loadu_ps(ptrD+8));

    t1 = _mm256_unpacklo_ps(z3, x4); /* x4f z3f x4e z3e | x4b z3b x4a z3a */
    t2 = _mm256_unpackhi_ps(z3, x4); /* x4h z3h x4g z3g | x4d z3d x4c z3c */
    t3 = _mm256_unpacklo_ps(y4, z4); /* z4f y4f z4e y4e | z4b y4b z4a y4a */
    t4 = _mm256_unpackhi_ps(y4, z4); /* z4h y4h z4g y4g | z4d y4d z4c y4c */

    t5 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0)); /* z4e y4e x4e z3e | z4a y4a x4a z3a */
    t6 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2)); /* z4f y4f x4f z3f | z4b y4b x4b z3b */
    t7 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(1, 0, 1, 0)); /* z4g y4g x4g z3g | z4c y4c x4c z3c */
    t8 = _mm256_shuffle_ps(t2, t4, _MM_SHUFFLE(3, 2, 3, 2)); /* z4h y4h x4h z3h | z4d y4d x4d z3d */

    tI = _mm256_sub_ps(tI, t5);
    tJ = _mm256_sub_ps(tJ, t6);
    tK = _mm256_sub_ps(tK, t7);
    tL = _mm256_sub_ps(tL, t8);

    _mm_storeu_ps(ptrA+8, _mm256_castps256_ps128(tI));
    _mm_storeu_ps(ptrB+8, _mm256_castps256_ps128(tJ));
    _mm_storeu_ps(ptrC+8, _mm256_castps256_ps128(tK));
    _mm_storeu_ps(ptrD+8, _mm256_castps256_ps128(tL));
    _mm_storeu_ps(ptrE+8, _mm256_extractf128_ps(tI, 0x1));
    _mm_storeu_ps(ptrF+8, _mm256_extractf128_ps(tJ, 0x1));
    _mm_storeu_ps(ptrG+8, _mm256_extractf128_ps(tK, 0x1));
    _mm_storeu_ps(ptrH+8, _mm256_extractf128_ps(tL, 0x1));
}
static gmx_inline void gmx_simdcall
gmx_mm256_update_iforce_1atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
                                         float * gmx_restrict fptr,
                                         float * gmx_restrict fshiftptr)
{
    __m128 t1, t2, t3;

    fix1 = _mm256_hadd_ps(fix1, fix1);
    fiy1 = _mm256_hadd_ps(fiy1, fiz1);
    fix1 = _mm256_hadd_ps(fix1, fiy1); /* fiz1 fiy1 fix1 fix1 (in both lanes) */

    /* Add across the two lanes */
    t1 = _mm_add_ps(_mm256_castps256_ps128(fix1), _mm256_extractf128_ps(fix1, 0x1));

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2, (__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3, (__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2, t1);
    t3 = _mm_add_ps(t3, t1);

    _mm_store_ss(fptr, t2);
    _mm_storeh_pi((__m64 *)(fptr+1), t2);
    _mm_store_ss(fshiftptr, t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1), t3);
}
static gmx_inline void gmx_simdcall
gmx_mm256_update_iforce_3atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
                                         __m256 fix2, __m256 fiy2, __m256 fiz2,
                                         __m256 fix3, __m256 fiy3, __m256 fiz3,
                                         float * gmx_restrict fptr,
                                         float * gmx_restrict fshiftptr)
{
    __m256 t1, t2, t3;
    __m128 tA, tB, tC;

    fix1 = _mm256_hadd_ps(fix1, fiy1);                /* Y1g+Y1h Y1e+Y1f X1g+X1h X1e+X1f | Y1c+Y1d Y1a+Y1b X1c+X1d X1a+X1b */
    fiz1 = _mm256_hadd_ps(fiz1, fix2);                /* X2g+X2h X2e+X2f Z1g+Z1h Z1e+Z1f | X2c+X2d X2a+X2b Z1c+Z1d Z1a+Z1b */
    fiy2 = _mm256_hadd_ps(fiy2, fiz2);                /* Z2g+Z2h Z2e+Z2f Y2g+Y2h Y2e+Y2f | Z2c+Z2d Z2a+Z2b Y2c+Y2d Y2a+Y2b */
    fix3 = _mm256_hadd_ps(fix3, fiy3);                /* Y3g+Y3h Y3e+Y3f X3g+X3h X3e+X3f | Y3c+Y3d Y3a+Y3b X3c+X3d X3a+X3b */
    fiz3 = _mm256_hadd_ps(fiz3, _mm256_setzero_ps()); /* 0 0 Z3g+Z3h Z3e+Z3f | 0 0 Z3c+Z3d Z3a+Z3b */

    fix1 = _mm256_hadd_ps(fix1, fiz1);                /* X2e-h Z1e-h Y1e-h X1e-h | X2a-d Z1a-d Y1a-d X1a-d */
    fiy2 = _mm256_hadd_ps(fiy2, fix3);                /* Y3e-h X3e-h Z2e-h Y2e-h | Y3a-d X3a-d Z2a-d Y2a-d */
    fiz3 = _mm256_hadd_ps(fiz3, _mm256_setzero_ps()); /* 0 0 0 Z3e-h | 0 0 0 Z3a-d */

    /* Add across the two lanes by swapping and adding back */
    t1 = gmx_mm256_unpack128lo_ps(fix1, fiy2); /* Y3a-d X3a-d Z2a-d Y2a-d | X2a-d Z1a-d Y1a-d X1a-d */
    t2 = gmx_mm256_unpack128hi_ps(fix1, fiy2); /* Y3e-h X3e-h Z2e-h Y2e-h | X2e-h Z1e-h Y1e-h X1e-h */
    t1 = _mm256_add_ps(t1, t2);                /* y3 x3 z2 y2 | x2 z1 y1 x1 */

    tA = _mm_add_ps(_mm256_castps256_ps128(fiz3), _mm256_extractf128_ps(fiz3, 0x1)); /* 0 0 0 z3 */

    t3 = _mm256_loadu_ps(fptr);
    t3 = _mm256_add_ps(t3, t1);
    _mm256_storeu_ps(fptr, t3);
    tB = _mm_load_ss(fptr+8);
    tB = _mm_add_ss(tB, tA);
    _mm_store_ss(fptr+8, tB);

    /* Add up shift force */
    tB = _mm256_extractf128_ps(t1, 0x1);                                   /* y3 x3 z2 y2 */
    tC = _mm_shuffle_ps(_mm256_castps256_ps128(t1), tB, _MM_SHUFFLE(1, 0, 3, 3)); /* z2 y2 x2 x2 */
    tB = _mm_shuffle_ps(tB, tA, _MM_SHUFFLE(1, 0, 3, 2));                  /* 0 z3 y3 x3 */
    tC = _mm_permute_ps(tC, _MM_SHUFFLE(3, 3, 2, 0));                      /* - z2 y2 x2 */

    tB = _mm_add_ps(tB, _mm256_castps256_ps128(t1));
    tA = _mm_add_ps(tB, tC);                                               /* - z y x */

    tA = _mm_blend_ps(_mm_setzero_ps(), tA, 0x7);                          /* 0 z y x */

    tC = _mm_loadu_ps(fshiftptr);
    tC = _mm_add_ps(tC, tA);
    _mm_storeu_ps(fshiftptr, tC);
}
static gmx_inline void gmx_simdcall
gmx_mm256_update_iforce_4atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
                                         __m256 fix2, __m256 fiy2, __m256 fiz2,
                                         __m256 fix3, __m256 fiy3, __m256 fiz3,
                                         __m256 fix4, __m256 fiy4, __m256 fiz4,
                                         float * gmx_restrict fptr,
                                         float * gmx_restrict fshiftptr)
{
    __m256 t1, t2, t3;
    __m128 tA, tB, tC;

    fix1 = _mm256_hadd_ps(fix1, fiy1); /* Y1g+Y1h Y1e+Y1f X1g+X1h X1e+X1f | Y1c+Y1d Y1a+Y1b X1c+X1d X1a+X1b */
    fiz1 = _mm256_hadd_ps(fiz1, fix2); /* X2g+X2h X2e+X2f Z1g+Z1h Z1e+Z1f | X2c+X2d X2a+X2b Z1c+Z1d Z1a+Z1b */
    fiy2 = _mm256_hadd_ps(fiy2, fiz2); /* Z2g+Z2h Z2e+Z2f Y2g+Y2h Y2e+Y2f | Z2c+Z2d Z2a+Z2b Y2c+Y2d Y2a+Y2b */
    fix3 = _mm256_hadd_ps(fix3, fiy3); /* Y3g+Y3h Y3e+Y3f X3g+X3h X3e+X3f | Y3c+Y3d Y3a+Y3b X3c+X3d X3a+X3b */
    fiz3 = _mm256_hadd_ps(fiz3, fix4); /* X4g+X4h X4e+X4f Z3g+Z3h Z3e+Z3f | X4c+X4d X4a+X4b Z3c+Z3d Z3a+Z3b */
    fiy4 = _mm256_hadd_ps(fiy4, fiz4); /* Z4g+Z4h Z4e+Z4f Y4g+Y4h Y4e+Y4f | Z4c+Z4d Z4a+Z4b Y4c+Y4d Y4a+Y4b */

    fix1 = _mm256_hadd_ps(fix1, fiz1); /* X2e-h Z1e-h Y1e-h X1e-h | X2a-d Z1a-d Y1a-d X1a-d */
    fiy2 = _mm256_hadd_ps(fiy2, fix3); /* Y3e-h X3e-h Z2e-h Y2e-h | Y3a-d X3a-d Z2a-d Y2a-d */
    fiz3 = _mm256_hadd_ps(fiz3, fiy4); /* Z4e-h Y4e-h X4e-h Z3e-h | Z4a-d Y4a-d X4a-d Z3a-d */

    /* Add across the two lanes by swapping and adding back */
    t1 = gmx_mm256_unpack128lo_ps(fix1, fiy2); /* Y3a-d X3a-d Z2a-d Y2a-d | X2a-d Z1a-d Y1a-d X1a-d */
    t2 = gmx_mm256_unpack128hi_ps(fix1, fiy2); /* Y3e-h X3e-h Z2e-h Y2e-h | X2e-h Z1e-h Y1e-h X1e-h */
    t1 = _mm256_add_ps(t1, t2);                /* y3 x3 z2 y2 | x2 z1 y1 x1 */

    tA = _mm_add_ps(_mm256_castps256_ps128(fiz3), _mm256_extractf128_ps(fiz3, 0x1)); /* z4 y4 x4 z3 */

    t3 = _mm256_loadu_ps(fptr);
    t3 = _mm256_add_ps(t3, t1);
    _mm256_storeu_ps(fptr, t3);

    tB = _mm_loadu_ps(fptr+8);
    tB = _mm_add_ps(tB, tA);
    _mm_storeu_ps(fptr+8, tB);

    /* Add up shift force */
    tB = _mm256_extractf128_ps(t1, 0x1);                                   /* y3 x3 z2 y2 */
    tC = _mm_shuffle_ps(_mm256_castps256_ps128(t1), tB, _MM_SHUFFLE(1, 0, 3, 3)); /* z2 y2 x2 x2 */
    tB = _mm_shuffle_ps(tB, tA, _MM_SHUFFLE(1, 0, 3, 2));                  /* 0 z3 y3 x3 */
    tC = _mm_permute_ps(tC, _MM_SHUFFLE(3, 3, 2, 0));                      /* - z2 y2 x2 */
    tA = _mm_permute_ps(tA, _MM_SHUFFLE(0, 3, 2, 1));                      /* - z4 y4 x4 */

    tB = _mm_add_ps(tB, _mm256_castps256_ps128(t1));
    tA = _mm_add_ps(tA, tC);
    tA = _mm_add_ps(tA, tB);

    tA = _mm_blend_ps(_mm_setzero_ps(), tA, 0x7);                          /* 0 z y x */

    tC = _mm_loadu_ps(fshiftptr);
    tC = _mm_add_ps(tC, tA);
    _mm_storeu_ps(fshiftptr, tC);
}
static gmx_inline void gmx_simdcall
gmx_mm256_update_1pot_ps(__m256 pot1, float * gmx_restrict ptrA)
{
    __m128 t1;

    pot1 = _mm256_hadd_ps(pot1, pot1);
    pot1 = _mm256_hadd_ps(pot1, pot1);

    t1 = _mm_add_ps(_mm256_castps256_ps128(pot1), _mm256_extractf128_ps(pot1, 0x1));

    _mm_store_ss(ptrA, _mm_add_ss(_mm_load_ss(ptrA), t1));
}
static gmx_inline void gmx_simdcall
gmx_mm256_update_2pot_ps(__m256 pot1, float * gmx_restrict ptrA,
                         __m256 pot2, float * gmx_restrict ptrB)
{
    __m128 t1, t2;

    pot1 = _mm256_hadd_ps(pot1, pot2);
    pot1 = _mm256_hadd_ps(pot1, pot1);

    t1 = _mm_add_ps(_mm256_castps256_ps128(pot1), _mm256_extractf128_ps(pot1, 0x1));

    t2 = _mm_permute_ps(t1, _MM_SHUFFLE(1, 1, 1, 1));
    _mm_store_ss(ptrA, _mm_add_ss(_mm_load_ss(ptrA), t1));
    _mm_store_ss(ptrB, _mm_add_ss(_mm_load_ss(ptrB), t2));
}
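
/*
 * Usage sketch (illustrative; 'velecsum', 'vvdwsum', and the energy
 * buffers are hypothetical names): after the inner loop, the eight
 * partial energies in each accumulator are reduced across lanes and
 * added to their scalar output buckets in a single call.
 *
 *     gmx_mm256_update_2pot_ps(velecsum, energygrp_elec+ggid,
 *                              vvdwsum,  energygrp_vdw+ggid);
 */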
#endif /* _kernelutil_x86_avx_256_single_h_ */