/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_simd_utils_ibm_qpx_h_
#define _nbnxn_kernel_simd_utils_ibm_qpx_h_

typedef gmx_mm_pr gmx_exclfilter;
static const int filter_stride = 1;

/* The 4xn kernel operates on 4-wide i-force registers */
typedef gmx_mm_pr gmx_mm_pr4;
/* This file contains all functions/macros for the SIMD kernels
 * which have explicit dependencies on the j-cluster size and/or SIMD-width.
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */
/* Collect all [0123] elements of the 4 inputs to out[0123], respectively */
static gmx_inline void
gmx_transpose_4_ps(gmx_mm_pr a, gmx_mm_pr b, gmx_mm_pr c, gmx_mm_pr d,
                   gmx_mm_pr *out0, gmx_mm_pr *out1,
                   gmx_mm_pr *out2, gmx_mm_pr *out3)
{
    /* Prepare control vectors for swizzling. In its third input,
       vec_perm accepts indices into the effective 8-wide SIMD vector
       created by concatenating its first two inputs. Those indices
       map data from the input vectors to the output vector.

       vec_gpci() converts an octal literal of the indices into the
       correct form for vec_perm() to use. That form is an octal digit
       in bits 0-2 of the mantissa of each double. */
    gmx_mm_pr p6420 = vec_gpci(06420);
    gmx_mm_pr p7531 = vec_gpci(07531);

    /* Four-way swizzle (i.e. transpose) of vectors a = a0a1a2a3, etc. */
    gmx_mm_pr b2b0a2a0 = vec_perm(a, b, p6420);
    gmx_mm_pr b3b1a3a1 = vec_perm(a, b, p7531);
    gmx_mm_pr d2d0c2c0 = vec_perm(c, d, p6420);
    gmx_mm_pr d3d1c3c1 = vec_perm(c, d, p7531);
    *out0 = vec_perm(d2d0c2c0, b2b0a2a0, p7531);
    *out1 = vec_perm(d3d1c3c1, b3b1a3a1, p7531);
    *out2 = vec_perm(d2d0c2c0, b2b0a2a0, p6420);
    *out3 = vec_perm(d3d1c3c1, b3b1a3a1, p6420);
}
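/* Worked example (an illustrative sketch following the variable-name
 * convention above, not part of the kernel API): the octal digits of
 * the vec_gpci() literal, read left to right, give the source index of
 * each output element, with 0-3 addressing elements of the first
 * vec_perm() input and 4-7 elements of the second. So
 * vec_perm(a, b, p6420) yields {b2, b0, a2, a0}, and chaining the two
 * permute stages produces the full transpose:
 *
 *     gmx_mm_pr t0, t1, t2, t3;
 *     gmx_transpose_4_ps(a, b, c, d, &t0, &t1, &t2, &t3);
 *     // t0 = {a0, b0, c0, d0}, t1 = {a1, b1, c1, d1},
 *     // t2 = {a2, b2, c2, d2}, t3 = {a3, b3, c3, d3}
 */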
/* Collect elements 0 and 1 of the 4 inputs to out0 and out1, respectively */
static gmx_inline void
gmx_shuffle_4_ps_fil01_to_2_ps(gmx_mm_pr a, gmx_mm_pr b, gmx_mm_pr c, gmx_mm_pr d,
                               gmx_mm_pr *out0, gmx_mm_pr *out1)
{
    gmx_mm_pr p6420 = vec_gpci(06420);
    gmx_mm_pr p7531 = vec_gpci(07531);

    /* Partial four-way swizzle of vectors a = a0a1a2a3, etc. */
    gmx_mm_pr b2b0a2a0 = vec_perm(a, b, p6420);
    gmx_mm_pr b3b1a3a1 = vec_perm(a, b, p7531);
    gmx_mm_pr d2d0c2c0 = vec_perm(c, d, p6420);
    gmx_mm_pr d3d1c3c1 = vec_perm(c, d, p7531);
    *out0 = vec_perm(d2d0c2c0, b2b0a2a0, p7531);
    *out1 = vec_perm(d3d1c3c1, b3b1a3a1, p7531);
}
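/* Usage sketch (illustrative): after
 *
 *     gmx_shuffle_4_ps_fil01_to_2_ps(a, b, c, d, &e0, &e1);
 *
 * e0 = {a0, b0, c0, d0} and e1 = {a1, b1, c1, d1}, i.e. the first two
 * rows of the full transpose, at the cost of six permutes instead of
 * the eight needed by gmx_transpose_4_ps().
 */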
/* Collect element 2 of the 4 inputs in the returned register */
static gmx_inline gmx_mm_pr
gmx_shuffle_4_ps_fil2_to_1_ps(gmx_mm_pr a, gmx_mm_pr b, gmx_mm_pr c, gmx_mm_pr d)
{
    gmx_mm_pr p6420 = vec_gpci(06420);

    /* Partial four-way swizzle of vectors a = a0a1a2a3, etc. */
    gmx_mm_pr b2b0a2a0 = vec_perm(a, b, p6420);
    gmx_mm_pr d2d0c2c0 = vec_perm(c, d, p6420);
    return vec_perm(d2d0c2c0, b2b0a2a0, p6420);
}
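/* Usage sketch (illustrative):
 *
 *     gmx_mm_pr e2 = gmx_shuffle_4_ps_fil2_to_1_ps(a, b, c, d);
 *     // e2 = {a2, b2, c2, d2}
 */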
/* Align a stack-based thread-local working array. Table loads on QPX
 * use the array, but most other implementations do not. */
static gmx_inline int *
prepare_table_load_buffer(const int *array)
{
    return gmx_simd_align_int(array);
}
static gmx_inline void
load_table_f(const real *tab_coul_FDV0, gmx_epi32 ti_S, int *ti,
             gmx_mm_pr *ctab0_S, gmx_mm_pr *ctab1_S)
{
#ifdef NDEBUG
    /* Just like 256-bit AVX, we need to use memory to get indices
       into integer registers efficiently. */
    vec_st(ti_S, 0, ti);
#else
    vec_sta(ti_S, 0, ti);
#endif

    /* Here we load 4 aligned reals, but we need just 2 elements of each. */
    gmx_mm_pr a = gmx_load_pr(tab_coul_FDV0 + ti[0] * nbfp_stride);
    gmx_mm_pr b = gmx_load_pr(tab_coul_FDV0 + ti[1] * nbfp_stride);
    gmx_mm_pr c = gmx_load_pr(tab_coul_FDV0 + ti[2] * nbfp_stride);
    gmx_mm_pr d = gmx_load_pr(tab_coul_FDV0 + ti[3] * nbfp_stride);

    gmx_shuffle_4_ps_fil01_to_2_ps(a, b, c, d, ctab0_S, ctab1_S);
}
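/* Usage sketch (illustrative; assumes the layout implied by the name
 * tab_coul_FDV0, i.e. four reals per table point with the force F in
 * element 0 and the force difference D in element 1): after
 *
 *     load_table_f(tab_coul_FDV0, ti_S, ti, &ctab0_S, &ctab1_S);
 *
 * ctab0_S holds F for the four table indices stored in ti_S, and
 * ctab1_S holds the corresponding D values.
 */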
static gmx_inline void
load_table_f_v(const real *tab_coul_FDV0,
               gmx_epi32 ti_S, int *ti,
               gmx_mm_pr *ctab0_S, gmx_mm_pr *ctab1_S,
               gmx_mm_pr *ctabv_S)
{
#ifdef NDEBUG
    /* Just like 256-bit AVX, we need to use memory to get indices
       into integer registers efficiently. */
    vec_st(ti_S, 0, ti);
#else
    vec_sta(ti_S, 0, ti);
#endif

    /* Here we load 4 aligned reals, but we need just 3 elements of each. */
    gmx_mm_pr a = gmx_load_pr(tab_coul_FDV0 + ti[0] * nbfp_stride);
    gmx_mm_pr b = gmx_load_pr(tab_coul_FDV0 + ti[1] * nbfp_stride);
    gmx_mm_pr c = gmx_load_pr(tab_coul_FDV0 + ti[2] * nbfp_stride);
    gmx_mm_pr d = gmx_load_pr(tab_coul_FDV0 + ti[3] * nbfp_stride);

    gmx_shuffle_4_ps_fil01_to_2_ps(a, b, c, d, ctab0_S, ctab1_S);
    *ctabv_S = gmx_shuffle_4_ps_fil2_to_1_ps(a, b, c, d);
}
/* Not required for BlueGene/Q */

/* Sum the elements within each input register and return the four
 * sums in the elements of the returned register. */
static gmx_inline gmx_mm_pr
gmx_mm_transpose_sum4_pr(gmx_mm_pr a, gmx_mm_pr b,
                         gmx_mm_pr c, gmx_mm_pr d)
{
    gmx_mm_pr a0b0c0d0, a1b1c1d1, a2b2c2d2, a3b3c3d3;
    gmx_transpose_4_ps(a, b, c, d,
                       &a0b0c0d0, &a1b1c1d1,
                       &a2b2c2d2, &a3b3c3d3);

    /* Now reduce the transposed vectors */
    gmx_mm_pr sum01 = gmx_add_pr(a0b0c0d0, a1b1c1d1);
    gmx_mm_pr sum23 = gmx_add_pr(a2b2c2d2, a3b3c3d3);
    return gmx_add_pr(sum01, sum23);
}
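/* Usage sketch (illustrative): the returned register packs one
 * horizontal sum per input,
 *
 *     gmx_mm_pr s = gmx_mm_transpose_sum4_pr(a, b, c, d);
 *     // s = {a0+a1+a2+a3, b0+b1+b2+b3, c0+c1+c2+c3, d0+d1+d2+d3}
 *
 * a common pattern when reducing per-lane SIMD accumulators.
 */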
/* In double precision on x86 it can be faster to first calculate
 * single precision square roots for two double precision registers at
 * once and then use double precision Newton-Raphson iteration to
 * reach full double precision. For QPX, we just wrap the usual
 * reciprocal square roots.
 */
static gmx_inline void
gmx_mm_invsqrt2_pd(gmx_mm_pr in0, gmx_mm_pr in1,
                   gmx_mm_pr *out0, gmx_mm_pr *out1)
{
    *out0 = gmx_invsqrt_pr(in0);
    *out1 = gmx_invsqrt_pr(in1);
}
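/* Usage sketch (illustrative): computing 1/sqrt elementwise for two
 * registers of squared distances in one call,
 *
 *     gmx_mm_pr rinv0, rinv1;
 *     gmx_mm_invsqrt2_pd(rsq0, rsq1, &rinv0, &rinv1);
 *
 * On QPX this is just two independent gmx_invsqrt_pr() calls; the
 * paired interface exists so kernels can share code with x86, where
 * computing the pair together can be faster.
 */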
static gmx_inline void
load_lj_pair_params(const real *nbfp, const int *type, int aj,
                    gmx_mm_pr *c6_S, gmx_mm_pr *c12_S)
{
    /* Here we load 4 aligned reals, but we need just 2 elements of each. */
    gmx_mm_pr a = gmx_load_pr(nbfp + type[aj+0] * nbfp_stride);
    gmx_mm_pr b = gmx_load_pr(nbfp + type[aj+1] * nbfp_stride);
    gmx_mm_pr c = gmx_load_pr(nbfp + type[aj+2] * nbfp_stride);
    gmx_mm_pr d = gmx_load_pr(nbfp + type[aj+3] * nbfp_stride);

    gmx_shuffle_4_ps_fil01_to_2_ps(a, b, c, d, c6_S, c12_S);
}
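/* Usage sketch (illustrative; assumes each nbfp row stores the
 * Lennard-Jones c6 in element 0 and c12 in element 1, as the shuffle
 * into c6_S/c12_S implies): gathers the LJ parameters for the four
 * j-atoms starting at index aj,
 *
 *     gmx_mm_pr c6_S, c12_S;
 *     load_lj_pair_params(nbfp, type, aj, &c6_S, &c12_S);
 */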
/* Define USE_FUNCTIONS_FOR_QPX to get the static inline functions
 * that seem to exhaust xlC 12.1 during kernel compilation */

#ifndef USE_FUNCTIONS_FOR_QPX

#define gmx_load_exclusion_filter(a) vec_ldia(0, (int *) a)
#define gmx_load_interaction_mask_pb(a, b) vec_ld(a, (real *) b)

#else /* USE_FUNCTIONS_FOR_QPX */
static gmx_inline gmx_exclfilter gmx_load_exclusion_filter(const unsigned *a)
{
#ifdef NDEBUG
    return vec_ldia(0, (int *) a);
#else
    return vec_ldiaa(0, (int *) a);
#endif
}
/* Code for handling loading and applying exclusion masks. Note that
   parameter a is not treated as an array index; it is added directly
   to b, so it must be a byte offset. */
static gmx_inline gmx_mm_pb gmx_load_interaction_mask_pb(long a, const real *b)
{
#ifdef NDEBUG
    return vec_ld(a, (real *) b);
#else
    return vec_lda(a, (real *) b);
#endif
}
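/* Usage sketch (illustrative): because the first argument is a byte
 * offset, loading the mask for the n-th 4-wide group of reals would be
 *
 *     gmx_mm_pb mask = gmx_load_interaction_mask_pb(n * 4 * sizeof(real), masks);
 *
 * where masks is a suitably aligned mask array (the names n and masks
 * are hypothetical).
 */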
#endif /* USE_FUNCTIONS_FOR_QPX */

#endif /* _nbnxn_kernel_simd_utils_ibm_qpx_h_ */