/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2011-2012, The GROMACS Development Team
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_sse2_single_h_
#define _kernelutil_x86_sse2_single_h_

/* We require SSE2 now! */

#include <math.h>

#include "gmx_x86_sse2.h"


/* Normal sum of four xmm registers */
#define gmx_mm_sum4_ps(t0,t1,t2,t3)  _mm_add_ps(_mm_add_ps(t0,t1),_mm_add_ps(t2,t3))
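
/*
 * Illustrative sketch (not from the original source; the accumulator names
 * are hypothetical): summing four partial force accumulators. The pairwise
 * tree (t0+t1)+(t2+t3) is preferred over a linear sum ((t0+t1)+t2)+t3
 * because it shortens the dependency chain.
 *
 *     __m128 ftot = gmx_mm_sum4_ps(facc0,facc1,facc2,facc3);
 */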

static gmx_inline __m128
gmx_mm_calc_rsq_ps(__m128 dx, __m128 dy, __m128 dz)
{
    return _mm_add_ps( _mm_add_ps( _mm_mul_ps(dx,dx), _mm_mul_ps(dy,dy) ), _mm_mul_ps(dz,dz) );
}
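
/*
 * Example use of gmx_mm_calc_rsq_ps() (illustrative; variable names are
 * hypothetical), computing the squared distances between one i particle
 * and four j particles whose coordinates are already in registers:
 *
 *     dx  = _mm_sub_ps(ix1,jx1);
 *     dy  = _mm_sub_ps(iy1,jy1);
 *     dz  = _mm_sub_ps(iz1,jz1);
 *     rsq = gmx_mm_calc_rsq_ps(dx,dy,dz);    lane n holds dx*dx+dy*dy+dz*dz
 */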

static int
gmx_mm_any_lt(__m128 a, __m128 b)
{
    return _mm_movemask_ps(_mm_cmplt_ps(a,b));
}
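
/*
 * gmx_mm_any_lt() returns a nonzero bitmask if a<b holds in any of the
 * four lanes. A typical (hypothetical) cutoff test:
 *
 *     if (gmx_mm_any_lt(rsq,rcutoff2))
 *     {
 *         at least one pair in this quad is inside the cutoff
 *     }
 */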

/* Load a single float from each of four places and merge into one xmm register */

static __m128
gmx_mm_load_4real_swizzle_ps(const float * gmx_restrict ptrA,
                             const float * gmx_restrict ptrB,
                             const float * gmx_restrict ptrC,
                             const float * gmx_restrict ptrD)
{
    __m128 t1,t2;

    t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA),_mm_load_ss(ptrC));
    t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB),_mm_load_ss(ptrD));
    return _mm_unpacklo_ps(t1,t2);
}
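
/*
 * Gather sketch (hypothetical array and index names): loading the charges
 * of four j atoms into one register, so lanes 0..3 of the result hold
 * *ptrA..*ptrD in order:
 *
 *     jq = gmx_mm_load_4real_swizzle_ps(charge+jnrA,charge+jnrB,
 *                                       charge+jnrC,charge+jnrD);
 */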

static void
gmx_mm_store_4real_swizzle_ps(float * gmx_restrict ptrA,
                              float * gmx_restrict ptrB,
                              float * gmx_restrict ptrC,
                              float * gmx_restrict ptrD,
                              __m128 xmm1)
{
    __m128 t2,t3,t4;

    t3       = _mm_movehl_ps(_mm_setzero_ps(),xmm1);
    t2       = _mm_shuffle_ps(xmm1,xmm1,_MM_SHUFFLE(1,1,1,1));
    t4       = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    _mm_store_ss(ptrA,xmm1);
    _mm_store_ss(ptrB,t2);
    _mm_store_ss(ptrC,t3);
    _mm_store_ss(ptrD,t4);
}

/* Like the store above, but adds the values to those already in memory */
static void
gmx_mm_increment_4real_swizzle_ps(float * gmx_restrict ptrA,
                                  float * gmx_restrict ptrB,
                                  float * gmx_restrict ptrC,
                                  float * gmx_restrict ptrD, __m128 xmm1)
{
    __m128 tmp;

    tmp = gmx_mm_load_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD);
    tmp = _mm_add_ps(tmp,xmm1);
    gmx_mm_store_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,tmp);
}
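
/*
 * This is the read-modify-write counterpart of the gather/store pair above;
 * a hypothetical use (names for illustration only) is accumulating four
 * per-atom values in one call:
 *
 *     gmx_mm_increment_4real_swizzle_ps(val+jnrA,val+jnrB,
 *                                       val+jnrC,val+jnrD,vquad);
 */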


static void
gmx_mm_load_4pair_swizzle_ps(const float * gmx_restrict p1,
                             const float * gmx_restrict p2,
                             const float * gmx_restrict p3,
                             const float * gmx_restrict p4,
                             __m128 * gmx_restrict c6,
                             __m128 * gmx_restrict c12)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p1);   /* - - c12a  c6a */
    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p2);   /* - - c12b  c6b */
    t3   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p3);   /* - - c12c  c6c */
    t4   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p4);   /* - - c12d  c6d */
    t1   = _mm_unpacklo_ps(t1,t2);
    t2   = _mm_unpacklo_ps(t3,t4);
    *c6  = _mm_movelh_ps(t1,t2);
    *c12 = _mm_movehl_ps(t2,t1);
}
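
/*
 * Each pN argument is expected to point at a consecutive {c6,c12} pair.
 * A hypothetical call gathering Lennard-Jones parameters for four atom
 * pairs (the vdwparam layout and index names are assumed for illustration):
 *
 *     gmx_mm_load_4pair_swizzle_ps(vdwparam+2*tjA,vdwparam+2*tjB,
 *                                  vdwparam+2*tjC,vdwparam+2*tjD,
 *                                  &c6,&c12);
 */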

/* Routines to load 1-4 rvec from 4 places.
 * We mainly use these to load coordinates. The extra routines
 * are very efficient for the water-water loops, since we e.g.
 * know that a TIP4P water has 4 atoms, so we should load 12 floats and shuffle.
 */
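
/*
 * Memory layout sketch for the loaders below, e.g. three rvecs stored as
 * nine consecutive floats:
 *
 *     xyz: x1 y1 z1 x2 y2 z2 x3 y3 z3
 *
 * Two unaligned 4-float loads plus one scalar load fetch all nine values;
 * shuffles then broadcast each component across all four lanes of its own
 * register.
 */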


static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1,
                                         __m128 * gmx_restrict y1,
                                         __m128 * gmx_restrict z1)
{
    __m128 t1,t2,t3,t4;

    t1   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
    t2   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz);
    t3   = _mm_load_ss(xyz_shift+2);
    t4   = _mm_load_ss(xyz+2);
    t1   = _mm_add_ps(t1,t2);
    t3   = _mm_add_ss(t3,t4);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}
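
/*
 * Hypothetical use: load the periodic-shift-corrected position of a single
 * i atom and broadcast each component, so it can be subtracted from four
 * j-atom coordinates at once (pointer arithmetic shown for illustration):
 *
 *     gmx_mm_load_shift_and_1rvec_broadcast_ps(shiftvec+is3,x+ix3,
 *                                              &ix1,&iy1,&iz1);
 */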


static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA   = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift);
    tB   = _mm_load_ss(xyz_shift+2);

    t1   = _mm_loadu_ps(xyz);
    t2   = _mm_loadu_ps(xyz+4);
    t3   = _mm_load_ss(xyz+8);

    tA   = _mm_movelh_ps(tA,tB);
    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1   = _mm_add_ps(t1,t4);
    t2   = _mm_add_ps(t2,t5);
    t3   = _mm_add_ss(t3,t6);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
}


static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
                                         const float * gmx_restrict xyz,
                                         __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                         __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                         __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                         __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 tA,tB;
    __m128 t1,t2,t3,t4,t5,t6;

    tA   = _mm_castpd_ps(_mm_load_sd((const double *)xyz_shift));
    tB   = _mm_load_ss(xyz_shift+2);

    t1   = _mm_loadu_ps(xyz);
    t2   = _mm_loadu_ps(xyz+4);
    t3   = _mm_loadu_ps(xyz+8);

    tA   = _mm_movelh_ps(tA,tB);
    t4   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(0,2,1,0));
    t5   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(1,0,2,1));
    t6   = _mm_shuffle_ps(tA,tA,_MM_SHUFFLE(2,1,0,2));

    t1   = _mm_add_ps(t1,t4);
    t2   = _mm_add_ps(t2,t5);
    t3   = _mm_add_ps(t3,t6);

    *x1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(0,0,0,0));
    *y1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(1,1,1,1));
    *z1  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(2,2,2,2));
    *x2  = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,3,3));
    *y2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,0));
    *z2  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(1,1,1,1));
    *x3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(2,2,2,2));
    *y3  = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(3,3,3,3));
    *z3  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(0,0,0,0));
    *x4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(1,1,1,1));
    *y4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(2,2,2,2));
    *z4  = _mm_shuffle_ps(t3,t3,_MM_SHUFFLE(3,3,3,3));
}


static void
gmx_mm_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 *      gmx_restrict x1,
                                  __m128 *      gmx_restrict y1,
                                  __m128 *      gmx_restrict z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8;
    t1  = _mm_castpd_ps(_mm_load_sd((const double *)ptrA));
    t2  = _mm_castpd_ps(_mm_load_sd((const double *)ptrB));
    t3  = _mm_castpd_ps(_mm_load_sd((const double *)ptrC));
    t4  = _mm_castpd_ps(_mm_load_sd((const double *)ptrD));
    t5  = _mm_load_ss(ptrA+2);
    t6  = _mm_load_ss(ptrB+2);
    t7  = _mm_load_ss(ptrC+2);
    t8  = _mm_load_ss(ptrD+2);
    t1  = _mm_unpacklo_ps(t1,t2);
    t3  = _mm_unpacklo_ps(t3,t4);
    *x1 = _mm_movelh_ps(t1,t3);
    *y1 = _mm_movehl_ps(t3,t1);
    t5  = _mm_unpacklo_ps(t5,t6);
    t7  = _mm_unpacklo_ps(t7,t8);
    *z1 = _mm_movelh_ps(t5,t7);
}
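
/*
 * After the unpack/movelh sequence above, lane n of *x1/*y1/*z1 holds the
 * x/y/z component of the atom behind the n-th pointer, i.e. a 4x3 transpose
 * from AoS (x,y,z per atom) to SoA (one register per component). A
 * hypothetical call for four j atoms:
 *
 *     gmx_mm_load_1rvec_4ptr_swizzle_ps(x+j3A,x+j3B,x+j3C,x+j3D,
 *                                       &jx1,&jy1,&jz1);
 */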


static void
gmx_mm_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3)
{
    __m128 t1,t2,t3,t4;
    t1            = _mm_loadu_ps(ptrA);
    t2            = _mm_loadu_ps(ptrB);
    t3            = _mm_loadu_ps(ptrC);
    t4            = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1           = t1;
    *y1           = t2;
    *z1           = t3;
    *x2           = t4;
    t1            = _mm_loadu_ps(ptrA+4);
    t2            = _mm_loadu_ps(ptrB+4);
    t3            = _mm_loadu_ps(ptrC+4);
    t4            = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2           = t1;
    *z2           = t2;
    *x3           = t3;
    *y3           = t4;
    t1            = _mm_load_ss(ptrA+8);
    t2            = _mm_load_ss(ptrB+8);
    t3            = _mm_load_ss(ptrC+8);
    t4            = _mm_load_ss(ptrD+8);
    t1            = _mm_unpacklo_ps(t1,t3);
    t3            = _mm_unpacklo_ps(t2,t4);
    *z3           = _mm_unpacklo_ps(t1,t3);
}


static void
gmx_mm_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA,
                                  const float * gmx_restrict ptrB,
                                  const float * gmx_restrict ptrC,
                                  const float * gmx_restrict ptrD,
                                  __m128 * gmx_restrict x1, __m128 * gmx_restrict y1, __m128 * gmx_restrict z1,
                                  __m128 * gmx_restrict x2, __m128 * gmx_restrict y2, __m128 * gmx_restrict z2,
                                  __m128 * gmx_restrict x3, __m128 * gmx_restrict y3, __m128 * gmx_restrict z3,
                                  __m128 * gmx_restrict x4, __m128 * gmx_restrict y4, __m128 * gmx_restrict z4)
{
    __m128 t1,t2,t3,t4;
    t1            = _mm_loadu_ps(ptrA);
    t2            = _mm_loadu_ps(ptrB);
    t3            = _mm_loadu_ps(ptrC);
    t4            = _mm_loadu_ps(ptrD);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *x1           = t1;
    *y1           = t2;
    *z1           = t3;
    *x2           = t4;
    t1            = _mm_loadu_ps(ptrA+4);
    t2            = _mm_loadu_ps(ptrB+4);
    t3            = _mm_loadu_ps(ptrC+4);
    t4            = _mm_loadu_ps(ptrD+4);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *y2           = t1;
    *z2           = t2;
    *x3           = t3;
    *y3           = t4;
    t1            = _mm_loadu_ps(ptrA+8);
    t2            = _mm_loadu_ps(ptrB+8);
    t3            = _mm_loadu_ps(ptrC+8);
    t4            = _mm_loadu_ps(ptrD+8);
    _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
    *z3           = t1;
    *x4           = t2;
    *y4           = t3;
    *z4           = t4;
}
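/*
 * The decrement routines below are the scatter counterparts of the loaders
 * above: shuffles rebuild the per-atom (x,y,z) triplets from the component
 * registers, and each triplet is subtracted from the force array in memory.
 * Stores are split into scalar and 64-bit halves where needed, so that no
 * more than the three floats of an rvec are written per atom.
 */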
static void
gmx_mm_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA,
                                       float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC,
                                       float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    t5          = _mm_unpacklo_ps(y1,z1);
    t6          = _mm_unpackhi_ps(y1,z1);
    t7          = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(1,0,0,0));
    t8          = _mm_shuffle_ps(x1,t5,_MM_SHUFFLE(3,2,0,1));
    t9          = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(1,0,0,2));
    t10         = _mm_shuffle_ps(x1,t6,_MM_SHUFFLE(3,2,0,3));
    t1          = _mm_load_ss(ptrA);
    t1          = _mm_loadh_pi(t1,(__m64 *)(ptrA+1));
    t1          = _mm_sub_ps(t1,t7);
    _mm_store_ss(ptrA,t1);
    _mm_storeh_pi((__m64 *)(ptrA+1),t1);
    t2          = _mm_load_ss(ptrB);
    t2          = _mm_loadh_pi(t2,(__m64 *)(ptrB+1));
    t2          = _mm_sub_ps(t2,t8);
    _mm_store_ss(ptrB,t2);
    _mm_storeh_pi((__m64 *)(ptrB+1),t2);
    t3          = _mm_load_ss(ptrC);
    t3          = _mm_loadh_pi(t3,(__m64 *)(ptrC+1));
    t3          = _mm_sub_ps(t3,t9);
    _mm_store_ss(ptrC,t3);
    _mm_storeh_pi((__m64 *)(ptrC+1),t3);
    t4          = _mm_load_ss(ptrD);
    t4          = _mm_loadh_pi(t4,(__m64 *)(ptrD+1));
    t4          = _mm_sub_ps(t4,t10);
    _mm_store_ss(ptrD,t4);
    _mm_storeh_pi((__m64 *)(ptrD+1),t4);
}



static void
gmx_mm_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
    __m128 t11,t12,t13,t14,t15,t16,t17,t18,t19;
    __m128 t20,t21,t22,t23,t24,t25;

    t13         = _mm_unpackhi_ps(x1,y1);
    x1          = _mm_unpacklo_ps(x1,y1);
    t14         = _mm_unpackhi_ps(z1,x2);
    z1          = _mm_unpacklo_ps(z1,x2);
    t15         = _mm_unpackhi_ps(y2,z2);
    y2          = _mm_unpacklo_ps(y2,z2);
    t16         = _mm_unpackhi_ps(x3,y3);
    x3          = _mm_unpacklo_ps(x3,y3);
    t17         = _mm_shuffle_ps(z3,z3,_MM_SHUFFLE(0,0,0,1));
    t18         = _mm_movehl_ps(z3,z3);
    t19         = _mm_shuffle_ps(t18,t18,_MM_SHUFFLE(0,0,0,1));
    t20         = _mm_movelh_ps(x1,z1);
    t21         = _mm_movehl_ps(z1,x1);
    t22         = _mm_movelh_ps(t13,t14);
    t14         = _mm_movehl_ps(t14,t13);
    t23         = _mm_movelh_ps(y2,x3);
    t24         = _mm_movehl_ps(x3,y2);
    t25         = _mm_movelh_ps(t15,t16);
    t16         = _mm_movehl_ps(t16,t15);
    t1          = _mm_loadu_ps(ptrA);
    t2          = _mm_loadu_ps(ptrA+4);
    t3          = _mm_load_ss(ptrA+8);
    t1          = _mm_sub_ps(t1,t20);
    t2          = _mm_sub_ps(t2,t23);
    t3          = _mm_sub_ss(t3,z3);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_store_ss(ptrA+8,t3);
    t4          = _mm_loadu_ps(ptrB);
    t5          = _mm_loadu_ps(ptrB+4);
    t6          = _mm_load_ss(ptrB+8);
    t4          = _mm_sub_ps(t4,t21);
    t5          = _mm_sub_ps(t5,t24);
    t6          = _mm_sub_ss(t6,t17);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_store_ss(ptrB+8,t6);
    t7          = _mm_loadu_ps(ptrC);
    t8          = _mm_loadu_ps(ptrC+4);
    t9          = _mm_load_ss(ptrC+8);
    t7          = _mm_sub_ps(t7,t22);
    t8          = _mm_sub_ps(t8,t25);
    t9          = _mm_sub_ss(t9,t18);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_store_ss(ptrC+8,t9);
    t10         = _mm_loadu_ps(ptrD);
    t11         = _mm_loadu_ps(ptrD+4);
    t12         = _mm_load_ss(ptrD+8);
    t10         = _mm_sub_ps(t10,t14);
    t11         = _mm_sub_ps(t11,t16);
    t12         = _mm_sub_ss(t12,t19);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_store_ss(ptrD+8,t12);
}


static void
gmx_mm_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                       float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                       __m128 x1, __m128 y1, __m128 z1,
                                       __m128 x2, __m128 y2, __m128 z2,
                                       __m128 x3, __m128 y3, __m128 z3,
                                       __m128 x4, __m128 y4, __m128 z4)
{
    __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
    __m128 t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22;
    __m128 t23,t24;
    t13         = _mm_unpackhi_ps(x1,y1);
    x1          = _mm_unpacklo_ps(x1,y1);
    t14         = _mm_unpackhi_ps(z1,x2);
    z1          = _mm_unpacklo_ps(z1,x2);
    t15         = _mm_unpackhi_ps(y2,z2);
    y2          = _mm_unpacklo_ps(y2,z2);
    t16         = _mm_unpackhi_ps(x3,y3);
    x3          = _mm_unpacklo_ps(x3,y3);
    t17         = _mm_unpackhi_ps(z3,x4);
    z3          = _mm_unpacklo_ps(z3,x4);
    t18         = _mm_unpackhi_ps(y4,z4);
    y4          = _mm_unpacklo_ps(y4,z4);
    t19         = _mm_movelh_ps(x1,z1);
    z1          = _mm_movehl_ps(z1,x1);
    t20         = _mm_movelh_ps(t13,t14);
    t14         = _mm_movehl_ps(t14,t13);
    t21         = _mm_movelh_ps(y2,x3);
    x3          = _mm_movehl_ps(x3,y2);
    t22         = _mm_movelh_ps(t15,t16);
    t16         = _mm_movehl_ps(t16,t15);
    t23         = _mm_movelh_ps(z3,y4);
    y4          = _mm_movehl_ps(y4,z3);
    t24         = _mm_movelh_ps(t17,t18);
    t18         = _mm_movehl_ps(t18,t17);
    t1          = _mm_loadu_ps(ptrA);
    t2          = _mm_loadu_ps(ptrA+4);
    t3          = _mm_loadu_ps(ptrA+8);
    t1          = _mm_sub_ps(t1,t19);
    t2          = _mm_sub_ps(t2,t21);
    t3          = _mm_sub_ps(t3,t23);
    _mm_storeu_ps(ptrA,t1);
    _mm_storeu_ps(ptrA+4,t2);
    _mm_storeu_ps(ptrA+8,t3);
    t4          = _mm_loadu_ps(ptrB);
    t5          = _mm_loadu_ps(ptrB+4);
    t6          = _mm_loadu_ps(ptrB+8);
    t4          = _mm_sub_ps(t4,z1);
    t5          = _mm_sub_ps(t5,x3);
    t6          = _mm_sub_ps(t6,y4);
    _mm_storeu_ps(ptrB,t4);
    _mm_storeu_ps(ptrB+4,t5);
    _mm_storeu_ps(ptrB+8,t6);
    t7          = _mm_loadu_ps(ptrC);
    t8          = _mm_loadu_ps(ptrC+4);
    t9          = _mm_loadu_ps(ptrC+8);
    t7          = _mm_sub_ps(t7,t20);
    t8          = _mm_sub_ps(t8,t22);
    t9          = _mm_sub_ps(t9,t24);
    _mm_storeu_ps(ptrC,t7);
    _mm_storeu_ps(ptrC+4,t8);
    _mm_storeu_ps(ptrC+8,t9);
    t10         = _mm_loadu_ps(ptrD);
    t11         = _mm_loadu_ps(ptrD+4);
    t12         = _mm_loadu_ps(ptrD+8);
    t10         = _mm_sub_ps(t10,t14);
    t11         = _mm_sub_ps(t11,t16);
    t12         = _mm_sub_ps(t12,t18);
    _mm_storeu_ps(ptrD,t10);
    _mm_storeu_ps(ptrD+4,t11);
    _mm_storeu_ps(ptrD+8,t12);
}


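/*
 * Reduction sketch for the i-force update routines below: transposing the
 * partial-force registers and adding the rows leaves the horizontal sum of
 * each input register in its own lane. The summed forces are accumulated
 * both into the per-atom force array (fptr) and into the shift-force
 * accumulator (fshiftptr) used for the virial.
 */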
static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3;

    /* transpose data */
    t1 = fix1;
    _MM_TRANSPOSE4_PS(fix1,t1,fiy1,fiz1);
    fix1 = _mm_add_ps(_mm_add_ps(fix1,t1), _mm_add_ps(fiy1,fiz1));

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2,fix1);
    t3 = _mm_add_ps(t3,fix1);

    _mm_store_ss(fptr,t2);
    _mm_storeh_pi((__m64 *)(fptr+1),t2);
    _mm_store_ss(fshiftptr,t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
}

static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
    t2   = _mm_movehl_ps(_mm_setzero_ps(),fiz3);
    t1   = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(0,0,0,1));
    t3   = _mm_shuffle_ps(t2,t2,_MM_SHUFFLE(0,0,0,1));

    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
    fiz3 = _mm_add_ss(_mm_add_ps(fiz3,t1)  , _mm_add_ps(t2,t3));

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_store_ss (fptr+8,_mm_add_ss(fiz3,_mm_load_ss(fptr+8) ));

    t4 = _mm_load_ss(fshiftptr+2);
    t4 = _mm_loadh_pi(t4,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fiz3,fix1,_MM_SHUFFLE(1,0,0,0));   /* fiy1 fix1  -   fiz3 */
    t2 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(3,2,2,2));   /* fiy3 fix3  -   fiz1 */
    t3 = _mm_shuffle_ps(fiy2,fix1,_MM_SHUFFLE(3,3,0,1));   /* fix2 fix2 fiy2 fiz2 */
    t3 = _mm_shuffle_ps(t3  ,t3  ,_MM_SHUFFLE(1,2,0,0));   /* fiy2 fix2  -   fiz2 */

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3); /* y x - z */

    _mm_store_ss(fshiftptr+2,t1);
    _mm_storeh_pi((__m64 *)(fshiftptr),t1);
}


static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_ps(__m128 fix1, __m128 fiy1, __m128 fiz1,
                                      __m128 fix2, __m128 fiy2, __m128 fiz2,
                                      __m128 fix3, __m128 fiy3, __m128 fiz3,
                                      __m128 fix4, __m128 fiy4, __m128 fiz4,
                                      float * gmx_restrict fptr,
                                      float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3,t4,t5;

    /* transpose data */
    _MM_TRANSPOSE4_PS(fix1,fiy1,fiz1,fix2);
    _MM_TRANSPOSE4_PS(fiy2,fiz2,fix3,fiy3);
    _MM_TRANSPOSE4_PS(fiz3,fix4,fiy4,fiz4);

    fix1 = _mm_add_ps(_mm_add_ps(fix1,fiy1), _mm_add_ps(fiz1,fix2));
    fiy2 = _mm_add_ps(_mm_add_ps(fiy2,fiz2), _mm_add_ps(fix3,fiy3));
    fiz3 = _mm_add_ps(_mm_add_ps(fiz3,fix4), _mm_add_ps(fiy4,fiz4));

    _mm_storeu_ps(fptr,  _mm_add_ps(fix1,_mm_loadu_ps(fptr)  ));
    _mm_storeu_ps(fptr+4,_mm_add_ps(fiy2,_mm_loadu_ps(fptr+4)));
    _mm_storeu_ps(fptr+8,_mm_add_ps(fiz3,_mm_loadu_ps(fptr+8)));

    t5 = _mm_load_ss(fshiftptr+2);
    t5 = _mm_loadh_pi(t5,(__m64 *)(fshiftptr));

    t1 = _mm_shuffle_ps(fix1,fix1,_MM_SHUFFLE(1,0,2,2));
    t2 = _mm_shuffle_ps(fiy2,fiy2,_MM_SHUFFLE(3,2,1,1));
    t3 = _mm_shuffle_ps(fiz3,fiz3,_MM_SHUFFLE(2,1,0,0));
    t4 = _mm_shuffle_ps(fix1,fiy2,_MM_SHUFFLE(0,0,3,3));
    t4 = _mm_shuffle_ps(fiz3,t4  ,_MM_SHUFFLE(2,0,3,3));

    t1 = _mm_add_ps(t1,t2);
    t3 = _mm_add_ps(t3,t4);
    t1 = _mm_add_ps(t1,t3);
    t5 = _mm_add_ps(t5,t1);

    _mm_store_ss(fshiftptr+2,t5);
    _mm_storeh_pi((__m64 *)(fshiftptr),t5);
}


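/*
 * Horizontal-reduction sketch for the potential updates below, e.g. for
 * gmx_mm_update_1pot_ps(): the movehl add folds lanes 2,3 onto lanes 0,1,
 * and the shuffle add then folds lane 1 onto lane 0, so lane 0 ends up
 * holding the sum of all four lanes, which is accumulated into *ptrA.
 */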
static void
gmx_mm_update_1pot_ps(__m128 pot1, float * gmx_restrict ptrA)
{
    pot1 = _mm_add_ps(pot1,_mm_movehl_ps(_mm_setzero_ps(),pot1));
    pot1 = _mm_add_ps(pot1,_mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(0,0,0,1)));
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
}

static void
gmx_mm_update_2pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB)
{
    __m128 t1,t2;
    t1   = _mm_movehl_ps(pot2,pot1);
    t2   = _mm_movelh_ps(pot1,pot2);
    t1   = _mm_add_ps(t1,t2);
    t2   = _mm_shuffle_ps(t1,t1,_MM_SHUFFLE(3,3,1,1));
    pot1 = _mm_add_ps(t1,t2);
    pot2 = _mm_movehl_ps(t2,pot1);
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
}


static void
gmx_mm_update_4pot_ps(__m128 pot1, float * gmx_restrict ptrA,
                      __m128 pot2, float * gmx_restrict ptrB,
                      __m128 pot3, float * gmx_restrict ptrC,
                      __m128 pot4, float * gmx_restrict ptrD)
{
    _MM_TRANSPOSE4_PS(pot1,pot2,pot3,pot4);
    pot1 = _mm_add_ps(_mm_add_ps(pot1,pot2),_mm_add_ps(pot3,pot4));
    pot2 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(1,1,1,1));
    pot3 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(2,2,2,2));
    pot4 = _mm_shuffle_ps(pot1,pot1,_MM_SHUFFLE(3,3,3,3));
    _mm_store_ss(ptrA,_mm_add_ss(pot1,_mm_load_ss(ptrA)));
    _mm_store_ss(ptrB,_mm_add_ss(pot2,_mm_load_ss(ptrB)));
    _mm_store_ss(ptrC,_mm_add_ss(pot3,_mm_load_ss(ptrC)));
    _mm_store_ss(ptrD,_mm_add_ss(pot4,_mm_load_ss(ptrD)));
}


#endif /* _kernelutil_x86_sse2_single_h_ */