/*
   This source code file is part of thread_mpi.
   Written by Sander Pronk, Erik Lindahl, and possibly others.

   Copyright (c) 2009, Sander Pronk, Erik Lindahl.
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   1) Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
   2) Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
   3) Neither the name of the copyright holders nor the
   names of its contributors may be used to endorse or promote products
   derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   If you want to redistribute modifications, please consider that
   scientific software is very special. Version control is crucial -
   bugs must be traceable. We will be happy to consider code for
   inclusion in the official distribution, but derived work should not
   be called official thread_mpi. Details are found in the README & COPYING
   files.
 */
/* PowerPC using xlC inline assembly.
 * Recent versions of xlC (>=7.0) _partially_ support GCC inline assembly
 * if you use the option -qasm=gcc, but we have had to hack things a bit, in
 * particular when it comes to clobbered variables. Since this implementation
 * _could_ be buggy, we have separated it from the known-to-be-working gcc
 * inline assembly implementation.
 */

/* For now, we just disable the inline keyword if we're compiling C code: */
#if (!defined(__cplusplus)) && (!defined(inline))
#define inline_defined_in_atomic 1
#define inline
#endif
/* IBM xlC compiler */
#ifdef __cplusplus
#include <builtins.h>
#endif

#define TMPI_XLC_INTRINSICS
/* ppc has many memory synchronization instructions */
/*#define tMPI_Atomic_memory_barrier() { __fence(); __sync(); __fence();}*/
/*#define tMPI_Atomic_memory_barrier() __isync();*/
/*#define tMPI_Atomic_memory_barrier() __lwsync();*/

/* for normal memory, this should be enough: */
#define tMPI_Atomic_memory_barrier() { __fence(); __eieio(); __fence(); }
#define tMPI_Atomic_memory_barrier_acq() { __eieio(); __fence(); }
#define tMPI_Atomic_memory_barrier_rel() { __fence(); __eieio(); }
#define TMPI_HAVE_ACQ_REL_BARRIERS

/*#define tMPI_Atomic_memory_barrier() __eieio();*/
typedef struct tMPI_Atomic
{
    volatile int value __attribute__ ((aligned(64)));
} tMPI_Atomic_t;

typedef struct tMPI_Atomic_ptr
{
    volatile char* volatile* value __attribute__ ((aligned(64))); /*!< Volatile, to avoid compiler aliasing */
} tMPI_Atomic_ptr_t;

typedef struct tMPI_Spinlock
{
    volatile int lock __attribute__ ((aligned(64)));
} tMPI_Spinlock_t;

#define TMPI_ATOMIC_HAVE_NATIVE_SPINLOCK
#define tMPI_Atomic_get(a)        (int)((a)->value)
#define tMPI_Atomic_set(a, i)     (((a)->value) = (i))
#define tMPI_Atomic_ptr_get(a)    ((a)->value)
#define tMPI_Atomic_ptr_set(a, i) (((a)->value) = (i))

#define TMPI_SPINLOCK_INITIALIZER { 0 }
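
/* A minimal usage sketch (illustrative only, hence compiled out): a writer
 * publishes data with a release barrier before setting a flag, and a reader
 * pairs it with an acquire barrier after seeing the flag. The names
 * example_data and example_ready are hypothetical, not part of thread_mpi. */
#if 0
static int           example_data;
static tMPI_Atomic_t example_ready;

static void example_publish(void)
{
    example_data = 42;
    tMPI_Atomic_memory_barrier_rel(); /* prior stores become visible first */
    tMPI_Atomic_set(&example_ready, 1);
}

static int example_consume(void)
{
    while (tMPI_Atomic_get(&example_ready) == 0) {}
    tMPI_Atomic_memory_barrier_acq(); /* later loads stay after the flag read */
    return example_data;              /* guaranteed to read 42 */
}
#endif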
static inline int tMPI_Atomic_cas(tMPI_Atomic_t *a, int oldval, int newval)
{
#ifdef TMPI_XLC_INTRINSICS
    int ret;

    __fence(); /* this one needs to be here to avoid ptr. aliasing issues */

    ret = (__compare_and_swap(&(a->value), &oldval, newval));

    __fence(); /* and this one needs to be here to avoid aliasing issues */

    return ret;
#else
    int prev;

    __asm__ __volatile__ ("1: lwarx %0,0,%2 \n"  /* load-reserve the value */
                          "\t cmpw 0,%0,%3 \n"   /* compare with oldval */
                          "\t bne 2f \n"
                          "\t stwcx. %4,0,%2 \n" /* store newval conditionally */
                          "\t bne- 1b \n"        /* retry if reservation was lost */
                          "2: \n"
                          : "=&r" (prev), "=m" (a->value)
                          : "r" (&a->value), "r" (oldval), "r" (newval),
                          "m" (a->value)
                          : "cc", "memory");

    return prev == oldval;
#endif
}
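
/* Illustrative sketch (hypothetical helper, not part of thread_mpi): a
 * typical CAS retry loop, here incrementing a counter only up to a cap.
 * tMPI_Atomic_cas returns nonzero on success, as implemented above. */
#if 0
static int example_bounded_inc(tMPI_Atomic_t *a, int cap)
{
    int old;
    do
    {
        old = tMPI_Atomic_get(a);
        if (old >= cap)
        {
            return 0; /* cap reached; leave the value unchanged */
        }
    }
    while (!tMPI_Atomic_cas(a, old, old + 1)); /* retry on contention */
    return 1;
}
#endif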
static inline int tMPI_Atomic_ptr_cas(tMPI_Atomic_ptr_t *a, void* oldval,
                                      void* newval)
{
    int ret;
    volatile char* volatile* oldv = oldval;
    volatile char* volatile* newv = newval;

    __fence(); /* this one needs to be here to avoid ptr. aliasing issues */
#if (!defined (__LP64__) ) && (!defined(__powerpc64__) )
    ret = __compare_and_swap((int *)&(a->value), (int*)&oldv, (int)newv);
#else
    ret = __compare_and_swaplp((long *)&(a->value), (long*)&oldv,
                               (long)newv);
#endif
    __fence(); /* and this one needs to be here to avoid aliasing issues */

    return ret;
}
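
/* Illustrative sketch (hypothetical, not part of thread_mpi): one-time
 * lock-free initialization of a shared pointer slot. Only the thread whose
 * CAS succeeds keeps its allocation; losers free theirs and reuse the
 * winner's pointer. */
#if 0
#include <stdlib.h>

static void *example_get_singleton(tMPI_Atomic_ptr_t *slot)
{
    void *p = (void*)tMPI_Atomic_ptr_get(slot);
    if (p == NULL)
    {
        void *mine = malloc(64);                  /* candidate object */
        if (tMPI_Atomic_ptr_cas(slot, NULL, mine))
        {
            p = mine;                             /* we won the race */
        }
        else
        {
            free(mine);                           /* someone else won */
            p = (void*)tMPI_Atomic_ptr_get(slot);
        }
    }
    return p;
}
#endif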
static inline int tMPI_Atomic_add_return(tMPI_Atomic_t *a, int i)
{
#ifdef TMPI_XLC_INTRINSICS
    int oldval, newval;

    do
    {
        __fence();
        __eieio(); /* these memory barriers are necessary */
        oldval = tMPI_Atomic_get(a);
        newval = oldval + i;
    }
    /*while(!__compare_and_swap( &(a->value), &oldval, newval));*/
    while (__check_lock_mp( (int*)&(a->value), oldval, newval));
    /*__isync();*/
    __fence();

    return newval;
#else
    int t;

    __asm__ __volatile__("1: lwarx %0,0,%2 \n"   /* load-reserve the value */
                         "\t add %0,%1,%0 \n"    /* add i */
                         "\t stwcx. %0,0,%2 \n"  /* store conditionally */
                         "\t bne- 1b \n"         /* retry if reservation was lost */
                         "\t isync \n"
                         : "=&r" (t)
                         : "r" (i), "r" (&a->value) );
    return t;
#endif
}
#define TMPI_ATOMIC_HAVE_NATIVE_ADD_RETURN
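
/* Illustrative sketch (hypothetical, not part of thread_mpi): add_return
 * yields the value *after* the addition, which makes it natural for counting
 * arrivals, e.g. noticing when the last of nthreads threads checks in. */
#if 0
static int example_arrive(tMPI_Atomic_t *count, int nthreads)
{
    /* returns 1 only for the last thread to arrive */
    return (tMPI_Atomic_add_return(count, 1) == nthreads);
}
#endif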
static inline int tMPI_Atomic_fetch_add(tMPI_Atomic_t *a, int i)
{
#ifdef TMPI_XLC_INTRINSICS
    int oldval, newval;

    do
    {
        __fence();
        __eieio(); /* these memory barriers are necessary */
        oldval = tMPI_Atomic_get(a);
        newval = oldval + i;
    }
    /*while(__check_lock_mp((const int*)&(a->value), oldval, newval));*/
    while (__check_lock_mp( (int*)&(a->value), oldval, newval));
    /*while(!__compare_and_swap( &(a->value), &oldval, newval));*/
    /*__isync();*/
    __fence();

    return oldval;
#else
    int t;

    __asm__ __volatile__("\t eieio\n"
                         "1: lwarx %0,0,%2 \n"   /* load-reserve the value */
                         "\t add %0,%1,%0 \n"    /* add i */
                         "\t stwcx. %0,0,%2 \n"  /* store conditionally */
                         "\t bne- 1b \n"         /* retry if reservation was lost */
                         "\t isync \n"
                         : "=&r" (t)
                         : "r" (i), "r" (&a->value));
    return (t - i); /* t holds the new value; fetch_add returns the old one */
#endif
}
#define TMPI_ATOMIC_HAVE_NATIVE_FETCH_ADD
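
/* Illustrative sketch (hypothetical, not part of thread_mpi): fetch_add
 * returns the value *before* the addition, which is the usual primitive for
 * handing out disjoint index ranges to worker threads. */
#if 0
static void example_worker(tMPI_Atomic_t *next, int ntotal, int chunk)
{
    int start;
    while ((start = tMPI_Atomic_fetch_add(next, chunk)) < ntotal)
    {
        int end = (start + chunk < ntotal) ? start + chunk : ntotal;
        /* process items [start, end) -- each range is claimed exactly once */
        (void)end;
    }
}
#endif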
static inline void tMPI_Spinlock_init(tMPI_Spinlock_t *x)
{
    __clear_lock_mp((const int*)x, 0);
}
static inline void tMPI_Spinlock_lock(tMPI_Spinlock_t *x)
{
    /* spin until we swing the lock from 0 to 1 */
    while (__check_lock_mp((int*)&(x->lock), 0, 1));
    tMPI_Atomic_memory_barrier_acq();
}
static inline int tMPI_Spinlock_trylock(tMPI_Spinlock_t *x)
{
    int ret;
    /* Return 0 if we got the lock */
    ret = __check_lock_mp((int*)&(x->lock), 0, 1);
    tMPI_Atomic_memory_barrier_acq();
    return ret;
}
static inline void tMPI_Spinlock_unlock(tMPI_Spinlock_t *x)
{
    tMPI_Atomic_memory_barrier_rel();
    __clear_lock_mp((int*)&(x->lock), 0);
}
static inline int tMPI_Spinlock_islocked(const tMPI_Spinlock_t *x)
{
    int ret;
    ret = ((x->lock) != 0);
    tMPI_Atomic_memory_barrier_acq();
    return ret;
}
static inline void tMPI_Spinlock_wait(tMPI_Spinlock_t *x)
{
    /* spin until the lock is released */
    while (tMPI_Spinlock_islocked(x));
}
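
/* Illustrative sketch (hypothetical, not part of thread_mpi): the intended
 * spinlock life cycle. trylock returns 0 on success, matching the
 * implementation above. */
#if 0
static tMPI_Spinlock_t example_lock = TMPI_SPINLOCK_INITIALIZER;

static void example_critical_section(int *shared, int val)
{
    tMPI_Spinlock_lock(&example_lock);
    *shared += val;                 /* protected update */
    tMPI_Spinlock_unlock(&example_lock);
}

static int example_try(int *shared, int val)
{
    if (tMPI_Spinlock_trylock(&example_lock) == 0)
    {
        *shared += val;
        tMPI_Spinlock_unlock(&example_lock);
        return 1;                   /* did the work */
    }
    return 0;                       /* lock busy; caller can retry later */
}
#endif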