/*
   This source code file is part of thread_mpi.
   Written by Sander Pronk, Erik Lindahl, and possibly others.

   Copyright (c) 2013, Sander Pronk, Erik Lindahl.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   1) Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
   2) Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
   3) Neither the name of the copyright holders nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   If you want to redistribute modifications, please consider that
   scientific software is very special. Version control is crucial -
   bugs must be traceable. We will be happy to consider code for
   inclusion in the official distribution, but derived work should not
   be called official thread_mpi. Details are found in the README & COPYING
   files.
 */
/* These functions are fallback definitions for when there are no native
   variants for fetch-add, spinlock, etc., but there is a native
   compare-and-swap (CAS): everything below is built on tMPI_Atomic_cas
   and tMPI_Atomic_ptr_cas. */
/* only define these if there are no separate acquire and release barriers */
#ifndef TMPI_HAVE_ACQ_REL_BARRIERS

/* if they're not defined explicitly, we just make full barriers out of both */
#define tMPI_Atomic_memory_barrier_acq tMPI_Atomic_memory_barrier
#define tMPI_Atomic_memory_barrier_rel tMPI_Atomic_memory_barrier

#endif /* TMPI_HAVE_ACQ_REL_BARRIERS */
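/* Illustrative sketch (not part of thread_mpi; all names here are
   hypothetical): the publish/consume pattern that acquire/release
   barriers support. Assumes tMPI_Atomic_get/set are available, as
   they are elsewhere in this file. */
static inline void example_publish(tMPI_Atomic_t *data, tMPI_Atomic_t *flag)
{
    tMPI_Atomic_set(data, 42);        /* write the payload...              */
    tMPI_Atomic_memory_barrier_rel(); /* ...make it visible first...       */
    tMPI_Atomic_set(flag, 1);         /* ...then raise the ready flag      */
}

static inline int example_consume(tMPI_Atomic_t *data, tMPI_Atomic_t *flag)
{
    while (tMPI_Atomic_get(flag) == 0) {} /* wait for the ready flag        */
    tMPI_Atomic_memory_barrier_acq();     /* order flag read before payload */
    return tMPI_Atomic_get(data);         /* safe to read the payload now   */
}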
#ifndef TMPI_ATOMIC_HAVE_NATIVE_FETCH_ADD
/* fetch-add built on CAS: re-read and retry until the CAS succeeds */
static inline int tMPI_Atomic_fetch_add(tMPI_Atomic_t *a, int i)
{
    int oldval, newval;
    do {
        tMPI_Atomic_memory_barrier_acq();
        oldval = tMPI_Atomic_get(a);
        newval = oldval + i;
    } while (!tMPI_Atomic_cas(a, oldval, newval));
    tMPI_Atomic_memory_barrier_rel();
    return oldval; /* the value *before* the addition */
}
#endif /* TMPI_ATOMIC_HAVE_NATIVE_FETCH_ADD */
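/* Illustrative sketch (not part of the thread_mpi API; the name
   example_next_ticket is hypothetical): a shared ticket counter
   built on fetch-add. */
static inline int example_next_ticket(tMPI_Atomic_t *counter)
{
    /* fetch-add returns the value *before* the addition, so every
       concurrent caller receives a distinct, increasing ticket */
    return tMPI_Atomic_fetch_add(counter, 1);
}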
#ifndef TMPI_ATOMIC_HAVE_NATIVE_ADD_RETURN
static inline int tMPI_Atomic_add_return(tMPI_Atomic_t *a, int i)
{
    /* implement in terms of fetch-add: the old value plus i is the new value */
    return tMPI_Atomic_fetch_add(a, i) + i;
}
#endif /* TMPI_ATOMIC_HAVE_NATIVE_ADD_RETURN */
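/* Illustrative sketch (hypothetical helper, not part of thread_mpi):
   dropping a reference with add-return. */
static inline int example_release_ref(tMPI_Atomic_t *refcount)
{
    /* add-return yields the value *after* the addition, so a result
       of 0 means this caller dropped the last reference */
    return (tMPI_Atomic_add_return(refcount, -1) == 0);
}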
/* only do this if there was no better solution */
#ifndef TMPI_ATOMIC_HAVE_NATIVE_SWAP
/* swap built on CAS: retry until we replace the value we actually read */
static inline int tMPI_Atomic_swap(tMPI_Atomic_t *a, int b)
{
    int oldval;
    do { oldval = (int)(a->value); } while (!tMPI_Atomic_cas(a, oldval, b));
    return oldval; /* the value that was swapped out */
}
static inline void *tMPI_Atomic_ptr_swap(tMPI_Atomic_ptr_t *a, void *b)
{
    void *oldval;
    do { oldval = (void*)(a->value); } while (!tMPI_Atomic_ptr_cas(a, oldval, b));
    return oldval;
}
#endif /* TMPI_ATOMIC_HAVE_NATIVE_SWAP */
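/* Illustrative sketch (hypothetical helper, not part of thread_mpi):
   atomically taking ownership of a one-shot pointer slot. Of any number
   of concurrent callers, exactly one sees the non-NULL payload. Assumes
   NULL is defined by the headers that include this file. */
static inline void *example_claim(tMPI_Atomic_ptr_t *slot)
{
    /* swap NULL in; whatever was stored before is now exclusively ours */
    return tMPI_Atomic_ptr_swap(slot, NULL);
}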
#ifndef TMPI_ATOMIC_HAVE_NATIVE_SPINLOCK
/* fallback spinlock: a single atomic flag, 0 = unlocked, 1 = locked */
typedef struct tMPI_Spinlock { tMPI_Atomic_t a; } tMPI_Spinlock_t;

#define TMPI_SPINLOCK_INITIALIZER { 0 }

static inline void tMPI_Spinlock_init(tMPI_Spinlock_t *x)
{
    tMPI_Atomic_set(&(x->a), 0);
}
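/* Usage note (illustrative): a fallback spinlock can be initialized
   statically or at run time:
       static tMPI_Spinlock_t lock = TMPI_SPINLOCK_INITIALIZER;
   or
       tMPI_Spinlock_init(&lock);
 */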
static inline void tMPI_Spinlock_lock(tMPI_Spinlock_t *x)
{
    tMPI_Atomic_memory_barrier_acq();
    /* test-and-test-and-set: spin on plain reads, CAS only when free */
    do {
        while (tMPI_Atomic_get(&(x->a)) == 1) {
            tMPI_Atomic_memory_barrier_acq();
        }
    } while (!tMPI_Atomic_cas(&(x->a), 0, 1));
    tMPI_Atomic_memory_barrier_acq();
}
static inline int tMPI_Spinlock_trylock(tMPI_Spinlock_t *x)
{
    tMPI_Atomic_memory_barrier_acq();
    /* returns 0 if the lock was acquired, non-zero if it was already taken */
    return !tMPI_Atomic_cas(&(x->a), 0, 1);
}
static inline void tMPI_Spinlock_unlock(tMPI_Spinlock_t *x)
{
    /* release: make critical-section writes visible before freeing the lock */
    tMPI_Atomic_memory_barrier_rel();
    tMPI_Atomic_set(&(x->a), 0);
    tMPI_Atomic_memory_barrier_rel();
}
static inline int tMPI_Spinlock_islocked(const tMPI_Spinlock_t *x)
{
    /* flush our prior writes before sampling the lock word */
    tMPI_Atomic_memory_barrier_rel();
    return (tMPI_Atomic_get(&(x->a)) != 0);
}
static inline void tMPI_Spinlock_wait(tMPI_Spinlock_t *x)
{
    /* spin until the lock is free, without acquiring it */
    while (tMPI_Spinlock_islocked(x)) {}
}
#endif /* TMPI_ATOMIC_HAVE_NATIVE_SPINLOCK */
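/* Illustrative sketch (hypothetical helper, not part of thread_mpi):
   guarding a plain int counter with the spinlock API above, trying a
   non-blocking acquire first. */
static inline void example_guarded_inc(tMPI_Spinlock_t *lock, int *counter)
{
    if (tMPI_Spinlock_trylock(lock) != 0) /* non-zero: already taken */
    {
        tMPI_Spinlock_lock(lock);         /* fall back to blocking   */
    }
    (*counter)++;                         /* critical section        */
    tMPI_Spinlock_unlock(lock);
}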