/*
   This source code file is part of thread_mpi.
   Written by Sander Pronk, Erik Lindahl, and possibly others.

   Copyright (c) 2009, Sander Pronk, Erik Lindahl.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   1) Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
   2) Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
   3) Neither the name of the copyright holders nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   If you want to redistribute modifications, please consider that
   scientific software is very special. Version control is crucial -
   bugs must be traceable. We will be happy to consider code for
   inclusion in the official distribution, but derived work should not
   be called official thread_mpi. Details are found in the README & COPYING
   files.
*/
#ifndef TMPI_ATOMIC_H_
#define TMPI_ATOMIC_H_
/*!
 * \brief Atomic operations for fast SMP synchronization
 *
 * This file defines atomic integer operations and spinlocks for
 * fast synchronization in performance-critical regions.
 *
 * In general, the best option is to use functions without explicit
 * locking, e.g. tMPI_Atomic_fetch_add() or tMPI_Atomic_cas().
 *
 * Depending on the architecture/compiler, these operations may either
 * be provided as functions or macros; be aware that those macros may
 * reference their arguments repeatedly, possibly leading to multiply
 * evaluated code with side effects: be careful with what you use as
 * arguments.
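 *
 * As an illustrative sketch of that pitfall: if, on some platform,
 * tMPI_Atomic_fetch_add() were a macro that expands its first argument twice,
 * a call like
 * \code
 * tMPI_Atomic_fetch_add(&counters[next_slot()], 1);
 * \endcode
 * could invoke next_slot() (a hypothetical helper) more than once; prefer
 * passing plain variables or pre-computed pointers as arguments.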
 *
 * Not all architectures support atomic operations through inline assembly,
 * and even if they do it might not be implemented here. In that case
 * we use a fallback mutex implementation, so you can always count on
 * the function interfaces working.
 *
 * Don't use spinlocks in non-performance-critical regions like file I/O.
 * Since they always busy-wait, they would waste CPU cycles instead of
 * properly yielding to a computation thread while waiting for the disk.
 *
 * Finally, note that all our spinlock operations are defined to return
 * 0 if initialization or locking completes successfully.
 * This is the opposite of some other implementations, but the same standard
 * as used for pthread mutexes. So, if you are e.g. trying to lock a spinlock,
 * you will have gotten the lock if the return value is 0.
 *
 * tMPI_Spinlock_islocked(x) obviously still returns 1 if the lock is locked,
 * and 0 if it is available, though...
 */
/* See the comments on the non-atomic versions for explanations */

#include "visibility.h"

#ifdef __cplusplus
extern "C"
{
#endif
#if 0
} /* Avoids screwing up auto-indentation */
#endif
#define TMPI_GCC_VERSION (__GNUC__ * 10000 \
                          + __GNUC_MINOR__ * 100 \
                          + __GNUC_PATCHLEVEL__)
/* first check for gcc/icc platforms.
   Some compatible compilers, like icc on linux+mac, will take this path. */
#if ( (defined(__GNUC__) || defined(__PATHSCALE__) || defined(__PGI)) && (!defined(__xlc__)) )

/* now check specifically for several architectures: */
#if ((defined(i386) || defined(__x86_64__)) && !defined(__OPEN64__))

#include "atomic/gcc_x86.h"
/*#include "atomic/gcc.h"*/

#elif (defined(__ia64__))

#include "atomic/gcc_ia64.h"

/* for now we use gcc intrinsics on gcc: */
/*#elif (defined(__powerpc__) || (defined(__ppc__)) )*/
/*#include "atomic/gcc_ppc.h"*/

#else
/* otherwise, there's a generic gcc intrinsics version: */
#include "atomic/gcc.h"

#endif /* end of check for gcc specific architectures */
#elif (defined(_MSC_VER) && (_MSC_VER >= 1200))
/* Microsoft Visual C on x86, define taken from FFTW who got it from
   Morten Nissov. icc on Windows will take this path. */
#include "atomic/msvc.h"

#elif ( (defined(__IBM_GCC_ASM) || defined(__IBM_STDCPP_ASM)) && \
    (defined(__powerpc__) || defined(__ppc__)))

/* PowerPC using xlC intrinsics. */

#include "atomic/xlc_ppc.h"

#elif defined(__xlC__) || defined(__xlc__)
/* IBM xlC compiler */
#include "atomic/xlc_ppc.h"


#elif defined (__sun) && (defined(__sparcv9) || defined(__sparc))
/* Solaris on SPARC (Sun C Compiler, Solaris Studio) */
#include "atomic/suncc-sparc.h"
#else

/* No atomic operations, use mutex fallback. Documentation is in x86 section */

#ifdef TMPI_CHECK_ATOMICS
#error No atomic operations implemented for this cpu/compiler combination.
#endif

/** Indicates that no support for atomic operations is present. */
#define TMPI_NO_ATOMICS
/** Memory barrier operation

   Modern CPUs rely heavily on out-of-order execution, and one common feature
   is that load/stores might be reordered. Also, when using inline assembly
   the compiler might already have loaded the variable we are changing into
   a register, so any update to memory won't be visible.

   This command creates a memory barrier, i.e. all memory results before
   it in the code should be visible to all memory operations after it - the
   CPU cannot propagate load/stores across it.

   This barrier is a full barrier: all load and store operations of
   instructions before it are completed, while all load and store operations
   that are in instructions after it won't be done before this barrier.
 */
#define tMPI_Atomic_memory_barrier()
/** Memory barrier operation with acquire semantics

   This barrier is a barrier with acquire semantics: the terminology comes
   from its common use after acquiring a lock: all load/store instructions
   after this barrier may not be re-ordered to happen before this barrier.
 */
#define tMPI_Atomic_memory_barrier_acq()
/** Memory barrier operation with release semantics

   This barrier is a barrier with release semantics: the terminology comes
   from its common use before releasing a lock: all load/store instructions
   before this barrier may not be re-ordered to happen after this barrier.
 */
#define tMPI_Atomic_memory_barrier_rel()
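
/* Illustrative sketch (not part of this header): a typical publish/consume
   pattern built from the release/acquire barriers documented above. All names
   except the tMPI_* calls are hypothetical.

   \code
   static tMPI_Atomic_t data_ready_flag;       // shared flag, initially 0

   // producer thread
   shared_data = compute_result();             // ordinary store
   tMPI_Atomic_memory_barrier_rel();           // make the store visible first
   tMPI_Atomic_set(&data_ready_flag, 1);       // then publish the flag

   // consumer thread
   while (tMPI_Atomic_get(&data_ready_flag) == 0) {}
   tMPI_Atomic_memory_barrier_acq();           // don't read data before the flag
   use(shared_data);
   \endcode
 */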
/** System mutex used for locking to guarantee atomicity */
static tMPI_Thread_mutex_t tMPI_Atomic_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
/** Atomic operations datatype
 *
 * Portable synchronization primitives like mutexes are effective for
 * many purposes, but usually not very high performance.
 * One of the problems is that you have the overhead of a function call,
 * and another is that mutexes often have extra overhead to make the
 * scheduling fair. Finally, if performance is important we don't want
 * to suspend the thread if we cannot lock a mutex, but spin-lock at 100%
 * CPU usage until the resource is available (e.g. increment a counter).
 *
 * These things can often be implemented with inline-assembly or other
 * system-dependent functions, and we provide such functionality for the
 * most common platforms. For portability we also have a fallback
 * implementation using a mutex for locking.
 *
 * Performance-wise, the fastest solution is always to avoid locking
 * completely (obvious, but remember it!). If you cannot do that, the
 * next best thing is to use atomic operations that e.g. increment a
 * counter without explicit locking. Spinlocks are useful to lock an
 * entire region, but lead to more overhead and can be difficult to
 * debug - it is up to you to make sure that only the thread owning the
 * lock unlocks it.
 *
 * You should normally NOT use atomic operations for things like
 * I/O threads. These should yield to other threads while waiting for
 * the disk instead of spinning at 100% CPU usage.
 *
 * It is imperative that you use the provided routines for reading
 * and writing, since some implementations require memory barriers before
 * the CPU or memory sees an updated result. The structure contents are
 * only visible here so it can be inlined for performance - it might
 * change without further notice.
 *
 * \note No initialization is required for atomic variables.
 *
 * Currently, we have (real) atomic operations for:
 *
 *  - gcc version 4.1 and later (all platforms)
 *  - x86 or x86_64, using GNU compilers
 *  - x86 or x86_64, using Intel compilers
 *  - x86 or x86_64, using Pathscale compilers
 *  - Itanium, using GNU compilers
 *  - Itanium, using Intel compilers
 *  - Itanium, using HP compilers
 *  - PowerPC, using GNU compilers
 *  - PowerPC, using IBM AIX compilers
 *  - PowerPC, using IBM compilers >=7.0 under Linux or Mac OS X.
 *
 * \see
 * - tMPI_Atomic_add_return
 * - tMPI_Atomic_fetch_add
 */
typedef struct tMPI_Atomic
{
    int value; /**< The atomic value. */
} tMPI_Atomic_t;
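
/* Illustrative sketch (not part of this header): using tMPI_Atomic_t as a
   shared counter. The variable names are hypothetical.

   \code
   static tMPI_Atomic_t ntasks_done;

   tMPI_Atomic_set(&ntasks_done, 0);           // write
   tMPI_Atomic_fetch_add(&ntasks_done, 1);     // atomic increment, any thread
   n = tMPI_Atomic_get(&ntasks_done);          // read
   \endcode
 */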
/** Atomic pointer type equivalent to tMPI_Atomic_t
 *
 * Useful for lock-free and wait-free data structures.
 * The only operations available for this type are:
 * - tMPI_Atomic_ptr_get
 * - tMPI_Atomic_ptr_set
 * - tMPI_Atomic_ptr_cas
 */
typedef struct tMPI_Atomic_ptr
{
    void* value; /**< The atomic pointer value. */
} tMPI_Atomic_ptr_t;
/** Spinlock
 *
 * Spinlocks provide faster synchronization than mutexes,
 * although they consume CPU cycles while waiting. They are implemented
 * with atomic operations and inline assembly whenever possible, and
 * otherwise we use a fallback implementation where a spinlock is identical
 * to a mutex (this is one of the reasons why you have to initialize them).
 *
 * There are no guarantees whatsoever about fair scheduling or
 * debugging if you make a mistake and unlock a variable somebody
 * else has locked - performance is the primary goal of spinlocks.
 *
 * \see
 * - tMPI_Spinlock_init
 * - tMPI_Spinlock_lock
 * - tMPI_Spinlock_unlock
 * - tMPI_Spinlock_trylock
 * - tMPI_Spinlock_wait
 */
typedef struct tMPI_Spinlock
{
    tMPI_Thread_mutex_t lock; /* we don't want this documented */
} tMPI_Spinlock_t;
/*#define tMPI_Spinlock_t tMPI_Thread_mutex_t*/
/*! \def TMPI_SPINLOCK_INITIALIZER
 * \brief Spinlock static initializer
 *
 * This is used for static spinlock initialization, and has the same
 * properties as TMPI_THREAD_MUTEX_INITIALIZER has for mutexes.
 * This is only for inlining in the tMPI_Thread.h header file. Whether
 * it is 0, 1, or something else when unlocked depends on the platform.
 * Don't assume anything about it. It might even be a mutex when using the
 * fallback implementation!
 */
# define TMPI_SPINLOCK_INITIALIZER   { TMPI_THREAD_MUTEX_INITIALIZER }
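
/* Illustrative sketch (not part of this header): a statically initialized
   spinlock guarding a very short critical section. The lock and counter names
   are hypothetical.

   \code
   static tMPI_Spinlock_t list_lock = TMPI_SPINLOCK_INITIALIZER;

   tMPI_Spinlock_lock(&list_lock);
   nitems++;                      // keep the locked region as short as possible
   tMPI_Spinlock_unlock(&list_lock);
   \endcode
 */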
/* Since mutexes guarantee memory barriers this works fine */

/** Return value of an atomic integer
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a Atomic variable to read
 * \return Integer value of the atomic variable
 */
static inline int tMPI_Atomic_get(tMPI_Atomic_t *a);

#define tMPI_Atomic_get(a) ((a)->value)
/** Write value to an atomic integer
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a Atomic variable
 * \param i Integer to set the atomic variable to.
 */
static inline void tMPI_Atomic_set(tMPI_Atomic_t *a, int i)
{
    /* Mutexes here are necessary to guarantee memory visibility */
    tMPI_Thread_mutex_lock(&tMPI_Atomic_mutex);
    a->value = i;
    tMPI_Thread_mutex_unlock(&tMPI_Atomic_mutex);
}
/** Return value of an atomic pointer
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a Atomic variable to read
 * \return Pointer value of the atomic variable
 */
static inline void* tMPI_Atomic_ptr_get(tMPI_Atomic_ptr_t *a);

#define tMPI_Atomic_ptr_get(a) ((a)->value)
/** Write value to an atomic pointer
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a Atomic variable
 * \param p Pointer value to set the atomic variable to.
 */
static inline void tMPI_Atomic_ptr_set(tMPI_Atomic_ptr_t *a, void *p)
{
    /* Mutexes here are necessary to guarantee memory visibility */
    tMPI_Thread_mutex_lock(&tMPI_Atomic_mutex);
    a->value = p;
    tMPI_Thread_mutex_unlock(&tMPI_Atomic_mutex);
}
/** Add integer to atomic variable
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a atomic datatype to modify
 * \param i integer to increment with. Use i<0 to subtract atomically.
 *
 * \return The new value (after summation).
 */
static inline int tMPI_Atomic_add_return(tMPI_Atomic_t *a, int i)
{
    int t;

    tMPI_Thread_mutex_lock(&tMPI_Atomic_mutex);
    t        = a->value + i;
    a->value = t;
    tMPI_Thread_mutex_unlock(&tMPI_Atomic_mutex);
    return t;
}
/** Add to variable, return the old value.
 *
 * This operation is quite useful for synchronization counters.
 * By performing a fetch-and-add with N, a thread can e.g. reserve a chunk
 * of the next N iterations, and the return value is the index
 * of the first element to treat.
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a atomic datatype to modify
 * \param i integer to increment with. Use i<0 to subtract atomically.
 *
 * \return The value of the atomic variable before addition.
 */
static inline int tMPI_Atomic_fetch_add(tMPI_Atomic_t *a, int i)
{
    int old_value;

    tMPI_Thread_mutex_lock(&tMPI_Atomic_mutex);
    old_value = a->value;
    a->value  = old_value + i;
    tMPI_Thread_mutex_unlock(&tMPI_Atomic_mutex);
    return old_value;
}
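
/* Illustrative sketch (not part of this header): distributing loop iterations
   over threads with tMPI_Atomic_fetch_add(), as described above. The counter,
   chunk size and total count are hypothetical.

   \code
   static tMPI_Atomic_t next_index;            // shared, starts at 0

   for (;;)
   {
       int start = tMPI_Atomic_fetch_add(&next_index, CHUNK);
       int end;

       if (start >= ntotal)
       {
           break;                              // nothing left to reserve
       }
       end = (start + CHUNK < ntotal) ? (start + CHUNK) : ntotal;
       process_range(start, end);              // hypothetical worker function
   }
   \endcode
 */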
/** Atomic compare-and-swap operation
 *
 * The \a old_val value is compared with the memory value in the atomic datatype.
 * If they are identical, the atomic type is swapped with the new value,
 * and otherwise left unchanged.
 *
 * This is *the* synchronization primitive: it has a consensus number of
 * infinity, and is available in some form on all modern CPU architectures.
 * In the words of Herlihy & Shavit (The Art of Multiprocessor Programming),
 * it is the 'king of all wild things'.
 *
 * In practice, use it as follows: you can start by reading a value
 * (without locking anything), perform some calculations, and then
 * atomically try to update it in memory unless it has changed. If it has
 * changed you will get a false (0) return value - re-read the new value
 * and repeat the calculations in that case.
 *
 * \param a       Atomic datatype ('memory' value)
 * \param old_val Integer value read from the atomic type at an earlier point
 * \param new_val New value to write to the atomic type if it currently is
 *                identical to the old value.
 *
 * \return True (1) if the swap occurred: i.e. if the value in a was equal
 *         to old_val. False (0) if the swap didn't occur and the value
 *         was not equal to old_val.
 *
 * \note The exchange occurred only if the return value is true (1).
 */
static inline int tMPI_Atomic_cas(tMPI_Atomic_t *a, int old_val, int new_val)
{
    int t = 0;

    tMPI_Thread_mutex_lock(&tMPI_Atomic_mutex);
    if (a->value == old_val)
    {
        a->value = new_val;
        t        = 1;
    }
    tMPI_Thread_mutex_unlock(&tMPI_Atomic_mutex);
    return t;
}
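
/* Illustrative sketch (not part of this header): the read/compute/retry
   pattern described above, here updating a shared maximum without a lock.
   The names are hypothetical.

   \code
   void update_max(tMPI_Atomic_t *shared_max, int candidate)
   {
       int cur;
       do
       {
           cur = tMPI_Atomic_get(shared_max);
           if (candidate <= cur)
           {
               return;      // someone already stored an equal or larger value
           }
       } while (!tMPI_Atomic_cas(shared_max, cur, candidate));
   }
   \endcode
 */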
/** Atomic pointer compare-and-swap operation
 *
 * The \a old_val value is compared with the memory value in the atomic datatype.
 * If they are identical, the atomic type is swapped with the new value,
 * and otherwise left unchanged.
 *
 * This is essential for implementing wait-free lists and other data
 * structures. See tMPI_Atomic_cas().
 *
 * \param a       Atomic datatype ('memory' value)
 * \param old_val Pointer value read from the atomic type at an earlier point
 * \param new_val New value to write to the atomic type if it currently is
 *                identical to the old value.
 *
 * \return True (1) if the swap occurred: i.e. if the value in a was equal
 *         to old_val. False (0) if the swap didn't occur and the value
 *         was not equal to old_val.
 *
 * \note The exchange occurred only if the return value is true (1).
 */
static inline int tMPI_Atomic_ptr_cas(tMPI_Atomic_ptr_t *a, void *old_val,
                                      void *new_val)
{
    int t = 0;

    tMPI_Thread_mutex_lock(&tMPI_Atomic_mutex);
    if (a->value == old_val)
    {
        a->value = new_val;
        t        = 1;
    }
    tMPI_Thread_mutex_unlock(&tMPI_Atomic_mutex);
    return t;
}
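
/* Illustrative sketch (not part of this header): pushing onto a lock-free
   singly linked stack with tMPI_Atomic_ptr_cas(). The node type and stack head
   are hypothetical.

   \code
   struct node { struct node *next; void *payload; };
   static tMPI_Atomic_ptr_t stack_head;        // top of the stack, or NULL

   void push(struct node *n)
   {
       void *old_top;
       do
       {
           old_top = tMPI_Atomic_ptr_get(&stack_head);
           n->next = (struct node *)old_top;
       } while (!tMPI_Atomic_ptr_cas(&stack_head, old_top, n));
   }
   \endcode
 */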
/** Initialize spinlock
 *
 * In theory you can call this from multiple threads, but remember
 * that we don't check for errors. If the first thread proceeded to
 * lock the spinlock after initialization, the second will happily
 * overwrite the contents and unlock it without warning you.
 *
 * \param x Spinlock pointer.
 */
void tMPI_Spinlock_init(tMPI_Spinlock_t *x);

#define tMPI_Spinlock_init(x) tMPI_Thread_mutex_init(&((x)->lock))
/** Acquire spinlock
 *
 * This routine blocks until the spinlock is available, and
 * then locks it before returning.
 *
 * \param x Spinlock pointer
 */
void tMPI_Spinlock_lock(tMPI_Spinlock_t *x);

#define tMPI_Spinlock_lock(x) tMPI_Thread_mutex_lock(&((x)->lock))
/** Attempt to acquire spinlock
 *
 * This routine acquires the spinlock if possible, but if it is
 * already locked it returns an error code immediately.
 *
 * \param x Spinlock pointer
 *
 * \return 0 if the lock was available so we could lock it,
 *         otherwise a non-zero integer (1) if the lock is busy.
 */
int tMPI_Spinlock_trylock(tMPI_Spinlock_t *x);

#define tMPI_Spinlock_trylock(x) tMPI_Thread_mutex_trylock(&((x)->lock))
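
/* Illustrative sketch (not part of this header): doing useful work instead of
   spinning when the lock is busy. The lock and helper names are hypothetical.

   \code
   if (tMPI_Spinlock_trylock(&queue_lock) == 0)
   {
       drain_queue();                           // we got the lock
       tMPI_Spinlock_unlock(&queue_lock);
   }
   else
   {
       do_other_work();                         // lock busy; try again later
   }
   \endcode
 */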
/** Release spinlock
 *
 * Unlocks the spinlock, regardless of which thread locked it.
 *
 * \param x Spinlock pointer
 */
void tMPI_Spinlock_unlock(tMPI_Spinlock_t *x);

#define tMPI_Spinlock_unlock(x) tMPI_Thread_mutex_unlock(&((x)->lock))
/** Check if spinlock is locked
 *
 * This routine returns immediately with the lock status.
 *
 * \param x Spinlock pointer
 *
 * \return 1 if the spinlock is locked, 0 otherwise.
 */
static inline int tMPI_Spinlock_islocked(const tMPI_Spinlock_t *x)
{
    /* the mutex fallback needs a non-const pointer */
    tMPI_Spinlock_t *xnc = (tMPI_Spinlock_t *)x;

    if (tMPI_Spinlock_trylock(xnc) != 0)
    {
        /* it was already locked */
        return 1;
    }
    /* We just locked it */
    tMPI_Spinlock_unlock(xnc);
    return 0;
}
/** Wait for a spinlock to become available
 *
 * This routine blocks until the spinlock is unlocked,
 * but in contrast to tMPI_Spinlock_lock() it returns without
 * holding the spinlock.
 *
 * \param x Spinlock pointer
 */
static inline void tMPI_Spinlock_wait(tMPI_Spinlock_t *x)
{
    tMPI_Spinlock_lock(x);
    /* Got the lock now, so the waiting is over */
    tMPI_Spinlock_unlock(x);
}

#endif /* end of platform-specific vs. mutex fallback implementations */
/* only do this if there was no better solution */
#ifndef TMPI_HAVE_SWAP
/** Atomic swap operation.

   Atomically swaps the data in the tMPI_Atomic_t operand with the value of b.
   NOTE: DON'T USE YET! (This has no good asm counterparts on many architectures.)

   \param a  Pointer to atomic type
   \param b  Value to swap
   \return   The original value of a
 */
static inline int tMPI_Atomic_swap(tMPI_Atomic_t *a, int b)
{
    int oldval;
    do
    {
        oldval = (int)(a->value);
    } while (!tMPI_Atomic_cas(a, oldval, b));
    return oldval;
}
/** Atomic pointer swap operation.

   Atomically swaps the pointer in the tMPI_Atomic_ptr_t operand with the
   value of b.
   NOTE: DON'T USE YET! (This has no good asm counterparts on many architectures.)

   \param a  Pointer to atomic type
   \param b  Value to swap
   \return   The original value of a
 */
static inline void *tMPI_Atomic_ptr_swap(tMPI_Atomic_ptr_t *a, void *b)
{
    void *oldval;
    do
    {
        oldval = (void *)(a->value);
    } while (!tMPI_Atomic_ptr_cas(a, oldval, b));
    return oldval;
}
#endif /* TMPI_HAVE_SWAP */
/* only define this if there were no separate acquire and release barriers */
#ifndef TMPI_HAVE_ACQ_REL_BARRIERS

/* if they're not defined explicitly, we just make full barriers out of both */
#define tMPI_Atomic_memory_barrier_acq tMPI_Atomic_memory_barrier
#define tMPI_Atomic_memory_barrier_rel tMPI_Atomic_memory_barrier

#endif /* TMPI_HAVE_ACQ_REL_BARRIERS */
/* this allows us to use the inline keyword without breaking support for
   some compilers that don't support it: */
#ifdef inline_defined_in_atomic
#undef inline
#endif

#ifdef __cplusplus
}
#endif

#endif /* TMPI_ATOMIC_H_ */