/*
   This source code file is part of thread_mpi.
   Written by Sander Pronk, Erik Lindahl, and possibly others.

   Copyright (c) 2009, Sander Pronk, Erik Lindahl.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   1) Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
   2) Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
   3) Neither the name of the copyright holders nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   If you want to redistribute modifications, please consider that
   scientific software is very special. Version control is crucial -
   bugs must be traceable. We will be happy to consider code for
   inclusion in the official distribution, but derived work should not
   be called official thread_mpi. Details are found in the README & COPYING
   files.
 */
#ifndef TMPI_ATOMIC_H_
#define TMPI_ATOMIC_H_
/*! \file
 * \brief Atomic operations for fast SMP synchronization
 *
 * This file defines atomic integer operations and spinlocks for
 * fast synchronization in performance-critical regions.
 *
 * In general, the best option is to use functions without explicit
 * locking, e.g. tMPI_Atomic_fetch_add() or tMPI_Atomic_cas().
 *
 * Depending on the architecture/compiler, these operations may either
 * be provided as functions or macros; be aware that those macros may
 * reference their arguments repeatedly, possibly leading to multiply
 * evaluated code with side effects: be careful with what you use as
 * arguments (a brief illustrative sketch follows this comment block).
 *
 * Not all architectures support atomic operations through inline assembly,
 * and even if they do it might not be implemented here. In that case
 * we use a fallback mutex implementation, so you can always count on
 * the function interfaces working.
 *
 * Don't use spinlocks in non-performance-critical regions like file I/O.
 * Since they always busy-wait, they would waste CPU cycles instead of
 * properly yielding to a computation thread while waiting for the disk.
 *
 * Finally, note that all our spinlock operations are defined to return
 * 0 if initialization or locking completes successfully.
 * This is the opposite of some other implementations, but the same standard
 * as used for pthread mutexes. So, if you are e.g. trying to lock a spinlock,
 * you will have gotten the lock if the return value is 0.
 *
 * tMPI_Spinlock_islocked(x) obviously still returns 1 if the lock is locked,
 * and 0 if it is available, though...
 */
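/* Illustrative sketch only (the counter and function here are hypothetical
   user code, not part of thread_mpi): since the operations below may be
   macros on some platforms, pass plain variables rather than expressions
   with side effects as arguments. */
#if 0
static void tally_events(tMPI_Atomic_t *counter, int nnew)
{
    tMPI_Atomic_fetch_add(counter, nnew);      /* fine: plain argument */
    /* tMPI_Atomic_fetch_add(counter, nnew++);    risky: nnew++ could be
                                                   evaluated more than once
                                                   if this is a macro */
}
#endif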
/* See the comments on the non-atomic versions for explanations */
#include "visibility.h"

#ifdef __cplusplus
extern "C"
{
#endif
#if 0
} /* Avoids screwing up auto-indentation */
#endif
/* first check for gcc/icc platforms.
   Some compatible compilers, like icc on linux+mac, will take this path,
   too. */
#if ( (defined(__GNUC__) || defined(__PATHSCALE__) || defined(__PGI)) && \
    (!defined(__xlc__)) && (!defined(TMPI_TEST_NO_ATOMICS)) )

#define TMPI_GCC_VERSION (__GNUC__ * 10000 \
                          + __GNUC_MINOR__ * 100 \
                          + __GNUC_PATCHLEVEL__)
/* now check specifically for several architectures: */
#if ((defined(i386) || defined(__x86_64__)) && !defined(__OPEN64__))

#include "atomic/gcc_x86.h"

#elif (defined(__ia64__))

#include "atomic/gcc_ia64.h"

/* for now we use gcc intrinsics on gcc: */
/*#elif (defined(__powerpc__) || (defined(__ppc__)) )*/
/*#include "atomic/gcc_ppc.h"*/

#elif defined(__FUJITSU) && ( defined(__sparc_v9__) || defined (__sparcv9) )

/* Fujitsu FX10 SPARC compiler */
#include "atomic/fujitsu_sparc64.h"

#else
/* otherwise, there's a generic gcc intrinsics version: */
#include "atomic/gcc.h"

#endif /* end of check for gcc specific architectures */
#elif (defined(_MSC_VER) && (_MSC_VER >= 1200) && \
    (!defined(TMPI_TEST_NO_ATOMICS)) )

/* Microsoft Visual C on x86, define taken from FFTW, who got it from
   Morten Nissov. icc on windows will take this path. */
#include "atomic/msvc.h"

#elif ( (defined(__IBM_GCC_ASM) || defined(__IBM_STDCPP_ASM)) && \
    (defined(__powerpc__) || defined(__ppc__)) && \
    (!defined(TMPI_TEST_NO_ATOMICS)) )

/* PowerPC using xlC intrinsics. */
#include "atomic/xlc_ppc.h"

#elif ( ( defined(__xlC__) || defined(__xlc__) ) && \
    (!defined(TMPI_TEST_NO_ATOMICS)) )
/* IBM xlC compiler */
#include "atomic/xlc_ppc.h"

#elif (defined (__sun) && (defined(__sparcv9) || defined(__sparc)) && \
    (!defined(TMPI_TEST_NO_ATOMICS)) )
/* Solaris on SPARC (Sun C Compiler, Solaris Studio) */
#include "atomic/suncc-sparc.h"

#elif defined(__FUJITSU) && defined(__sparc__)

/* Fujitsu FX10 SPARC compiler requires gcc compatibility with -Xg */
#error Atomics support for Fujitsu FX10 compiler requires -Xg (gcc compatibility)
#else

/* No atomic operations, use mutex fallback. Documentation is in x86 section */

#ifdef TMPI_CHECK_ATOMICS
#error No atomic operations implemented for this cpu/compiler combination.
#endif

/** Indicates that no support for atomic operations is present. */
#define TMPI_NO_ATOMICS
/** Memory barrier operation

   Modern CPUs rely heavily on out-of-order execution, and one common feature
   is that load/stores might be reordered. Also, when using inline assembly
   the compiler might already have loaded the variable we are changing into
   a register, so any update to memory won't be visible.

   This command creates a memory barrier, i.e. all memory results before
   it in the code should be visible to all memory operations after it - the
   CPU cannot propagate load/stores across it.

   This barrier is a full barrier: all load and store operations of
   instructions before it are completed, while all load and store operations
   that are in instructions after it won't be done before this barrier.
 */
#define tMPI_Atomic_memory_barrier()

/** Memory barrier operation with acquire semantics

   This barrier is a barrier with acquire semantics: the terminology comes
   from its common use after acquiring a lock: all load/store instructions
   after this barrier may not be re-ordered to happen before this barrier.
 */
#define tMPI_Atomic_memory_barrier_acq()

/** Memory barrier operation with release semantics

   This barrier is a barrier with release semantics: the terminology comes
   from its common use before releasing a lock: all load/store instructions
   before this barrier may not be re-ordered to happen after this barrier.
 */
#define tMPI_Atomic_memory_barrier_rel()

/* signal that they exist */
#define TMPI_HAVE_ACQ_REL_BARRIERS
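/* Illustrative sketch only (the payload and flag are hypothetical user code):
   a release barrier before publishing a flag, and an acquire barrier after
   observing it, order the payload accesses around the flag. */
#if 0
static int           payload;
static tMPI_Atomic_t ready;

static void publish(void)
{
    payload = 42;                          /* write the data               */
    tMPI_Atomic_memory_barrier_rel();      /* data must be visible first   */
    tMPI_Atomic_set(&ready, 1);            /* then raise the flag          */
}

static void consume(void)
{
    while (tMPI_Atomic_get(&ready) == 0)
    {
        /* spin (only acceptable in performance-critical code) */
    }
    tMPI_Atomic_memory_barrier_acq();      /* flag seen: safe to read data */
    /* use payload here */
}
#endif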
/** Atomic operations datatype
 *
 * Portable synchronization primitives like mutexes are effective for
 * many purposes, but usually not very high performance.
 * One of the problems is that you have the overhead of a function call,
 * and another is that mutexes often have extra overhead to make the
 * scheduling fair. Finally, if performance is important we don't want
 * to suspend the thread if we cannot lock a mutex, but spin-lock at 100%
 * CPU usage until the resource is available (e.g. increment a counter).
 *
 * These things can often be implemented with inline-assembly or other
 * system-dependent functions, and we provide such functionality for the
 * most common platforms. For portability we also have a fallback
 * implementation using a mutex for locking.
 *
 * Performance-wise, the fastest solution is always to avoid locking
 * completely (obvious, but remember it!). If you cannot do that, the
 * next best thing is to use atomic operations that e.g. increment a
 * counter without explicit locking. Spinlocks are useful to lock an
 * entire region, but lead to more overhead and can be difficult to
 * debug - it is up to you to make sure that only the thread owning the
 * lock actually unlocks it.
 *
 * You should normally NOT use atomic operations for things like
 * I/O threads. These should yield to other threads while waiting for
 * the disk instead of spinning at 100% CPU usage.
 *
 * It is imperative that you use the provided routines for reading
 * and writing, since some implementations require memory barriers before
 * the CPU or memory sees an updated result. The structure contents are
 * only visible here so they can be inlined for performance - they might
 * change without further notice.
 *
 * \note No initialization is required for atomic variables.
 *
 * Currently, we have (real) atomic operations for:
 *
 * - gcc version 4.1 and later (all platforms)
 * - x86 or x86_64, using GNU compilers
 * - x86 or x86_64, using Intel compilers
 * - x86 or x86_64, using Pathscale compilers
 * - Itanium, using GNU compilers
 * - Itanium, using Intel compilers
 * - Itanium, using HP compilers
 * - PowerPC, using GNU compilers
 * - PowerPC, using IBM AIX compilers
 * - PowerPC, using IBM compilers >=7.0 under Linux or Mac OS X.
 * - Sparc64, using Fujitsu compilers.
 *
 * Important operations on this datatype include:
 * - tMPI_Atomic_add_return
 * - tMPI_Atomic_fetch_add
 */
typedef struct tMPI_Atomic
{
    int value; /**< The atomic value.*/
}
tMPI_Atomic_t;
/** Atomic pointer type equivalent to tMPI_Atomic_t
 *
 * Useful for lock-free and wait-free data structures.
 * The only operations available for this type are:
 * - tMPI_Atomic_ptr_get
 * - tMPI_Atomic_ptr_set
 * - tMPI_Atomic_ptr_cas
 */
typedef struct tMPI_Atomic_ptr
{
    void *value; /**< The atomic pointer. */
}
tMPI_Atomic_ptr_t;
/** Spinlock
 *
 * Spinlocks provide faster synchronization than mutexes,
 * although they consume CPU cycles while waiting. They are implemented
 * with atomic operations and inline assembly whenever possible, and
 * otherwise we use a fallback implementation where a spinlock is identical
 * to a mutex (this is one of the reasons why you have to initialize them).
 *
 * There are no guarantees whatsoever about fair scheduling or
 * debugging if you make a mistake and unlock a variable somebody
 * else has locked - performance is the primary goal of spinlocks.
 *
 * The operations available for this type are:
 * - tMPI_Spinlock_init
 * - tMPI_Spinlock_lock
 * - tMPI_Spinlock_unlock
 * - tMPI_Spinlock_trylock
 * - tMPI_Spinlock_wait
 */
typedef struct tMPI_Spinlock *tMPI_Spinlock_t;
/*! \def TMPI_SPINLOCK_INITIALIZER
 * \brief Spinlock static initializer
 *
 * This is used for static spinlock initialization, and has the same
 * properties as TMPI_THREAD_MUTEX_INITIALIZER has for mutexes.
 * This is only for inlining in the tMPI_Thread.h header file. Whether
 * it is 0, 1, or something else when unlocked depends on the platform.
 * Don't assume anything about it. It might even be a mutex when using the
 * fallback implementation!
 */
#define TMPI_SPINLOCK_INITIALIZER { NULL }
/* Since mutexes guarantee memory barriers this works fine */
/** Return value of an atomic integer
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a   Atomic variable to read
 * \return    Integer value of the atomic variable
 */
int tMPI_Atomic_get(const tMPI_Atomic_t *a);
/** Write value to an atomic integer
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a   Atomic variable
 * \param i   Integer to set the atomic variable to.
 */
void tMPI_Atomic_set(tMPI_Atomic_t *a, int i);
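/* Illustrative sketch only (the stop flag is hypothetical user code):
   tMPI_Atomic_set()/tMPI_Atomic_get() let one thread raise a flag that
   other threads poll, without any explicit lock. */
#if 0
static tMPI_Atomic_t stop_requested;

static void request_stop(void)
{
    tMPI_Atomic_set(&stop_requested, 1);
}

static int should_stop(void)
{
    return tMPI_Atomic_get(&stop_requested) != 0;
}
#endif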
/** Return value of an atomic pointer
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a   Atomic variable to read
 * \return    Pointer value of the atomic variable
 */
void* tMPI_Atomic_ptr_get(const tMPI_Atomic_ptr_t *a);
/** Write value to an atomic pointer
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a   Atomic variable
 * \param p   Pointer value to set the atomic variable to.
 */
void tMPI_Atomic_ptr_set(tMPI_Atomic_ptr_t *a, void *p);
/** Add integer to atomic variable
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a   atomic datatype to modify
 * \param i   integer to increment with. Use i<0 to subtract atomically.
 *
 * \return The new value (after summation).
 */
int tMPI_Atomic_add_return(tMPI_Atomic_t *a, int i);
#define TMPI_ATOMIC_HAVE_NATIVE_ADD_RETURN
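/* Illustrative sketch only (the arrival counter is hypothetical user code):
   because tMPI_Atomic_add_return() returns the value after addition, the
   thread that sees the full count knows it was the last to arrive. */
#if 0
static int arrive(tMPI_Atomic_t *counter, int nthreads)
{
    return (tMPI_Atomic_add_return(counter, 1) == nthreads); /* 1 only for
                                                                the last one */
}
#endif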
/** Add to variable, return the old value.
 *
 * This operation is quite useful for synchronization counters.
 * By performing a fetchadd with N, a thread can e.g. reserve a chunk
 * of the next N iterations, and the return value is the index
 * of the first element to treat (a sketch of this pattern follows the
 * declaration below).
 *
 * Also implements proper memory barriers when necessary.
 * The actual implementation is system-dependent.
 *
 * \param a   atomic datatype to modify
 * \param i   integer to increment with. Use i<0 to subtract atomically.
 *
 * \return    The value of the atomic variable before addition.
 */
int tMPI_Atomic_fetch_add(tMPI_Atomic_t *a, int i);
#define TMPI_ATOMIC_HAVE_NATIVE_FETCH_ADD
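/* Illustrative sketch of the chunk-reservation pattern described above
   (the worker function itself is hypothetical user code): each call to
   tMPI_Atomic_fetch_add() reserves 'chunk' iterations and returns the first
   index of that chunk. */
#if 0
static void work_on_range(tMPI_Atomic_t *next_index, int ntotal, int chunk)
{
    int first;
    while ((first = tMPI_Atomic_fetch_add(next_index, chunk)) < ntotal)
    {
        int last = (first + chunk < ntotal) ? (first + chunk) : ntotal;
        /* process iterations [first, last) */
    }
}
#endif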
/** Atomic compare-and-swap operation
 *
 * The \a old_val value is compared with the memory value in the atomic
 * datatype. If they are identical, the atomic type is swapped with the new
 * value, and otherwise left unchanged.
 *
 * This is *the* synchronization primitive: it has a consensus number of
 * infinity, and is available in some form on all modern CPU architectures.
 * In the words of Herlihy & Shavit (The Art of Multiprocessor Programming),
 * it is the 'king of all wild things'.
 *
 * In practice, use it as follows: you can start by reading a value
 * (without locking anything), perform some calculations, and then
 * atomically try to update it in memory unless it has changed. If it has
 * changed you will get an error return code - reread the new value
 * and repeat the calculations in that case (a sketch of this pattern
 * follows the declaration below).
 *
 * \param a        Atomic datatype ('memory' value)
 * \param old_val  Integer value read from the atomic type at an earlier point
 * \param new_val  New value to write to the atomic type if it currently is
 *                 identical to the old value.
 *
 * \return True (1) if the swap occurred: i.e. if the value in a was equal
 *         to old_val. False (0) if the swap didn't occur and the value
 *         was not equal to old_val.
 *
 * \note The exchange occurred if and only if the return value is true (1).
 */
int tMPI_Atomic_cas(tMPI_Atomic_t *a, int old_val, int new_val);
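/* Illustrative sketch of the read-calculate-retry pattern described above
   (the atomic-maximum helper is hypothetical user code): read the value,
   compute the update, and retry the CAS if another thread changed the
   variable in the meantime. */
#if 0
static void atomic_max(tMPI_Atomic_t *a, int b)
{
    int old;
    do
    {
        old = tMPI_Atomic_get(a);
        if (old >= b)
        {
            return;                        /* nothing to update            */
        }
    }
    while (!tMPI_Atomic_cas(a, old, b));   /* failed: someone else changed
                                              the value, so try again      */
}
#endif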
/** Atomic pointer compare-and-swap operation
 *
 * The \a old_val value is compared with the memory value in the atomic
 * datatype. If they are identical, the atomic type is swapped with the new
 * value, and otherwise left unchanged.
 *
 * This is essential for implementing wait-free lists and other data
 * structures. See 'tMPI_Atomic_cas()'.
 *
 * \param a        Atomic datatype ('memory' value)
 * \param old_val  Pointer value read from the atomic type at an earlier point
 * \param new_val  New value to write to the atomic type if it currently is
 *                 identical to the old value.
 *
 * \return True (1) if the swap occurred: i.e. if the value in a was equal
 *         to old_val. False (0) if the swap didn't occur and the value
 *         was not equal to old_val.
 *
 * \note The exchange occurred if and only if the return value is true (1).
 */
int tMPI_Atomic_ptr_cas(tMPI_Atomic_ptr_t *a, void *old_val,
                        void *new_val);
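/* Illustrative sketch only ('struct node' and the push function are
   hypothetical user code): the classic lock-free stack push retries the
   pointer CAS until the head did not change between reading it and
   swapping it. */
#if 0
struct node
{
    struct node *next;
    /* payload ... */
};

static void push(tMPI_Atomic_ptr_t *head, struct node *n)
{
    void *old_head;
    do
    {
        old_head = tMPI_Atomic_ptr_get(head);
        n->next  = (struct node *)old_head;
    }
    while (!tMPI_Atomic_ptr_cas(head, old_head, n));
}
#endif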
/** Atomic swap operation.

   Atomically swaps the data in the tMPI_Atomic_t operand with the value of b.
   Note: This has no good assembly counterparts on many architectures, so
   it might not be faster than a repeated CAS.

   \param a  Pointer to atomic type
   \param b  Value to swap
   \return   the original value of a
 */
int tMPI_Atomic_swap(tMPI_Atomic_t *a, int b);
/** Atomic swap pointer operation.

   Atomically swaps the pointer in the tMPI_Atomic_ptr_t operand with the
   value of b.
   Note: This has no good assembly counterparts on many architectures, so
   it might not be faster than a repeated CAS.

   \param a  Pointer to atomic type
   \param b  Value to swap
   \return   the original value of a
 */
void *tMPI_Atomic_ptr_swap(tMPI_Atomic_ptr_t *a, void *b);
#define TMPI_ATOMIC_HAVE_NATIVE_SWAP
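/* Illustrative sketch only (reusing the hypothetical 'struct node' list from
   the example above): tMPI_Atomic_ptr_swap() lets a consumer detach an
   entire lock-free list in one step by swapping the head with NULL. */
#if 0
static struct node *grab_all(tMPI_Atomic_ptr_t *head)
{
    return (struct node *)tMPI_Atomic_ptr_swap(head, NULL);
}
#endif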
/** Initialize spinlock
 *
 * In theory you can call this from multiple threads, but remember
 * that we don't check for errors. If the first thread proceeded to
 * lock the spinlock after initialization, the second will happily
 * overwrite the contents and unlock it without warning you.
 *
 * \param x   Spinlock pointer.
 */
void tMPI_Spinlock_init( tMPI_Spinlock_t *x);
#define TMPI_ATOMIC_HAVE_NATIVE_SPINLOCK
/** Acquire spinlock
 *
 * This routine blocks until the spinlock is available, and
 * then locks it before returning.
 *
 * \param x   Spinlock pointer
 */
void tMPI_Spinlock_lock( tMPI_Spinlock_t *x);
/** Attempt to acquire spinlock
 *
 * This routine acquires the spinlock if possible, but if it is
 * already locked it returns an error code immediately.
 *
 * \param x   Spinlock pointer
 *
 * \return 0 if the spinlock was available so we could lock it,
 *         otherwise a non-zero integer (1) if the lock is busy.
 */
int tMPI_Spinlock_trylock( tMPI_Spinlock_t *x);
/** Release spinlock
 *
 * \param x   Spinlock pointer
 *
 * Unlocks the spinlock, regardless of which thread locked it.
 */
void tMPI_Spinlock_unlock( tMPI_Spinlock_t *x);
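/* Illustrative sketch only (the counter and its lock are hypothetical user
   code): a statically initialized spinlock guarding a short critical
   section; keep the locked region as small as possible. */
#if 0
static tMPI_Spinlock_t counter_lock = TMPI_SPINLOCK_INITIALIZER;
static int             counter      = 0;

static void bump_counter(void)
{
    tMPI_Spinlock_lock(&counter_lock);
    counter++;                             /* the critical section */
    tMPI_Spinlock_unlock(&counter_lock);
}
#endif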
/** Check if spinlock is locked
 *
 * This routine returns immediately with the lock status.
 *
 * \param x  Spinlock pointer
 *
 * \return 1 if the spinlock is locked, 0 otherwise.
 */
int tMPI_Spinlock_islocked( tMPI_Spinlock_t *x);
/** Wait for a spinlock to become available
 *
 * This routine blocks until the spinlock is unlocked,
 * but in contrast to tMPI_Spinlock_lock() it returns without
 * trying to lock the spinlock.
 *
 * \param x  Spinlock pointer
 */
void tMPI_Spinlock_wait(tMPI_Spinlock_t *x);
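/* Illustrative sketch only (the start gate is hypothetical user code): a
   spinlock held by the main thread can act as a gate; workers call
   tMPI_Spinlock_wait(), which returns once the lock is released but never
   acquires it. */
#if 0
static void wait_for_start(tMPI_Spinlock_t *gate)
{
    tMPI_Spinlock_wait(gate);              /* returns as soon as *gate is
                                              unlocked */
    /* start working */
}
#endif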
#endif /* platform-specific checks */

/* now define all the atomics that are not available natively. These
   are done on the assumption that a native CAS does exist. */
#include "atomic/derived.h"
/* this allows us to use the inline keyword without breaking support for
   some compilers that don't support it: */
#ifdef inline_defined_in_atomic
#undef inline
#endif
#if !defined(TMPI_NO_ATOMICS) && !defined(TMPI_ATOMICS)
/* Set it here to make sure the user code can check this without having to
   include a separate configuration header */
/** Indicates that support for atomic operations is present. */
#define TMPI_ATOMICS
#endif
#ifdef __cplusplus
}
#endif

#endif /* TMPI_ATOMIC_H_ */