2 This source code file is part of thread_mpi.
3 Written by Sander Pronk, Erik Lindahl, and possibly others.
5 Copyright (c) 2009, Sander Pronk, Erik Lindahl.
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10 1) Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 2) Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15 3) Neither the name of the copyright holders nor the
16 names of its contributors may be used to endorse or promote products
17 derived from this software without specific prior written permission.
19 THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
20 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
23 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 If you want to redistribute modifications, please consider that
31 scientific software is very special. Version control is crucial -
32 bugs must be traceable. We will be happy to consider code for
33 inclusion in the official distribution, but derived work should not
34 be called official thread_mpi. Details are found in the README & COPYING
39 #ifndef TMPI_THREADS_H_
40 #define TMPI_THREADS_H_
44 * \brief Platform-independent multithreading support.
46 * This file provides a portable thread interface very similar to POSIX
47 * threads, as a thin wrapper around the threads provided by the operating
48 * system (whether they be POSIX threads or something else).
50 * In other words, while the naming conventions are very similar to
51 * pthreads, you should NOT assume that a thread_mpi thread type
52 * (thread,mutex,key, etc) is the same as the Pthreads equivalent,
53 * even on platforms where we are using pthreads.
55 * Because the synchronization functions here are similar to the basic
56 * mutexes/conditions/barriers provided by the operating system,
57 * performance will most likely be worse than when using the atomic
58 * synchronization functions of atomic.h. On the other hand, because the
59 * operating system can schedule out waiting threads using these functions,
60 * they are the appropriate ones for I/O and initialization.
62 * Since this module is merely intended to be a transparent wrapper around
63 * a system-dependent thread implementation, we simply echo errors to stderr.
64 * The user should check the return codes and take appropriate action
65 * when using these functions (fat chance, but errors are rare).
72 #include "visibility.h"
81 } /* Avoids screwing up auto-indentation */
88 /*! \brief Thread ID: abstract tMPI_Thread type
90 * The contents of this structure depends on the actual threads
91 * implementation used.
93 typedef struct tMPI_Thread* tMPI_Thread_t;
97 /*! \brief Opaque mutex datatype
99 * This type is only defined in the header to enable static
100 * initialization with TMPI_THREAD_MUTEX_INITIALIZER.
101 * You should _never_ touch the contents or create a variable
102 * with automatic storage class without calling tMPI_Thread_mutex_init().
106 tMPI_Atomic_t initialized; /*!< Whether \a mutex has been initialized. */
107 struct tMPI_Mutex* mutex; /*!< Actual mutex data structure. */
108 } tMPI_Thread_mutex_t;
109 /*! \brief Static initializer for tMPI_Thread_mutex_t
111 * See the description of the tMPI_Thread_mutex_t datatype for instructions
112 * on how to use this. Note that any variables initialized with this value
113 * MUST have static storage allocation.
115 #define TMPI_THREAD_MUTEX_INITIALIZER { {0} , NULL }
121 /*! \brief Pthread implementation of the abstract tMPI_Thread_key type
123 * The contents of this structure depends on the actual threads
124 * implementation used.
128 tMPI_Atomic_t initialized; /*!< Whether \a key has been initialized. */
129 struct tMPI_Thread_key *key; /*!< Actual key data structure. */
136 /*! \brief One-time initialization data for thread
138 * This is an opaque datatype which is necessary for tMPI_Thread_once(),
139 * but since it needs to be initialized statically it must be defined
140 * in the header. You will be sorry if you touch the contents.
141 * Variables of this type should always be initialized statically to
142 * TMPI_THREAD_ONCE_INIT.
144 * This type is used as control data for single-time initialization.
145 * The most common example is a mutex at file scope used when calling
146 * a non-threadsafe function, e.g. the FFTW initialization routines.
151 tMPI_Atomic_t once; /*!< Whether the operation has been performed. */
152 } tMPI_Thread_once_t;
153 /*! \brief Static initializer for tMPI_Thread_once_t
155 * See the description of the tMPI_Thread_once_t datatype for instructions
156 * on how to use this. Normally, all variables of that type should be
157 * initialized statically to this value.
159 #define TMPI_THREAD_ONCE_INIT { {0} }
164 /*! \brief Condition variable handle for threads
166 * Condition variables are useful for synchronization together
167 * with a mutex: Lock the mutex and check if our thread is the last
168 * to the barrier. If no, wait for the condition to be signaled.
169 * If yes, reset whatever data you want and then signal the condition.
171 * This should be considered an opaque structure, but since it is sometimes
172 * useful to initialize it statically it must go in the header.
173 * You will be sorry if you touch the contents.
175 * There are two alternatives: Either initialize it as a static variable
176 * with TMPI_THREAD_COND_INITIALIZER, or call tMPI_Thread_cond_init()
181 tMPI_Atomic_t initialized; /*!< Whether \a condp has been initialized. */
182 struct tMPI_Thread_cond* condp; /*!< Actual condition variable data structure. */
183 } tMPI_Thread_cond_t;
184 /*! \brief Static initializer for tMPI_Thread_cond_t
186 * See the description of the tMPI_Thread_cond_t datatype for instructions
187 * on how to use this. Note that any variables initialized with this value
188 * MUST have static storage allocation.
190 #define TMPI_THREAD_COND_INITIALIZER { {0}, NULL}
197 /*! \brief Pthread implementation of barrier type.
199 * The contents of this structure depends on the actual threads
200 * implementation used.
204 tMPI_Atomic_t initialized; /*!< Whether \a barrierp has been initialized. */
205 struct tMPI_Thread_barrier* barrierp; /*!< Actual barrier data structure. */
206 volatile int threshold; /*!< Total number of members in barrier */
207 volatile int count; /*!< Remaining count before completion */
208 volatile int cycle; /*!< Alternating 0/1 to indicate round */
209 }tMPI_Thread_barrier_t;
210 /*! \brief Static initializer for tMPI_Thread_barrier_t
212 * See the description of the tMPI_Thread_barrier_t datatype for instructions
213 * on how to use this. Note that variables initialized with this value
214 * MUST have static storage allocation.
216 * \param count Threshold for barrier
218 #define TMPI_THREAD_BARRIER_INITIALIZER(count) {\
219 {0}, NULL, count, count, 0 /* initialized, barrierp, threshold, count, cycle */ \
227 /** Thread support status enumeration */
228 enum tMPI_Thread_support
230 TMPI_THREAD_SUPPORT_NO = 0, /*!< Starting threads will fail */
231 TMPI_THREAD_SUPPORT_YES = 1 /*!< Thread support available */
235 /** Thread setaffinity support status enumeration */
236 enum tMPI_Thread_setaffinity_support
238 TMPI_SETAFFINITY_SUPPORT_NO = 0, /*!< Setting thread affinity not
240 TMPI_SETAFFINITY_SUPPORT_YES = 1 /*!< Setting thread affinity supported */
246 /** handle a fatal error.
248 \param file source code file name of error.
249 \param line source code line number of error.
250 \param message format string for error message.
253 void tMPI_Fatal_error(const char *file, int line, const char *message, ...);
254 /** Convenience macro for the first two arguments to tMPI_Fatal_error(). */
255 #define TMPI_FARGS __FILE__,__LINE__
259 /*! \name Thread creation, destruction, and inspection
261 /** Check if threads are supported
263 * This routine provides a cleaner way to check if threads are supported
264 * instead of sprinkling your code with preprocessor conditionals.
266 * All thread functions are still available even without thread support,
267 * but some of them might return failure error codes, for instance if you try
270 * \return 1 if threads are supported, 0 if not.
273 enum tMPI_Thread_support tMPI_Thread_support(void);
276 /** Get the number of hardware threads that can be run simultaneously.
278 Returns the total number of cores and SMT threads that can run.
280 \returns The maximum number of threads that can run simultaneously.
281 If this number cannot be determined for the current architecture,
285 int tMPI_Thread_get_hw_number(void);
288 /** Create a new thread
290 * The new thread will call start_routine() with the argument arg.
292 * Please be careful not to change arg after calling this function.
294 * \param[out] thread Pointer to thread ID
295 * \param[in] start_routine The function to call in the new thread
296 * \param[in] arg Argument to call with
298 * \return Status - 0 on success, or an error code.
301 int tMPI_Thread_create(tMPI_Thread_t *thread,
302 void* (*start_routine)(void *),
308 /** Wait for a specific thread to finish executing
310 * If the thread has already finished the routine returns immediately.
312 * \param[in] thread Pointer to thread ID
313 * \param[out] value_ptr Pointer to location where to store pointer to
314 * exit value from threads that called
315 * tMPI_Thread_exit().
317 * \return 0 if the join went ok, or a non-zero error code.
320 int tMPI_Thread_join(tMPI_Thread_t thread, void **value_ptr);
323 /** Terminate calling thread
327 * \param value_ptr Pointer to a return value. Threads waiting for us to
328 * join them can read this value if they try.
332 void tMPI_Thread_exit(void *value_ptr);
336 /** Ask a thread to exit
338 * This routine tries to end the execution of another thread, but there are
339 * no guarantees it will succeed.
341 * \param thread Handle to thread we want to see dead.
342 * \return 0 or a non-zero error message.
345 int tMPI_Thread_cancel(tMPI_Thread_t thread);
350 /** Get a thread ID of the calling thread.
352 * This function also works on threads not started with tMPI_Thread_create,
353 * or any other function in thread_mpi. This makes it possible to, for
354 * example assign thread affinities to any thread.
356 * \return A thread ID of the calling thread */
358 tMPI_Thread_t tMPI_Thread_self(void);
362 /** Check whether two thread pointers point to the same thread
364 * \param[in] t1 Thread ID 1
365 * \param[in] t2 Thread ID 2
366 * \return non-zero if the thread structs refer to the same thread,
367 0 if the threads are different*/
369 int tMPI_Thread_equal(tMPI_Thread_t t1, tMPI_Thread_t t2);
372 /** Check whether this platform supports setting of thread affinity
374 * This function returns TMPI_SETAFFINITY_SUPPORT_YES if setting thread
375 * affinity is supported by the platform, and TMPI_SETAFFINITY_SUPPORT_NO
376 * if not. If this function returns 0, the function
377 * tMPI_Thread_setaffinity_single will simply return 0 itself, effectively
378 * ignoring the request.
380 * \return TMPI_SETAFFINITY_SUPPORT_YES if setting affinity is supported,
381 TMPI_SETAFFINITY_SUPPORT_NO otherwise */
383 enum tMPI_Thread_setaffinity_support tMPI_Thread_setaffinity_support(void);
386 /** Set thread affinity to a single core
388 * This function sets the thread affinity of a thread to a specific
389 * numbered processor. This only works if the underlying operating system
390 * supports it. The processor number must be between 0 and the number returned
391 * by tMPI_Thread_get_hw_number().
393 * \param[in] thread Thread ID of the thread to set affinity for
394 * \param[in] nr Processor number to set affinity to
395 * \return zero on success, non-zero on error */
397 int tMPI_Thread_setaffinity_single(tMPI_Thread_t thread, unsigned int nr);
405 /** Initialize a new mutex
407 * This routine must be called before using any mutex not initialized
408 * with static storage class and TMPI_THREAD_MUTEX_INITIALIZER.
410 * \param mtx Pointer to a mutex opaque type.
411 * \return 0 or an error code.
414 int tMPI_Thread_mutex_init(tMPI_Thread_mutex_t *mtx);
419 /** Kill a mutex you no longer need
421 * Note that this call only frees resources allocated inside the mutex. It
422 * does not free the tMPI_Thread_mutex_t memory area itself if you created it
423 * with dynamic memory allocation.
425 * \param mtx Pointer to a mutex variable to get rid of.
426 * \return 0 or a non-zero error code.
429 int tMPI_Thread_mutex_destroy(tMPI_Thread_mutex_t *mtx);
434 /** Wait for exclusive access to a mutex
436 * This routine does not return until the mutex has been acquired.
438 * \param mtx Pointer to the mutex to lock
439 * \return 0 or a non-zero error code.
442 int tMPI_Thread_mutex_lock(tMPI_Thread_mutex_t *mtx);
447 /** Try to lock a mutex, return if busy
449 * This routine always returns directly. If the mutex was available and
450 * we successfully locked it we return 0, otherwise a non-zero
451 * return code (usually meaning the mutex was already locked).
453 * \param mtx Pointer to the mutex to try and lock
454 * \return 0 or a non-zero return error code.
457 int tMPI_Thread_mutex_trylock(tMPI_Thread_mutex_t *mtx);
462 /** Release the exclusive access to a mutex
464 * \param mtx Pointer to the mutex to release
465 * \return 0 or a non-zero error code.
468 int tMPI_Thread_mutex_unlock(tMPI_Thread_mutex_t *mtx);
473 /*! \name Thread-specific storage
477 /** Initialize thread-specific-storage handle
479 * The tMPI_Thread_key_t handle must always be initialized dynamically with
480 * this routine. If you need to initialize it statically in a file, use the
481 * tMPI_Thread_once() routine and corresponding data to initialize the
482 * thread-specific-storage key the first time you access it.
484 * \param key Pointer to opaque thread key type.
485 * \param destructor Routine to call (to free memory of key) when we quit
487 * \return status - 0 on success or a standard error code.
491 int tMPI_Thread_key_create(tMPI_Thread_key_t *key, void (*destructor)(void *));
496 /** Delete thread-specific-storage handle
498 * Calling this routine will kill the handle, and invoke the automatic
499 * destructor routine for each non-NULL value pointed to by key.
501 * \param key Opaque key type to destroy
502 * \return 0 or a non-zero error message.
505 int tMPI_Thread_key_delete(tMPI_Thread_key_t key);
510 /** Get value for thread-specific-storage in this thread
512 * If it has not yet been set, NULL is returned.
514 * \param key Thread-specific-storage handle.
515 * \return Pointer-to-void, the value of the data in this thread.
518 void * tMPI_Thread_getspecific(tMPI_Thread_key_t key);
522 /** Set value for thread-specific-storage in this thread
524 * \param key Thread-specific-storage handle.
525 * \param value What to set the data to (pointer-to-void).
526 * \return 0 or a non-zero error message.
529 int tMPI_Thread_setspecific(tMPI_Thread_key_t key, void *value);
537 /** Run the provided routine exactly once
539 * The control data must have been initialized before calling this routine,
540 * but you can do it with the static initializer TMPI_THREAD_ONCE_INIT.
542 * tMPI_Thread_once() will not return to any of the calling routines until
543 * the initialization function has been completed.
545 * \param once_data Initialized one-time execution data
546 * \param init_routine Function to call exactly once
547 * \return 0 or a non-zero error message.
550 int tMPI_Thread_once(tMPI_Thread_once_t *once_data,
551 void (*init_routine)(void));
555 /*! \name Condition variables
558 /** Initialize condition variable
560 * This routine must be called before using any condition variable
561 * not initialized with static storage class and TMPI_THREAD_COND_INITIALIZER.
563 * \param cond Pointer to previously allocated condition variable
564 * \return 0 or a non-zero error message.
567 int tMPI_Thread_cond_init(tMPI_Thread_cond_t *cond);
571 /** Destroy condition variable
573 * This routine should be called when you are done with a condition variable.
574 * Note that it only releases memory allocated internally, not the
575 * tMPI_Thread_cond_t structure you provide a pointer to.
577 * \param cond Pointer to condition variable.
578 * \return 0 or a non-zero error message.
581 int tMPI_Thread_cond_destroy(tMPI_Thread_cond_t *cond);
585 /** Wait for a condition to be signaled
587 * This routine releases the mutex, and waits for the condition to be
588 * signaled by another thread before it returns.
590 * Note that threads waiting for conditions with tMPI_Thread_cond_wait
591 * may be subject to spurious wakeups: use this function in a while loop
592 * and check the state of a predicate associated with the wakeup
593 * before leaving the loop.
595 * \param cond condition variable
596 * \param mtx Mutex protecting the condition variable
598 * \return 0 or a non-zero error message.
601 int tMPI_Thread_cond_wait(tMPI_Thread_cond_t *cond,
602 tMPI_Thread_mutex_t *mtx);
607 /** Unblock one waiting thread
609 * This routine signals a condition variable to one
610 * thread (if any) waiting for it after calling
611 * tMPI_Thread_cond_wait().
613 * \param cond condition variable
615 * \return 0 or a non-zero error message.
618 int tMPI_Thread_cond_signal(tMPI_Thread_cond_t *cond);
621 /** Unblock all waiting threads
623 * This routine signals a condition variable to all
624 * (if any) threads that are waiting for it after calling
625 * tMPI_Thread_cond_wait().
627 * \param cond condition variable
629 * \return 0 or a non-zero error message.
632 int tMPI_Thread_cond_broadcast(tMPI_Thread_cond_t *cond);
641 /** Initialize a synchronization barrier type
643 * You only need to initialize a barrier once. They cycle
644 * automatically, so after release it is immediately ready
645 * to accept waiting threads again.
647 * \param barrier Pointer to previously allocated barrier type
648 * \param count Number of threads to synchronize. All threads
649 * will be released after \a count calls to
650 * tMPI_Thread_barrier_wait().
653 int tMPI_Thread_barrier_init(tMPI_Thread_barrier_t *barrier, int count);
657 /** Release data in a barrier datatype
659 * \param barrier Pointer to previously
660 * initialized barrier.
663 int tMPI_Thread_barrier_destroy(tMPI_Thread_barrier_t *barrier);
666 /** Perform barrier synchronization
668 * This routine blocks until it has been called N times,
669 * where N is the count value the barrier was initialized with.
670 * After N total calls all threads return. The barrier automatically
671 * cycles, and thus requires another N calls to unblock another time.
673 * \param barrier Pointer to previously create barrier.
675 * \return The last thread returns -1, all the others 0.
678 int tMPI_Thread_barrier_wait(tMPI_Thread_barrier_t *barrier);
687 #endif /* TMPI_THREADS_H_ */