/* allocate array of processor info blocks */
- pMPI_ProcessorInfo = tMPI_Malloc( sizeof(MPI_NUMA_PROCESSOR_INFO) *
- dwTotalProcessors );
+ pMPI_ProcessorInfo = malloc( sizeof(MPI_NUMA_PROCESSOR_INFO) *
+ dwTotalProcessors );
if (pMPI_ProcessorInfo == NULL)
{
- tMPI_Fatal_error(TMPI_FARGS, "tMPI_Malloc failed for processor information");
goto cleanup;
}
if (!func_GetNumaProcessorNodeEx(pProcessorNumber, pNodeNumber))
{
- tMPI_Fatal_error(TMPI_FARGS,
- "Processor enumeration, GetNumaProcessorNodeEx failed, error code=%d",
- GetLastError());
goto cleanup;
}
if (!func_GetNumaNodeProcessorMaskEx(*pNodeNumber, pGroupAffinity))
{
- tMPI_Fatal_error(TMPI_FARGS,
- "Processor enumeration, GetNumaNodeProcessorMaskEx failed, error code=%d",
- GetLastError());
goto cleanup;
}
if (i > dwTotalProcessors)
{
- tMPI_Fatal_error(TMPI_FARGS, "Processor enumeration exceeds allocated memory!");
goto cleanup;
}
}
return 0;
}
-static void tMPI_Thread_id_list_init(void)
+static int tMPI_Thread_id_list_init(void)
{
+ int ret = 0;
+
EnterCriticalSection( &thread_id_list_lock );
N_thread_id_list = 0;
Nalloc_thread_id_list = 4; /* number of initial allocation*/
- thread_id_list = (thread_id_list_t*)tMPI_Malloc(
- sizeof(thread_id_list_t)*
- Nalloc_thread_id_list);
+ thread_id_list = (thread_id_list_t*)malloc(sizeof(thread_id_list_t)*
+ Nalloc_thread_id_list);
+ if (thread_id_list == NULL)
+ {
+ ret = ENOMEM;
+ }
LeaveCriticalSection( &thread_id_list_lock );
+ return ret;
}
/* add an entry to the thread ID list, assuming it's locked */
-static void tMPI_Thread_id_list_add_locked(DWORD thread_id,
- struct tMPI_Thread *th)
+static int tMPI_Thread_id_list_add_locked(DWORD thread_id,
+ struct tMPI_Thread *th)
{
if (Nalloc_thread_id_list < N_thread_id_list + 1)
{
/* double the size */
Nalloc_thread_id_list *= 2;
- new_list = (thread_id_list_t*)tMPI_Malloc(
- sizeof(thread_id_list_t)*
- Nalloc_thread_id_list);
+ /* and allocate the new list */
+ new_list = (thread_id_list_t*)malloc(sizeof(thread_id_list_t)*
+ Nalloc_thread_id_list);
+ if (new_list == NULL)
+ {
+ return ENOMEM;
+ }
/* and copy over all elements */
for (i = 0; i < N_thread_id_list; i++)
{
thread_id_list[ N_thread_id_list ].th = th;
N_thread_id_list++;
-
+ return 0;
}
/* add an entry to the thread ID list */
-static void tMPI_Thread_id_list_add(DWORD thread_id, struct tMPI_Thread *th)
+static int tMPI_Thread_id_list_add(DWORD thread_id, struct tMPI_Thread *th)
{
+ int ret = 0;
EnterCriticalSection( &thread_id_list_lock );
- tMPI_Thread_id_list_add_locked(thread_id, th);
+ ret = tMPI_Thread_id_list_add_locked(thread_id, th);
LeaveCriticalSection( &thread_id_list_lock );
+ return ret;
}
-/* Remove an entry from the thread_id list, assuming it's locked */
+/* Remove an entry from the thread_id list, assuming it's locked.
+ Does nothing if an entry is not found.*/
static void tMPI_Thread_id_list_remove_locked(DWORD thread_id)
{
int i;
EnterCriticalSection( &thread_id_list_lock );
ret = tMPI_Thread_id_list_find_locked(thread_id);
-
LeaveCriticalSection( &thread_id_list_lock );
return ret;
}
/* try to add the running thread to the list. Returns the tMPI_Thread struct
- associated with this thread.*/
+ associated with this thread, or NULL in case of an error.*/
static struct tMPI_Thread *tMPI_Thread_id_list_add_self(void)
{
DWORD thread_id;
struct tMPI_Thread *th = NULL;
+ int ret;
EnterCriticalSection( &thread_id_list_lock );
if (th == NULL)
{
/* if not, create an ID, set it and return it */
- th = (struct tMPI_Thread*)tMPI_Malloc(sizeof(struct tMPI_Thread)*1);
+ th = (struct tMPI_Thread*)malloc(sizeof(struct tMPI_Thread)*1);
/* to create a handle that can be used outside of the current
thread, the handle from GetCurrentThread() must first
/* This causes a small memory leak that is hard to fix. */
th->started_by_tmpi = 0;
- tMPI_Thread_id_list_add_locked(thread_id, th);
+ ret = tMPI_Thread_id_list_add_locked(thread_id, th);
+ if (ret != 0)
+ {
+ free(th);
+ th = NULL;
+ }
}
LeaveCriticalSection( &thread_id_list_lock );
-
return th;
}
-static void tMPI_Init_initers(void)
+static int tMPI_Init_initers(void)
{
int state;
+ int ret = 0;
+
/* we can pre-check because it's atomic */
if (tMPI_Atomic_get(&init_inited) == 0)
{
InitializeCriticalSection(&barrier_init);
InitializeCriticalSection(&thread_id_list_lock);
- /* fatal errors are handled by the routine by calling
- tMPI_Fatal_error() */
- tMPI_Init_NUMA();
+ ret = tMPI_Init_NUMA();
+ if (ret != 0)
+ {
+ goto err;
+ }
+
- tMPI_Thread_id_list_init();
+ ret = tMPI_Thread_id_list_init();
+ if (ret != 0)
+ {
+ goto err;
+ }
tMPI_Atomic_memory_barrier_rel();
tMPI_Atomic_set(&init_inited, 1);
tMPI_Spinlock_unlock( &init_init );
}
-}
-
-
-
-/* TODO: this needs to go away! (there's another one in pthreads.c)
- fatal errors are thankfully really rare*/
-void tMPI_Fatal_error(const char *file, int line, const char *message, ...)
-{
- va_list ap;
-
- fprintf(stderr, "tMPI Fatal error in %s, line %d: ", file, line);
- va_start(ap, message);
- vfprintf(stderr, message, ap);
- va_end(ap);
- fprintf(stderr, "\n");
- abort();
+ return ret;
+err:
+ tMPI_Spinlock_unlock( &init_init );
+ return ret;
}
{
DWORD thread_id;
struct tMPI_Thread_starter_param *prm;
+ int ret;
+
+ ret = tMPI_Init_initers();
+ if (ret != 0)
+ {
+ return ret;
+ }
- tMPI_Init_initers();
+ if (thread == NULL)
+ {
+ return EINVAL;
+ }
/* a small memory leak to be sure that it doesn't get deallocated
once this function ends, before the newly created thread uses it. */
prm = (struct tMPI_Thread_starter_param*)
- tMPI_Malloc(sizeof(struct tMPI_Thread_starter_param));
+ malloc(sizeof(struct tMPI_Thread_starter_param));
+ if (prm == NULL)
+ {
+ return ENOMEM;
+ }
prm->start_routine = start_routine;
prm->param = arg;
- *thread = (struct tMPI_Thread*)tMPI_Malloc(sizeof(struct tMPI_Thread)*1);
-
- if (thread == NULL)
+ *thread = (struct tMPI_Thread*)malloc(sizeof(struct tMPI_Thread)*1);
+ if (*thread == NULL)
{
- tMPI_Fatal_error(TMPI_FARGS, "Invalid thread pointer.");
- return EINVAL;
+ free(prm);
+ return ENOMEM;
}
+
/* this must be locked before the thread is created to prevent a race
condition if the thread immediately wants to create its own entry */
EnterCriticalSection( &thread_id_list_lock );
prm,
0,
&thread_id);
+ if ((*thread)->th == NULL)
+ {
+ ret = -1;
+ goto err;
+ }
(*thread)->id = thread_id;
if ((*thread)->th == NULL)
{
- tMPI_Free(thread);
- tMPI_Fatal_error(TMPI_FARGS, "Failed to create thread, error code=%d",
- GetLastError());
- return -1;
+ ret = -1;
+ goto err;
+ }
+ ret = tMPI_Thread_id_list_add_locked(thread_id, (*thread));
+ if (ret != 0)
+ {
+ goto err;
}
- tMPI_Thread_id_list_add_locked(thread_id, (*thread));
LeaveCriticalSection( &thread_id_list_lock );
+#if 0
/* inherit the thread priority from the parent thread. */
/* TODO: is there value in setting this, vs. just allowing it to default
   from the process? currently, this limits the effectiveness of changing
the priority in eg: TaskManager. */
SetThreadPriority(((*thread)->th), GetThreadPriority(GetCurrentThread()));
+#endif
return 0;
+err:
+ free(prm);
+ free(thread);
+ LeaveCriticalSection( &thread_id_list_lock );
+ return ret;
}
DWORD ret, retval;
ret = WaitForSingleObject(thread->th, INFINITE);
-
if (ret != 0)
{
- tMPI_Fatal_error(TMPI_FARGS, "Failed to join thread. error code=%d",
- GetLastError());
return -1;
}
{
if (!GetExitCodeThread(thread, &retval))
{
- /* TODO: somehow assign value_ptr */
- tMPI_Fatal_error(TMPI_FARGS,
- "Failed to get thread exit code: error=%d",
- GetLastError());
return -1;
}
}
CloseHandle(thread->th);
tMPI_Thread_id_list_remove(thread->id);
- tMPI_Free(thread);
+ free(thread);
return 0;
}
void tMPI_Thread_exit(void *value_ptr)
{
- /* TODO: fix exit code */
/* TODO: call destructors for thread-local storage */
ExitThread( 0 );
}
{
if (!TerminateThread( thread, -1) )
{
- tMPI_Fatal_error(TMPI_FARGS, "Failed thread_cancel, error code=%d",
- GetLastError());
return -1;
}
tMPI_Thread_id_list_remove(thread->id);
tMPI_Thread_t tMPI_Thread_self(void)
{
tMPI_Thread_t th;
- tMPI_Init_initers();
+ int ret;
- th = tMPI_Thread_id_list_add_self();
+ ret = tMPI_Init_initers();
+ if (ret != 0)
+ {
+ return NULL;
+ }
+ th = tMPI_Thread_id_list_add_self();
return th;
}
return EINVAL;
}
- mtx->mutex = (struct tMPI_Mutex*)tMPI_Malloc(sizeof(struct tMPI_Mutex)*1);
+ mtx->mutex = (struct tMPI_Mutex*)malloc(sizeof(struct tMPI_Mutex)*1);
+ if (mtx->mutex == NULL)
+ {
+ return ENOMEM;
+ }
InitializeCriticalSection(&(mtx->mutex->cs));
return 0;
}
DeleteCriticalSection(&(mtx->mutex->cs));
- tMPI_Free(mtx->mutex);
+ free(mtx->mutex);
return 0;
}
*/
/* initialize the initializers */
- tMPI_Init_initers();
+ ret = tMPI_Init_initers();
+ if (ret != 0)
+ {
+ return ret;
+ }
/* Lock the common one-time init mutex so we can check carefully */
EnterCriticalSection( &mutex_init );
{
if (key == NULL)
{
- tMPI_Fatal_error(TMPI_FARGS, "Invalid key pointer.");
return EINVAL;
}
/* TODO: make list of destructors for thread-local storage */
- key->key = (struct tMPI_Thread_key*)tMPI_Malloc(sizeof(struct
- tMPI_Thread_key)*1);
+ key->key = (struct tMPI_Thread_key*)malloc(sizeof(struct tMPI_Thread_key));
+ if (key->key == NULL)
+ {
+ return ENOMEM;
+ }
(key)->key->wkey = TlsAlloc();
if ( (key)->key->wkey == TLS_OUT_OF_INDEXES)
{
- tMPI_Fatal_error(TMPI_FARGS,
- "Failed to create thread key, error code=%d.",
- GetLastError());
return -1;
}
int tMPI_Thread_key_delete(tMPI_Thread_key_t key)
{
TlsFree(key.key->wkey);
- tMPI_Free(key.key);
+ free(key.key);
return 0;
}
if (!bStatus)
{
- tMPI_Fatal_error(TMPI_FARGS, "Failed to run thread_once routine");
return -1;
}
#else
+ int ret;
+
/* really ugly hack - and it's slow... */
- tMPI_Init_initers();
+ ret = tMPI_Init_initers();
+ if (ret != 0)
+ {
+ return ret;
+ }
+
EnterCriticalSection(&once_init);
if (tMPI_Atomic_get(&(once_control->once)) == 0)
{
}
cond->condp = (struct tMPI_Thread_cond*)
- tMPI_Malloc(sizeof(struct tMPI_Thread_cond)*1);
+ malloc(sizeof(struct tMPI_Thread_cond));
+ if (cond->condp == NULL)
+ {
+ return ENOMEM;
+ }
#if 0
/* use this code once Vista is the minimum version required */
InitializeConditionVariable( &(cond->cv) );
/* windows doesn't have this function */
#else
DeleteCriticalSection(&(cond->condp->wtr_lock));
- tMPI_Free(cond->condp);
+ free(cond->condp);
#endif
return 0;
}
*/
/* initialize the initializers */
- tMPI_Init_initers();
+ ret = tMPI_Init_initers();
+ if (ret != 0)
+ {
+ return ret;
+ }
/* Lock the common one-time init mutex so we can check carefully */
EnterCriticalSection( &cond_init );
BOOL wait_done = FALSE;
BOOL last_waiter = FALSE;
int my_cycle;
+ int ret;
/* check whether the condition is initialized */
if (tMPI_Atomic_get( &(cond->initialized) ) == 0)
{
- tMPI_Thread_cond_init_once(cond);
+ ret = tMPI_Thread_cond_init_once(cond);
+ if (ret != 0)
+ {
+ return ret;
+ }
}
/* the mutex must have been initialized because it should be locked here */
if (!ret)
{
- tMPI_Fatal_error(TMPI_FARGS, "Failed wait for condition, error code=%d",
- GetLastError());
return -1;
}
#else
/* do the actual waiting */
if (WaitForSingleObject( cond->condp->ev, INFINITE ) == WAIT_FAILED)
{
- tMPI_Fatal_error(TMPI_FARGS, "Failed event reset, error code=%d",
- GetLastError());
return -1;
}
{
if (!ResetEvent( cond->condp->ev ))
{
- tMPI_Fatal_error(TMPI_FARGS, "Failed event reset, error code=%d",
- GetLastError());
return -1;
}
}
int tMPI_Thread_cond_signal(tMPI_Thread_cond_t *cond)
{
+ int ret;
/* check whether the condition is initialized */
if (tMPI_Atomic_get( &(cond->initialized) ) == 0)
{
- tMPI_Thread_cond_init_once(cond);
+ ret = tMPI_Thread_cond_init_once(cond);
+ if (ret != 0)
+ {
+ return ret;
+ }
}
/* The condition variable is now guaranteed to be valid. */
#if 0
if (!SetEvent(cond->condp->ev)) /* actually release the
waiting threads */
{
- tMPI_Fatal_error(TMPI_FARGS, "Failed SetEvent, error code=%d",
- GetLastError());
return -1;
}
}
int tMPI_Thread_cond_broadcast(tMPI_Thread_cond_t *cond)
{
+ int ret;
/* check whether the condition is initialized */
if (tMPI_Atomic_get( &(cond->initialized) ) == 0)
{
- tMPI_Thread_cond_init_once(cond);
+ ret = tMPI_Thread_cond_init_once(cond);
+ if (ret != 0)
+ {
+ return ret;
+ }
+
}
/* The condition variable is now guaranteed to be valid. */
#if 0
if (!SetEvent(cond->condp->ev)) /* actually release the
waiting threads */
{
- tMPI_Fatal_error(TMPI_FARGS, "Failed SetEvent, error code=%d",
- GetLastError());
return -1;
}
}
int tMPI_Thread_barrier_init(tMPI_Thread_barrier_t *barrier, int n)
{
+ int ret;
+
if (barrier == NULL)
{
return EINVAL;
}
barrier->barrierp = (struct tMPI_Thread_barrier*)
- tMPI_Malloc(sizeof(struct tMPI_Thread_barrier)*1);
+ malloc(sizeof(struct tMPI_Thread_barrier)*1);
+ if (barrier->barrierp == NULL)
+ {
+ return ENOMEM;
+ }
#if 0
/* use this once Vista is the oldest supported windows version: */
InitializeCriticalSection(&(barrier->barrierp->cs));
InitializeConditionVariable(&(barrier->barrierp->cv));
#else
- tMPI_Thread_mutex_init(&(barrier->barrierp->cs));
- tMPI_Thread_cond_init(&(barrier->barrierp->cv));
+ ret = tMPI_Thread_mutex_init(&(barrier->barrierp->cs));
+ if (ret != 0)
+ {
+ return ret;
+ }
+ ret = tMPI_Thread_cond_init(&(barrier->barrierp->cv));
+ if (ret != 0)
+ {
+ return ret;
+ }
#endif
barrier->threshold = n;
int tMPI_Thread_barrier_destroy(tMPI_Thread_barrier_t *barrier)
{
+ int ret;
+
if (barrier == NULL)
{
return EINVAL;
#if 0
DeleteCriticalSection(&(barrier->barrierp->cs));
#else
- tMPI_Thread_mutex_destroy(&(barrier->barrierp->cs));
+ ret = tMPI_Thread_mutex_destroy(&(barrier->barrierp->cs));
+ if (ret != 0)
+ {
+ return ret;
+ }
#endif
- tMPI_Thread_cond_destroy(&(barrier->barrierp->cv));
+ ret = tMPI_Thread_cond_destroy(&(barrier->barrierp->cv));
+ if (ret != 0)
+ {
+ return ret;
+ }
- tMPI_Free(barrier->barrierp);
+ free(barrier->barrierp);
return 0;
}
/* initialize the initializers */
- tMPI_Init_initers();
+ ret = tMPI_Init_initers();
+ if (ret != 0)
+ {
+ return ret;
+ }
/* Lock the common one-time init mutex so we can check carefully */
EnterCriticalSection( &barrier_init );
int tMPI_Thread_barrier_wait(tMPI_Thread_barrier_t *barrier)
{
- int cycle;
- BOOL rc = FALSE;
- int ret = 0;
+ int cycle;
+ BOOL rc = FALSE;
+ int ret = 0;
/*tMPI_Thread_pthread_barrier_t *p;*/
/* check whether the barrier is initialized */
if (tMPI_Atomic_get( &(barrier->initialized) ) == 0)
{
- tMPI_Thread_barrier_init_once(barrier, barrier->threshold);
+ ret = tMPI_Thread_barrier_init_once(barrier, barrier->threshold);
+ if (ret != 0)
+ {
+ return ret;
+ }
}
-
#if 0
EnterCriticalSection( &(barrier->barrierp->cs) );
#else
- tMPI_Thread_mutex_lock( &(barrier->barrierp->cs) );
+ ret = tMPI_Thread_mutex_lock( &(barrier->barrierp->cs) );
+ if (ret != 0)
+ {
+ return ret;
+ }
#endif
#if 0
WakeAllConditionVariable( &(barrier->barrierp->cv) );
#else
- tMPI_Thread_cond_broadcast( &(barrier->barrierp->cv) );
+ ret = tMPI_Thread_cond_broadcast( &(barrier->barrierp->cv) );
+ if (ret != 0)
+ {
+ return ret;
+ }
#endif
}
else