Bug Summary

File: external/thread_mpi/src/pthreads.c
Location: line 898, column 11
Description: Null pointer passed as an argument to a 'nonnull' parameter

Annotated Source Code

1/*
2 This source code file is part of thread_mpi.
3 Written by Sander Pronk, Erik Lindahl, and possibly others.
4
5 Copyright (c) 2009, Sander Pronk, Erik Lindahl.
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10 1) Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 2) Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15 3) Neither the name of the copyright holders nor the
16 names of its contributors may be used to endorse or promote products
17 derived from this software without specific prior written permission.
18
19 THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
20 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
23 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 If you want to redistribute modifications, please consider that
31 scientific software is very special. Version control is crucial -
32 bugs must be traceable. We will be happy to consider code for
33 inclusion in the official distribution, but derived work should not
34 be called official thread_mpi. Details are found in the README & COPYING
35 files.
36 */
37
38
39
40/* Include the defines that determine which thread library to use.
41 * We do not use HAVE_PTHREAD_H directly, since we might want to
42 * turn off thread support explicity (e.g. for debugging).
43 */
44#ifdef HAVE_TMPI_CONFIG_H
45#include "tmpi_config.h"
46#endif
47
48#ifdef HAVE_CONFIG_H1
49#include "config.h"
50#endif
51
52
53#ifdef THREAD_PTHREADS
54
55#ifdef HAVE_PTHREAD_SETAFFINITY
56#define _GNU_SOURCE
57#endif
58
59/* pthread.h must be the first header, apart from the defines in config.h */
60#include <pthread.h>
61
62
63#ifdef HAVE_UNISTD_H
64#include <unistd.h>
65#endif
66
67#include <errno(*__errno_location ()).h>
68#include <stdlib.h>
69#include <stdio.h>
70#include <stdarg.h>
71
72#include "thread_mpi/atomic.h"
73#include "thread_mpi/threads.h"
74#include "impl.h"
75#include "unused.h"
76
77#include "pthreads.h"
78
79/* mutex for initializing mutexes */
80static pthread_mutex_t mutex_init = PTHREAD_MUTEX_INITIALIZER{ { 0, 0, 0, 0, 0, 0, 0, { 0, 0 } } };
81/* mutex for initializing barriers */
82static pthread_mutex_t once_init = PTHREAD_MUTEX_INITIALIZER{ { 0, 0, 0, 0, 0, 0, 0, { 0, 0 } } };
83/* mutex for initializing thread_conds */
84static pthread_mutex_t cond_init = PTHREAD_MUTEX_INITIALIZER{ { 0, 0, 0, 0, 0, 0, 0, { 0, 0 } } };
85/* mutex for initializing barriers */
86static pthread_mutex_t barrier_init = PTHREAD_MUTEX_INITIALIZER{ { 0, 0, 0, 0, 0, 0, 0, { 0, 0 } } };
87
88/* mutex for managing thread IDs */
89static pthread_mutex_t thread_id_mutex = PTHREAD_MUTEX_INITIALIZER{ { 0, 0, 0, 0, 0, 0, 0, { 0, 0 } } };
90static pthread_key_t thread_id_key;
91static int thread_id_key_initialized = 0;
92
93
94
95
96enum tMPI_Thread_support tMPI_Thread_support(void)
97{
98 return TMPI_THREAD_SUPPORT_YES;
99}
100
101
/* Best-effort detection of the number of hardware threads.
   Returns the sysconf() processor count when available, 0 otherwise
   (callers must treat 0 as "unknown"). */
int tMPI_Thread_get_hw_number(void)
{
    int ret = 0;
#ifdef HAVE_SYSCONF
#if defined(_SC_NPROCESSORS_ONLN)
    ret = sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(_SC_NPROC_ONLN)
    ret = sysconf(_SC_NPROC_ONLN);
#elif defined(_SC_NPROCESSORS_CONF)
    ret = sysconf(_SC_NPROCESSORS_CONF);
#elif defined(_SC_NPROC_CONF)
    ret = sysconf(_SC_NPROC_CONF);
#endif
#endif

    return ret;
}
119
120/* destructor for thread ids */
121static void tMPI_Destroy_thread_id(void* thread_id)
122{
123 struct tMPI_Thread *thread = (struct tMPI_Thread*)thread_id;
124 if (!thread->started_by_tmpi)
125 {
126 /* if the thread is started by tMPI, it must be freed in the join()
127 call. */
128 free(thread_id);
129 }
130}
131
132
133/* Set the thread id var for this thread
134 Returns a pointer to the thread object if succesful, NULL if ENOMEM */
135static struct tMPI_Thread* tMPI_Set_thread_id_key(int started_by_tmpi)
136{
137 struct tMPI_Thread *th;
138
139 th = (struct tMPI_Thread*)malloc(sizeof(struct tMPI_Thread)*1);
140 if (th == NULL((void*)0))
141 {
142 return NULL((void*)0);
143 }
144 th->th = pthread_self();
145 th->started_by_tmpi = started_by_tmpi;
146 /* we ignore errors because any thread that needs this value will
147 re-generate it in the next iteration. */
148 pthread_setspecific(thread_id_key, th);
149 return th;
150}
151
152/* initialize the thread id vars if not already initialized */
153static int tMPI_Init_thread_ids(void)
154{
155 int ret;
156 ret = pthread_mutex_lock( &thread_id_mutex );
157 if (ret != 0)
158 {
159 return ret;
160 }
161
162 if (!thread_id_key_initialized)
163 {
164 /* initialize and set the thread id thread-specific variable */
165 struct tMPI_Thread *th;
166
167 thread_id_key_initialized = 1;
168 ret = pthread_key_create(&thread_id_key, tMPI_Destroy_thread_id);
169 if (ret != 0)
170 {
171 goto err;
172 }
173 th = tMPI_Set_thread_id_key(0);
174 if (th == NULL((void*)0))
175 {
176 ret = ENOMEM12;
177 goto err;
178 }
179 }
180
181 ret = pthread_mutex_unlock( &thread_id_mutex );
182 return ret;
183err:
184 pthread_mutex_unlock( &thread_id_mutex );
185 return ret;
186}
187
/* structure to hold the arguments for the thread_starter function */
struct tMPI_Thread_starter
{
    struct tMPI_Thread *thread;                 /* bookkeeping structure */
    void             *(*start_routine)(void*);  /* user entry point */
    void               *arg;                    /* user argument */
    pthread_mutex_t     cond_lock;              /* lock for initialization of
                                                   thread structure */
};
197
198/* the thread_starter function that sets the thread id */
199static void *tMPI_Thread_starter(void *arg)
200{
201 struct tMPI_Thread_starter *starter = (struct tMPI_Thread_starter *)arg;
202 void *(*start_routine)(void*);
203 void *parg;
204 int ret;
205
206 /* first wait for the parent thread to signal that the starter->thread
207 structure is ready. That's done by unlocking the starter->cond_lock */
208 ret = pthread_mutex_lock(&(starter->cond_lock));
209 if (ret != 0)
210 {
211 return NULL((void*)0);
212 }
213 ret = pthread_mutex_unlock(&(starter->cond_lock));
214 if (ret != 0)
215 {
216 return NULL((void*)0);
217 }
218
219 /* now remember the tMPI_thread_t structure for this thread */
220 ret = pthread_setspecific(thread_id_key, starter->thread);
221 if (ret != 0)
222 {
223 return NULL((void*)0);
224 }
225 start_routine = starter->start_routine;
226 parg = starter->arg;
227
228 /* deallocate the starter structure. Errors here are non-fatal. */
229 pthread_mutex_destroy(&(starter->cond_lock));
230 free(starter);
231 return (*start_routine)(parg);
232}
233
234int tMPI_Thread_create(tMPI_Thread_t *thread, void *(*start_routine)(void *),
235 void *arg)
236{
237 int ret;
238 struct tMPI_Thread_starter *starter;
239
240 if (thread == NULL((void*)0))
241 {
242 return EINVAL22;
243 }
244 tMPI_Init_thread_ids();
245
246 *thread = (struct tMPI_Thread*)malloc(sizeof(struct tMPI_Thread)*1);
247 if (*thread == NULL((void*)0))
248 {
249 return ENOMEM12;
250 }
251 (*thread)->started_by_tmpi = 1;
252 starter = (struct tMPI_Thread_starter*)
253 malloc(sizeof(struct tMPI_Thread_starter)*1);
254 if (starter == NULL((void*)0))
255 {
256 return ENOMEM12;
257 }
258 /* fill the starter structure */
259 starter->thread = *thread;
260 starter->start_routine = start_routine;
261 starter->arg = arg;
262
263 ret = pthread_mutex_init(&(starter->cond_lock), NULL((void*)0));
264 if (ret != 0)
265 {
266 return ret;
267 }
268 /* now lock the mutex so we can unlock it once we know the data in
269 thread->th is safe. */
270 ret = pthread_mutex_lock(&(starter->cond_lock));
271 if (ret != 0)
272 {
273 return ret;
274 }
275
276 ret = pthread_create(&((*thread)->th), NULL((void*)0), tMPI_Thread_starter,
277 (void*)starter);
278 if (ret != 0)
279 {
280 return ret;
281 }
282
283 /* Here we know thread->th is safe. */
284 ret = pthread_mutex_unlock(&(starter->cond_lock));
285
286 return ret;
287}
288
289
290
291int tMPI_Thread_join(tMPI_Thread_t thread, void **value_ptr)
292{
293 int ret;
294 pthread_t th = thread->th;
295
296 ret = pthread_join( th, value_ptr );
297 if (ret != 0)
298 {
299 return ret;
300 }
301 free(thread);
302 return 0;
303}
304
305
306tMPI_Thread_t tMPI_Thread_self(void)
307{
308 tMPI_Thread_t th;
309 int ret;
310
311 /* make sure the key var is set */
312 ret = tMPI_Init_thread_ids();
313 if (ret != 0)
314 {
315 return NULL((void*)0);
316 }
317
318 th = pthread_getspecific(thread_id_key);
319
320 /* check if it is already in our list */
321 if (th == NULL((void*)0))
322 {
323 th = tMPI_Set_thread_id_key(0);
324 }
325 return th;
326}
327
328int tMPI_Thread_equal(tMPI_Thread_t t1, tMPI_Thread_t t2)
329{
330 return pthread_equal(t1->th, t2->th);
331}
332
333
334enum tMPI_Thread_setaffinity_support tMPI_Thread_setaffinity_support(void)
335{
336#ifdef HAVE_PTHREAD_SETAFFINITY
337 cpu_set_t set;
338 int ret;
339
340 /* run getaffinity to check whether we get back ENOSYS */
341 ret = pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
342 if (ret == 0)
343 {
344 return TMPI_SETAFFINITY_SUPPORT_YES;
345 }
346 else
347 {
348 return TMPI_SETAFFINITY_SUPPORT_NO;
349 }
350#else
351 return TMPI_SETAFFINITY_SUPPORT_NO;
352#endif
353}
354
355
356/* set thread's own affinity to a processor number n */
357int tMPI_Thread_setaffinity_single(tMPI_Thread_t tmpi_unused__attribute__ ((unused)) thread,
358 unsigned int tmpi_unused__attribute__ ((unused)) nr)
359{
360#ifdef HAVE_PTHREAD_SETAFFINITY
361 int nt = tMPI_Thread_get_hw_number();
362 cpu_set_t set;
363
364 if (nt < nr)
365 {
366 return TMPI_ERR_PROCNR;
367 }
368
369 CPU_ZERO(&set)do __builtin_memset (&set, '\0', sizeof (cpu_set_t)); while
(0)
;
370 CPU_SET(nr, &set)(__extension__ ({ size_t __cpu = (nr); __cpu / 8 < (sizeof
(cpu_set_t)) ? (((__cpu_mask *) ((&set)->__bits))[((__cpu
) / (8 * sizeof (__cpu_mask)))] |= ((__cpu_mask) 1 << (
(__cpu) % (8 * sizeof (__cpu_mask))))) : 0; }))
;
371 return pthread_setaffinity_np(thread->th, sizeof(set), &set);
372#endif
373 return 0;
374}
375
376
377
378
379int tMPI_Thread_mutex_init(tMPI_Thread_mutex_t *mtx)
380{
381 int ret;
382
383 if (mtx == NULL((void*)0))
384 {
385 return EINVAL22;
386 }
387
388 mtx->mutex = (struct tMPI_Mutex*)malloc(sizeof(struct tMPI_Mutex)*1);
389 if (mtx->mutex == NULL((void*)0))
390 {
391 return ENOMEM12;
392 }
393 ret = pthread_mutex_init(&(mtx->mutex->mtx), NULL((void*)0));
394 if (ret != 0)
395 {
396 return ret;
397 }
398
399#ifndef TMPI_NO_ATOMICS
400 tMPI_Atomic_set(&(mtx->initialized), 1)(((&(mtx->initialized))->value) = (1));
401#else
402 mtx->initialized.value = 1;
403#endif
404 return 0;
405}
406
407static inline int tMPI_Thread_mutex_init_once(tMPI_Thread_mutex_t *mtx)
408{
409 int ret = 0;
410
411#ifndef TMPI_NO_ATOMICS
412 /* check whether the mutex is initialized */
413 if (tMPI_Atomic_get( &(mtx->initialized) )((&(mtx->initialized))->value) == 0)
414#endif
415 {
416 /* we're relying on the memory barrier semantics of mutex_lock/unlock
417 for the check preceding this function call to have worked */
418 ret = pthread_mutex_lock( &(mutex_init) );
419 if (ret != 0)
420 {
421 return ret;
422 }
423
424 if (mtx->mutex == NULL((void*)0))
425 {
426 mtx->mutex = (struct tMPI_Mutex*)malloc(sizeof(struct tMPI_Mutex));
427 if (mtx->mutex == NULL((void*)0))
428 {
429 ret = ENOMEM12;
430 goto err;
431 }
432 ret = pthread_mutex_init( &(mtx->mutex->mtx), NULL((void*)0));
433 if (ret != 0)
434 {
435 goto err;
436 }
437 }
438 ret = pthread_mutex_unlock( &(mutex_init) );
439 }
440 return ret;
441err:
442 pthread_mutex_unlock( &(mutex_init) );
443 return ret;
444}
445
446
447int tMPI_Thread_mutex_destroy(tMPI_Thread_mutex_t *mtx)
448{
449 int ret;
450
451 if (mtx == NULL((void*)0))
452 {
453 return EINVAL22;
454 }
455
456 ret = pthread_mutex_destroy( &(mtx->mutex->mtx) );
457 if (ret != 0)
458 {
459 return ret;
460 }
461 free(mtx->mutex);
462 return ret;
463}
464
465
466
467int tMPI_Thread_mutex_lock(tMPI_Thread_mutex_t *mtx)
468{
469 int ret;
470
471 /* check whether the mutex is initialized */
472 ret = tMPI_Thread_mutex_init_once(mtx);
473 if (ret != 0)
474 {
475 return ret;
476 }
477
478 ret = pthread_mutex_lock(&(mtx->mutex->mtx));
479 return ret;
480}
481
482
483
484
485int tMPI_Thread_mutex_trylock(tMPI_Thread_mutex_t *mtx)
486{
487 int ret;
488
489 /* check whether the mutex is initialized */
490 ret = tMPI_Thread_mutex_init_once(mtx);
491 if (ret != 0)
492 {
493 return ret;
494 }
495
496 ret = pthread_mutex_trylock(&(mtx->mutex->mtx));
497 return ret;
498}
499
500
501
502int tMPI_Thread_mutex_unlock(tMPI_Thread_mutex_t *mtx)
503{
504 int ret;
505
506 /* check whether the mutex is initialized */
507 ret = tMPI_Thread_mutex_init_once(mtx);
508 if (ret != 0)
509 {
510 return ret;
511 }
512
513 ret = pthread_mutex_unlock(&(mtx->mutex->mtx));
514 return ret;
515}
516
517
518
519int tMPI_Thread_key_create(tMPI_Thread_key_t *key, void (*destructor)(void *))
520{
521 int ret;
522
523 if (key == NULL((void*)0))
524 {
525 return EINVAL22;
526 }
527
528
529 key->key = (struct tMPI_Thread_key*)malloc(sizeof(struct
530 tMPI_Thread_key)*1);
531 if (key->key == NULL((void*)0))
532 {
533 return ENOMEM12;
534 }
535 ret = pthread_key_create(&((key)->key->pkey), destructor);
536 if (ret != 0)
537 {
538 return ret;
539 }
540
541 tMPI_Atomic_set(&(key->initialized), 1)(((&(key->initialized))->value) = (1));
542 return 0;
543}
544
545
546int tMPI_Thread_key_delete(tMPI_Thread_key_t key)
547{
548 int ret;
549
550 ret = pthread_key_delete((key.key->pkey));
551 if (ret != 0)
552 {
553 return ret;
554 }
555 free(key.key);
556
557 return 0;
558}
559
560
561
562void * tMPI_Thread_getspecific(tMPI_Thread_key_t key)
563{
564 void *p = NULL((void*)0);
565
566 p = pthread_getspecific((key.key->pkey));
567
568 return p;
569}
570
571
572int tMPI_Thread_setspecific(tMPI_Thread_key_t key, void *value)
573{
574 int ret;
575
576 ret = pthread_setspecific((key.key->pkey), value);
577
578 return ret;
579}
580
581
582int tMPI_Thread_once(tMPI_Thread_once_t *once_control,
583 void (*init_routine)(void))
584{
585 int ret;
586 if (!once_control || !init_routine)
587 {
588 return EINVAL22;
589 }
590
591 /* really ugly hack - and it's slow... */
592 ret = pthread_mutex_lock( &once_init );
593 if (ret != 0)
594 {
595 return ret;
596 }
597 if (tMPI_Atomic_get(&(once_control->once))((&(once_control->once))->value) == 0)
598 {
599 (*init_routine)();
600 tMPI_Atomic_set(&(once_control->once), 1)(((&(once_control->once))->value) = (1));
601 }
602 ret = pthread_mutex_unlock( &once_init );
603
604 return ret;
605}
606
607
608
609
610int tMPI_Thread_cond_init(tMPI_Thread_cond_t *cond)
611{
612 int ret;
613
614 if (cond == NULL((void*)0))
615 {
616 return EINVAL22;
617 }
618
619 cond->condp = (struct tMPI_Thread_cond*)malloc(
620 sizeof(struct tMPI_Thread_cond));
621 if (cond->condp == NULL((void*)0))
622 {
623 return ENOMEM12;
624 }
625
626 ret = pthread_cond_init(&(cond->condp->cond), NULL((void*)0));
627 if (ret != 0)
628 {
629 return ret;
630 }
631 tMPI_Atomic_set(&(cond->initialized), 1)(((&(cond->initialized))->value) = (1));
632 tMPI_Atomic_memory_barrier()__sync_synchronize();
633
634 return 0;
635}
636
637
638static int tMPI_Thread_cond_init_once(tMPI_Thread_cond_t *cond)
639{
640 int ret = 0;
641
642 /* we're relying on the memory barrier semantics of mutex_lock/unlock
643 for the check preceding this function call to have worked */
644 ret = pthread_mutex_lock( &(cond_init) );
645 if (ret != 0)
646 {
647 return ret;
648 }
649 if (cond->condp == NULL((void*)0))
650 {
651 cond->condp = (struct tMPI_Thread_cond*)
652 malloc(sizeof(struct tMPI_Thread_cond)*1);
653 if (cond->condp == NULL((void*)0))
654 {
655 ret = ENOMEM12;
656 goto err;
657 }
658 ret = pthread_cond_init( &(cond->condp->cond), NULL((void*)0));
659 if (ret != 0)
660 {
661 goto err;
662 }
663 }
664 ret = pthread_mutex_unlock( &(cond_init) );
665 return ret;
666err:
667 /* try to unlock anyway */
668 pthread_mutex_unlock( &(cond_init) );
669 return ret;
670}
671
672
673
674int tMPI_Thread_cond_destroy(tMPI_Thread_cond_t *cond)
675{
676 int ret;
677
678 if (cond == NULL((void*)0))
679 {
680 return EINVAL22;
681 }
682
683 ret = pthread_cond_destroy(&(cond->condp->cond));
684 if (ret != 0)
685 {
686 return ret;
687 }
688 free(cond->condp);
689
690 return 0;
691}
692
693
694int tMPI_Thread_cond_wait(tMPI_Thread_cond_t *cond, tMPI_Thread_mutex_t *mtx)
695{
696 int ret;
697
698 /* check whether the condition is initialized */
699 if (tMPI_Atomic_get( &(cond->initialized) )((&(cond->initialized))->value) == 0)
700 {
701 ret = tMPI_Thread_cond_init_once(cond);
702 if (ret != 0)
703 {
704 return ret;
705 }
706 }
707 /* the mutex must have been initialized because it should be locked here */
708
709 ret = pthread_cond_wait( &(cond->condp->cond), &(mtx->mutex->mtx) );
710
711 return ret;
712}
713
714
715
716
717int tMPI_Thread_cond_signal(tMPI_Thread_cond_t *cond)
718{
719 int ret;
720
721 /* check whether the condition is initialized */
722 if (tMPI_Atomic_get( &(cond->initialized) )((&(cond->initialized))->value) == 0)
723 {
724 ret = tMPI_Thread_cond_init_once(cond);
725 if (ret != 0)
726 {
727 return ret;
728 }
729 }
730
731 ret = pthread_cond_signal( &(cond->condp->cond) );
732
733 return ret;
734}
735
736
737
738int tMPI_Thread_cond_broadcast(tMPI_Thread_cond_t *cond)
739{
740 int ret;
741
742 /* check whether the condition is initialized */
743 if (tMPI_Atomic_get( &(cond->initialized) )((&(cond->initialized))->value) == 0)
744 {
745 ret = tMPI_Thread_cond_init_once(cond);
746 if (ret != 0)
747 {
748 return ret;
749 }
750 }
751
752 ret = pthread_cond_broadcast( &(cond->condp->cond) );
753
754 return ret;
755}
756
757
758
759
/* Terminate the calling thread, returning value_ptr to any joiner. */
void tMPI_Thread_exit(void *value_ptr)
{
    pthread_exit(value_ptr);
}
764
765
766int tMPI_Thread_cancel(tMPI_Thread_t thread)
767{
768 #ifdef __native_client__
769 return ENOSYS38;
770 #endif
771 return pthread_cancel(thread->th);
772}
773
774
775
776
777int tMPI_Thread_barrier_init(tMPI_Thread_barrier_t *barrier, int n)
778{
779 int ret;
780 /*tMPI_Thread_pthread_barrier_t *p;*/
781
782 if (barrier == NULL((void*)0))
783 {
784 return EINVAL22;
785 }
786
787 barrier->barrierp = (struct tMPI_Thread_barrier*)
788 malloc(sizeof(struct tMPI_Thread_barrier)*1);
789 if (barrier->barrierp == NULL((void*)0))
790 {
791 return ENOMEM12;
792 }
793
794 ret = pthread_mutex_init(&(barrier->barrierp->mutex), NULL((void*)0));
795 if (ret != 0)
796 {
797 return ret;
798 }
799
800 ret = pthread_cond_init(&(barrier->barrierp->cv), NULL((void*)0));
801 if (ret != 0)
802 {
803 return ret;
804 }
805
806 barrier->threshold = n;
807 barrier->count = n;
808 barrier->cycle = 0;
809
810 tMPI_Atomic_set(&(barrier->initialized), 1)(((&(barrier->initialized))->value) = (1));
811 return 0;
812}
813
814static int tMPI_Thread_barrier_init_once(tMPI_Thread_barrier_t *barrier)
815{
816 int ret = 0;
817
818 /* we're relying on the memory barrier semantics of mutex_lock/unlock
819 for the check preceding this function call to have worked */
820 ret = pthread_mutex_lock( &(barrier_init) );
821 if (ret != 0)
822 {
823 return ret;
824 }
825
826 if (barrier->barrierp == NULL((void*)0))
827 {
828 barrier->barrierp = (struct tMPI_Thread_barrier*)
829 malloc(sizeof(struct tMPI_Thread_barrier)*1);
830 if (barrier->barrierp == NULL((void*)0))
831 {
832 ret = ENOMEM12;
833 goto err;
834 }
835
836 ret = pthread_mutex_init(&(barrier->barrierp->mutex), NULL((void*)0));
837
838 if (ret != 0)
839 {
840 goto err;
841 }
842
843 ret = pthread_cond_init(&(barrier->barrierp->cv), NULL((void*)0));
844
845 if (ret != 0)
846 {
847 goto err;
848 }
849 }
850 ret = pthread_mutex_unlock( &(barrier_init) );
851 return ret;
852err:
853 pthread_mutex_unlock( &(barrier_init) );
854 return ret;
855}
856
857
858
859
860int tMPI_Thread_barrier_destroy(tMPI_Thread_barrier_t *barrier)
861{
862 int ret;
863
864 if (barrier == NULL((void*)0))
865 {
866 return EINVAL22;
867 }
868
869 ret = pthread_mutex_destroy(&(barrier->barrierp->mutex));
870 if (ret != 0)
871 {
872 return ret;
873 }
874 ret = pthread_cond_destroy(&(barrier->barrierp->cv));
875 if (ret != 0)
876 {
877 return ret;
878 }
879
880 free(barrier->barrierp);
881
882 return 0;
883}
884
885
886int tMPI_Thread_barrier_wait(tMPI_Thread_barrier_t * barrier)
887{
888 int cycle;
889 int ret;
890
891 /* check whether the barrier is initialized */
892 if (tMPI_Atomic_get( &(barrier->initialized) )((&(barrier->initialized))->value) == 0)
1
Taking true branch
893 {
894 tMPI_Thread_barrier_init_once(barrier);
895 }
896
897
898 ret = pthread_mutex_lock(&barrier->barrierp->mutex);
2
Null pointer passed as an argument to a 'nonnull' parameter
899 if (ret != 0)
900 {
901 return ret;
902 }
903
904 cycle = barrier->cycle;
905
906 /* Decrement the count atomically and check if it is zero.
907 * This will only be true for the last thread calling us.
908 */
909 if (--barrier->count <= 0)
910 {
911 barrier->cycle = !barrier->cycle;
912 barrier->count = barrier->threshold;
913 ret = pthread_cond_broadcast(&barrier->barrierp->cv);
914
915 if (ret == 0)
916 {
917 goto err;
918 }
919 }
920 else
921 {
922 while (cycle == barrier->cycle)
923 {
924 ret = pthread_cond_wait(&barrier->barrierp->cv,
925 &barrier->barrierp->mutex);
926 if (ret != 0)
927 {
928 goto err;
929 }
930 }
931 }
932
933 ret = pthread_mutex_unlock(&barrier->barrierp->mutex);
934 return ret;
935err:
936 pthread_mutex_unlock(&barrier->barrierp->mutex);
937 return ret;
938
939}
940
941#else
942
943/* just to have some symbols */
944int tMPI_Thread_pthreads = 0;
945
946#endif /* THREAD_PTHREADS */