/* Locking in multithreaded situations.
   Copyright (C) 2005-2008 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU Library General Public License as published
   by the Free Software Foundation; either version 2, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
   USA.  */
19 /* Written by Bruno Haible <bruno@clisp.org>, 2005.
20 Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
27 /* ========================================================================= */
31 /* -------------------------- gl_lock_t datatype -------------------------- */
33 /* ------------------------- gl_rwlock_t datatype ------------------------- */
35 # if HAVE_PTHREAD_RWLOCK
37 # if !defined PTHREAD_RWLOCK_INITIALIZER
40 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
44 err = pthread_rwlock_init (&lock->rwlock, NULL);
47 lock->initialized = 1;
52 glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
54 if (!lock->initialized)
58 err = pthread_mutex_lock (&lock->guard);
61 if (!lock->initialized)
63 err = glthread_rwlock_init_multithreaded (lock);
66 pthread_mutex_unlock (&lock->guard);
70 err = pthread_mutex_unlock (&lock->guard);
74 return pthread_rwlock_rdlock (&lock->rwlock);
78 glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
80 if (!lock->initialized)
84 err = pthread_mutex_lock (&lock->guard);
87 if (!lock->initialized)
89 err = glthread_rwlock_init_multithreaded (lock);
92 pthread_mutex_unlock (&lock->guard);
96 err = pthread_mutex_unlock (&lock->guard);
100 return pthread_rwlock_wrlock (&lock->rwlock);
104 glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
106 if (!lock->initialized)
108 return pthread_rwlock_unlock (&lock->rwlock);
112 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
116 if (!lock->initialized)
118 err = pthread_rwlock_destroy (&lock->rwlock);
121 lock->initialized = 0;
130 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
134 err = pthread_mutex_init (&lock->lock, NULL);
137 err = pthread_cond_init (&lock->waiting_readers, NULL);
140 err = pthread_cond_init (&lock->waiting_writers, NULL);
143 lock->waiting_writers_count = 0;
149 glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
153 err = pthread_mutex_lock (&lock->lock);
156 /* Test whether only readers are currently running, and whether the runcount
157 field will not overflow. */
158 /* POSIX says: "It is implementation-defined whether the calling thread
159 acquires the lock when a writer does not hold the lock and there are
160 writers blocked on the lock." Let's say, no: give the writers a higher
162 while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
164 /* This thread has to wait for a while. Enqueue it among the
166 err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
169 pthread_mutex_unlock (&lock->lock);
174 return pthread_mutex_unlock (&lock->lock);
178 glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
182 err = pthread_mutex_lock (&lock->lock);
185 /* Test whether no readers or writers are currently running. */
186 while (!(lock->runcount == 0))
188 /* This thread has to wait for a while. Enqueue it among the
190 lock->waiting_writers_count++;
191 err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
194 lock->waiting_writers_count--;
195 pthread_mutex_unlock (&lock->lock);
198 lock->waiting_writers_count--;
200 lock->runcount--; /* runcount becomes -1 */
201 return pthread_mutex_unlock (&lock->lock);
205 glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
209 err = pthread_mutex_lock (&lock->lock);
212 if (lock->runcount < 0)
214 /* Drop a writer lock. */
215 if (!(lock->runcount == -1))
217 pthread_mutex_unlock (&lock->lock);
224 /* Drop a reader lock. */
225 if (!(lock->runcount > 0))
227 pthread_mutex_unlock (&lock->lock);
232 if (lock->runcount == 0)
234 /* POSIX recommends that "write locks shall take precedence over read
235 locks", to avoid "writer starvation". */
236 if (lock->waiting_writers_count > 0)
238 /* Wake up one of the waiting writers. */
239 err = pthread_cond_signal (&lock->waiting_writers);
242 pthread_mutex_unlock (&lock->lock);
248 /* Wake up all waiting readers. */
249 err = pthread_cond_broadcast (&lock->waiting_readers);
252 pthread_mutex_unlock (&lock->lock);
257 return pthread_mutex_unlock (&lock->lock);
261 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
265 err = pthread_mutex_destroy (&lock->lock);
268 err = pthread_cond_destroy (&lock->waiting_readers);
271 err = pthread_cond_destroy (&lock->waiting_writers);
279 /* --------------------- gl_recursive_lock_t datatype --------------------- */
281 # if HAVE_PTHREAD_MUTEX_RECURSIVE
283 # if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
286 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
288 pthread_mutexattr_t attributes;
291 err = pthread_mutexattr_init (&attributes);
294 err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
297 pthread_mutexattr_destroy (&attributes);
300 err = pthread_mutex_init (lock, &attributes);
303 pthread_mutexattr_destroy (&attributes);
306 err = pthread_mutexattr_destroy (&attributes);
315 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
317 pthread_mutexattr_t attributes;
320 err = pthread_mutexattr_init (&attributes);
323 err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
326 pthread_mutexattr_destroy (&attributes);
329 err = pthread_mutex_init (&lock->recmutex, &attributes);
332 pthread_mutexattr_destroy (&attributes);
335 err = pthread_mutexattr_destroy (&attributes);
338 lock->initialized = 1;
343 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
345 if (!lock->initialized)
349 err = pthread_mutex_lock (&lock->guard);
352 if (!lock->initialized)
354 err = glthread_recursive_lock_init_multithreaded (lock);
357 pthread_mutex_unlock (&lock->guard);
361 err = pthread_mutex_unlock (&lock->guard);
365 return pthread_mutex_lock (&lock->recmutex);
369 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
371 if (!lock->initialized)
373 return pthread_mutex_unlock (&lock->recmutex);
377 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
381 if (!lock->initialized)
383 err = pthread_mutex_destroy (&lock->recmutex);
386 lock->initialized = 0;
395 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
399 err = pthread_mutex_init (&lock->mutex, NULL);
402 lock->owner = (pthread_t) 0;
408 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
410 pthread_t self = pthread_self ();
411 if (lock->owner != self)
415 err = pthread_mutex_lock (&lock->mutex);
420 if (++(lock->depth) == 0) /* wraparound? */
429 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
431 if (lock->owner != pthread_self ())
433 if (lock->depth == 0)
435 if (--(lock->depth) == 0)
437 lock->owner = (pthread_t) 0;
438 return pthread_mutex_unlock (&lock->mutex);
445 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
447 if (lock->owner != (pthread_t) 0)
449 return pthread_mutex_destroy (&lock->mutex);
454 /* -------------------------- gl_once_t datatype -------------------------- */
/* A pristine pthread_once_t, used as the reference for "not yet triggered".  */
static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

/* Single-threaded variant of pthread_once: returns 1 if this is the first
   use of *ONCE_CONTROL (the caller should then run the init function),
   0 otherwise.  Marks the control as used by flipping its first byte.  */
int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* We don't know whether pthread_once_t is an integer type, a floating-point
     type, a pointer type, or a structure type.  */
  char *firstbyte = (char *)once_control;
  if (*firstbyte == *(const char *)&fresh_once)
    {
      /* First time use of once_control.  Invert the first byte.
         (~b always differs from b in every bit, so the test above can
         never match again.)  */
      *firstbyte = ~ *(const char *)&fresh_once;
      return 1;
    }
  else
    return 0;
}
476 /* ========================================================================= */
480 /* Use the GNU Pth threads library. */
482 /* -------------------------- gl_lock_t datatype -------------------------- */
484 /* ------------------------- gl_rwlock_t datatype ------------------------- */
486 /* --------------------- gl_recursive_lock_t datatype --------------------- */
488 /* -------------------------- gl_once_t datatype -------------------------- */
/* Trampoline for pth_once: ARG points to a function pointer; call it.  */
static void
glthread_once_call (void *arg)
{
  void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
  void (*initfunction) (void) = *gl_once_temp_addr;
  initfunction ();
}
499 glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
501 void (*temp) (void) = initfunction;
502 return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
506 glthread_once_singlethreaded (pth_once_t *once_control)
508 /* We know that pth_once_t is an integer type. */
509 if (*once_control == PTH_ONCE_INIT)
511 /* First time use of once_control. Invert the marker. */
512 *once_control = ~ PTH_ONCE_INIT;
521 /* ========================================================================= */
523 #if USE_SOLARIS_THREADS
525 /* Use the old Solaris threads library. */
527 /* -------------------------- gl_lock_t datatype -------------------------- */
529 /* ------------------------- gl_rwlock_t datatype ------------------------- */
531 /* --------------------- gl_recursive_lock_t datatype --------------------- */
534 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
538 err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
541 lock->owner = (thread_t) 0;
547 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
549 thread_t self = thr_self ();
550 if (lock->owner != self)
554 err = mutex_lock (&lock->mutex);
559 if (++(lock->depth) == 0) /* wraparound? */
568 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
570 if (lock->owner != thr_self ())
572 if (lock->depth == 0)
574 if (--(lock->depth) == 0)
576 lock->owner = (thread_t) 0;
577 return mutex_unlock (&lock->mutex);
584 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
586 if (lock->owner != (thread_t) 0)
588 return mutex_destroy (&lock->mutex);
591 /* -------------------------- gl_once_t datatype -------------------------- */
594 glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
596 if (!once_control->inited)
600 /* Use the mutex to guarantee that if another thread is already calling
601 the initfunction, this thread waits until it's finished. */
602 err = mutex_lock (&once_control->mutex);
605 if (!once_control->inited)
607 once_control->inited = 1;
610 return mutex_unlock (&once_control->mutex);
617 glthread_once_singlethreaded (gl_once_t *once_control)
619 /* We know that gl_once_t contains an integer type. */
620 if (!once_control->inited)
622 /* First time use of once_control. Invert the marker. */
623 once_control->inited = ~ 0;
632 /* ========================================================================= */
634 #if USE_WIN32_THREADS
636 /* -------------------------- gl_lock_t datatype -------------------------- */
639 glthread_lock_init_func (gl_lock_t *lock)
641 InitializeCriticalSection (&lock->lock);
642 lock->guard.done = 1;
646 glthread_lock_lock_func (gl_lock_t *lock)
648 if (!lock->guard.done)
650 if (InterlockedIncrement (&lock->guard.started) == 0)
651 /* This thread is the first one to need this lock. Initialize it. */
652 glthread_lock_init (lock);
654 /* Yield the CPU while waiting for another thread to finish
655 initializing this lock. */
656 while (!lock->guard.done)
659 EnterCriticalSection (&lock->lock);
664 glthread_lock_unlock_func (gl_lock_t *lock)
666 if (!lock->guard.done)
668 LeaveCriticalSection (&lock->lock);
673 glthread_lock_destroy_func (gl_lock_t *lock)
675 if (!lock->guard.done)
677 DeleteCriticalSection (&lock->lock);
678 lock->guard.done = 0;
682 /* ------------------------- gl_rwlock_t datatype ------------------------- */
684 /* In this file, the waitqueues are implemented as circular arrays. */
685 #define gl_waitqueue_t gl_carray_waitqueue_t
688 gl_waitqueue_init (gl_waitqueue_t *wq)
696 /* Enqueues the current thread, represented by an event, in a wait queue.
697 Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */
699 gl_waitqueue_add (gl_waitqueue_t *wq)
704 if (wq->count == wq->alloc)
706 unsigned int new_alloc = 2 * wq->alloc + 1;
708 (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
709 if (new_array == NULL)
710 /* No more memory. */
711 return INVALID_HANDLE_VALUE;
712 /* Now is a good opportunity to rotate the array so that its contents
713 starts at offset 0. */
716 unsigned int old_count = wq->count;
717 unsigned int old_alloc = wq->alloc;
718 unsigned int old_offset = wq->offset;
720 if (old_offset + old_count > old_alloc)
722 unsigned int limit = old_offset + old_count - old_alloc;
723 for (i = 0; i < limit; i++)
724 new_array[old_alloc + i] = new_array[i];
726 for (i = 0; i < old_count; i++)
727 new_array[i] = new_array[old_offset + i];
730 wq->array = new_array;
731 wq->alloc = new_alloc;
733 /* Whether the created event is a manual-reset one or an auto-reset one,
734 does not matter, since we will wait on it only once. */
735 event = CreateEvent (NULL, TRUE, FALSE, NULL);
736 if (event == INVALID_HANDLE_VALUE)
737 /* No way to allocate an event. */
738 return INVALID_HANDLE_VALUE;
739 index = wq->offset + wq->count;
740 if (index >= wq->alloc)
742 wq->array[index] = event;
747 /* Notifies the first thread from a wait queue and dequeues it. */
749 gl_waitqueue_notify_first (gl_waitqueue_t *wq)
751 SetEvent (wq->array[wq->offset + 0]);
754 if (wq->count == 0 || wq->offset == wq->alloc)
758 /* Notifies all threads from a wait queue and dequeues them all. */
760 gl_waitqueue_notify_all (gl_waitqueue_t *wq)
764 for (i = 0; i < wq->count; i++)
766 unsigned int index = wq->offset + i;
767 if (index >= wq->alloc)
769 SetEvent (wq->array[index]);
776 glthread_rwlock_init_func (gl_rwlock_t *lock)
778 InitializeCriticalSection (&lock->lock);
779 gl_waitqueue_init (&lock->waiting_readers);
780 gl_waitqueue_init (&lock->waiting_writers);
782 lock->guard.done = 1;
786 glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
788 if (!lock->guard.done)
790 if (InterlockedIncrement (&lock->guard.started) == 0)
791 /* This thread is the first one to need this lock. Initialize it. */
792 glthread_rwlock_init (lock);
794 /* Yield the CPU while waiting for another thread to finish
795 initializing this lock. */
796 while (!lock->guard.done)
799 EnterCriticalSection (&lock->lock);
800 /* Test whether only readers are currently running, and whether the runcount
801 field will not overflow. */
802 if (!(lock->runcount + 1 > 0))
804 /* This thread has to wait for a while. Enqueue it among the
806 HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
807 if (event != INVALID_HANDLE_VALUE)
810 LeaveCriticalSection (&lock->lock);
811 /* Wait until another thread signals this event. */
812 result = WaitForSingleObject (event, INFINITE);
813 if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
816 /* The thread which signalled the event already did the bookkeeping:
817 removed us from the waiting_readers, incremented lock->runcount. */
818 if (!(lock->runcount > 0))
824 /* Allocation failure. Weird. */
827 LeaveCriticalSection (&lock->lock);
829 EnterCriticalSection (&lock->lock);
831 while (!(lock->runcount + 1 > 0));
835 LeaveCriticalSection (&lock->lock);
840 glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
842 if (!lock->guard.done)
844 if (InterlockedIncrement (&lock->guard.started) == 0)
845 /* This thread is the first one to need this lock. Initialize it. */
846 glthread_rwlock_init (lock);
848 /* Yield the CPU while waiting for another thread to finish
849 initializing this lock. */
850 while (!lock->guard.done)
853 EnterCriticalSection (&lock->lock);
854 /* Test whether no readers or writers are currently running. */
855 if (!(lock->runcount == 0))
857 /* This thread has to wait for a while. Enqueue it among the
859 HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
860 if (event != INVALID_HANDLE_VALUE)
863 LeaveCriticalSection (&lock->lock);
864 /* Wait until another thread signals this event. */
865 result = WaitForSingleObject (event, INFINITE);
866 if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
869 /* The thread which signalled the event already did the bookkeeping:
870 removed us from the waiting_writers, set lock->runcount = -1. */
871 if (!(lock->runcount == -1))
877 /* Allocation failure. Weird. */
880 LeaveCriticalSection (&lock->lock);
882 EnterCriticalSection (&lock->lock);
884 while (!(lock->runcount == 0));
887 lock->runcount--; /* runcount becomes -1 */
888 LeaveCriticalSection (&lock->lock);
893 glthread_rwlock_unlock_func (gl_rwlock_t *lock)
895 if (!lock->guard.done)
897 EnterCriticalSection (&lock->lock);
898 if (lock->runcount < 0)
900 /* Drop a writer lock. */
901 if (!(lock->runcount == -1))
907 /* Drop a reader lock. */
908 if (!(lock->runcount > 0))
910 LeaveCriticalSection (&lock->lock);
915 if (lock->runcount == 0)
917 /* POSIX recommends that "write locks shall take precedence over read
918 locks", to avoid "writer starvation". */
919 if (lock->waiting_writers.count > 0)
921 /* Wake up one of the waiting writers. */
923 gl_waitqueue_notify_first (&lock->waiting_writers);
927 /* Wake up all waiting readers. */
928 lock->runcount += lock->waiting_readers.count;
929 gl_waitqueue_notify_all (&lock->waiting_readers);
932 LeaveCriticalSection (&lock->lock);
937 glthread_rwlock_destroy_func (gl_rwlock_t *lock)
939 if (!lock->guard.done)
941 if (lock->runcount != 0)
943 DeleteCriticalSection (&lock->lock);
944 if (lock->waiting_readers.array != NULL)
945 free (lock->waiting_readers.array);
946 if (lock->waiting_writers.array != NULL)
947 free (lock->waiting_writers.array);
948 lock->guard.done = 0;
952 /* --------------------- gl_recursive_lock_t datatype --------------------- */
955 glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
959 InitializeCriticalSection (&lock->lock);
960 lock->guard.done = 1;
964 glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
966 if (!lock->guard.done)
968 if (InterlockedIncrement (&lock->guard.started) == 0)
969 /* This thread is the first one to need this lock. Initialize it. */
970 glthread_recursive_lock_init (lock);
972 /* Yield the CPU while waiting for another thread to finish
973 initializing this lock. */
974 while (!lock->guard.done)
978 DWORD self = GetCurrentThreadId ();
979 if (lock->owner != self)
981 EnterCriticalSection (&lock->lock);
984 if (++(lock->depth) == 0) /* wraparound? */
994 glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
996 if (lock->owner != GetCurrentThreadId ())
998 if (lock->depth == 0)
1000 if (--(lock->depth) == 0)
1003 LeaveCriticalSection (&lock->lock);
1009 glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
1011 if (lock->owner != 0)
1013 DeleteCriticalSection (&lock->lock);
1014 lock->guard.done = 0;
1018 /* -------------------------- gl_once_t datatype -------------------------- */
1021 glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
1023 if (once_control->inited <= 0)
1025 if (InterlockedIncrement (&once_control->started) == 0)
1027 /* This thread is the first one to come to this once_control. */
1028 InitializeCriticalSection (&once_control->lock);
1029 EnterCriticalSection (&once_control->lock);
1030 once_control->inited = 0;
1032 once_control->inited = 1;
1033 LeaveCriticalSection (&once_control->lock);
1037 /* Undo last operation. */
1038 InterlockedDecrement (&once_control->started);
1039 /* Some other thread has already started the initialization.
1040 Yield the CPU while waiting for the other thread to finish
1041 initializing and taking the lock. */
1042 while (once_control->inited < 0)
1044 if (once_control->inited <= 0)
1046 /* Take the lock. This blocks until the other thread has
1047 finished calling the initfunction. */
1048 EnterCriticalSection (&once_control->lock);
1049 LeaveCriticalSection (&once_control->lock);
1050 if (!(once_control->inited > 0))
1059 /* ========================================================================= */