1 /* Locking in multithreaded situations.
2 Copyright (C) 2005-2010 Free Software Foundation, Inc.
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 3, or (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software Foundation,
16 Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
18 /* Written by Bruno Haible <bruno@clisp.org>, 2005.
19 Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h, and gthr-win32.h.
24 #include "glthread/lock.h"
26 /* ========================================================================= */
30 /* -------------------------- gl_lock_t datatype -------------------------- */
32 /* ------------------------- gl_rwlock_t datatype ------------------------- */
34 # if HAVE_PTHREAD_RWLOCK
36 # if !defined PTHREAD_RWLOCK_INITIALIZER
39 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
43 err = pthread_rwlock_init (&lock->rwlock, NULL);
46 lock->initialized = 1;
51 glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
53 if (!lock->initialized)
57 err = pthread_mutex_lock (&lock->guard);
60 if (!lock->initialized)
62 err = glthread_rwlock_init_multithreaded (lock);
65 pthread_mutex_unlock (&lock->guard);
69 err = pthread_mutex_unlock (&lock->guard);
73 return pthread_rwlock_rdlock (&lock->rwlock);
77 glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
79 if (!lock->initialized)
83 err = pthread_mutex_lock (&lock->guard);
86 if (!lock->initialized)
88 err = glthread_rwlock_init_multithreaded (lock);
91 pthread_mutex_unlock (&lock->guard);
95 err = pthread_mutex_unlock (&lock->guard);
99 return pthread_rwlock_wrlock (&lock->rwlock);
103 glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
105 if (!lock->initialized)
107 return pthread_rwlock_unlock (&lock->rwlock);
111 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
115 if (!lock->initialized)
117 err = pthread_rwlock_destroy (&lock->rwlock);
120 lock->initialized = 0;
129 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
133 err = pthread_mutex_init (&lock->lock, NULL);
136 err = pthread_cond_init (&lock->waiting_readers, NULL);
139 err = pthread_cond_init (&lock->waiting_writers, NULL);
142 lock->waiting_writers_count = 0;
148 glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
152 err = pthread_mutex_lock (&lock->lock);
155 /* Test whether only readers are currently running, and whether the runcount
156 field will not overflow. */
157 /* POSIX says: "It is implementation-defined whether the calling thread
158 acquires the lock when a writer does not hold the lock and there are
159 writers blocked on the lock." Let's say, no: give the writers a higher
161 while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
163 /* This thread has to wait for a while. Enqueue it among the
165 err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
168 pthread_mutex_unlock (&lock->lock);
173 return pthread_mutex_unlock (&lock->lock);
177 glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
181 err = pthread_mutex_lock (&lock->lock);
184 /* Test whether no readers or writers are currently running. */
185 while (!(lock->runcount == 0))
187 /* This thread has to wait for a while. Enqueue it among the
189 lock->waiting_writers_count++;
190 err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
193 lock->waiting_writers_count--;
194 pthread_mutex_unlock (&lock->lock);
197 lock->waiting_writers_count--;
199 lock->runcount--; /* runcount becomes -1 */
200 return pthread_mutex_unlock (&lock->lock);
204 glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
208 err = pthread_mutex_lock (&lock->lock);
211 if (lock->runcount < 0)
213 /* Drop a writer lock. */
214 if (!(lock->runcount == -1))
216 pthread_mutex_unlock (&lock->lock);
223 /* Drop a reader lock. */
224 if (!(lock->runcount > 0))
226 pthread_mutex_unlock (&lock->lock);
231 if (lock->runcount == 0)
233 /* POSIX recommends that "write locks shall take precedence over read
234 locks", to avoid "writer starvation". */
235 if (lock->waiting_writers_count > 0)
237 /* Wake up one of the waiting writers. */
238 err = pthread_cond_signal (&lock->waiting_writers);
241 pthread_mutex_unlock (&lock->lock);
247 /* Wake up all waiting readers. */
248 err = pthread_cond_broadcast (&lock->waiting_readers);
251 pthread_mutex_unlock (&lock->lock);
256 return pthread_mutex_unlock (&lock->lock);
260 glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
264 err = pthread_mutex_destroy (&lock->lock);
267 err = pthread_cond_destroy (&lock->waiting_readers);
270 err = pthread_cond_destroy (&lock->waiting_writers);
278 /* --------------------- gl_recursive_lock_t datatype --------------------- */
280 # if HAVE_PTHREAD_MUTEX_RECURSIVE
282 # if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
285 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
287 pthread_mutexattr_t attributes;
290 err = pthread_mutexattr_init (&attributes);
293 err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
296 pthread_mutexattr_destroy (&attributes);
299 err = pthread_mutex_init (lock, &attributes);
302 pthread_mutexattr_destroy (&attributes);
305 err = pthread_mutexattr_destroy (&attributes);
314 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
316 pthread_mutexattr_t attributes;
319 err = pthread_mutexattr_init (&attributes);
322 err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
325 pthread_mutexattr_destroy (&attributes);
328 err = pthread_mutex_init (&lock->recmutex, &attributes);
331 pthread_mutexattr_destroy (&attributes);
334 err = pthread_mutexattr_destroy (&attributes);
337 lock->initialized = 1;
342 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
344 if (!lock->initialized)
348 err = pthread_mutex_lock (&lock->guard);
351 if (!lock->initialized)
353 err = glthread_recursive_lock_init_multithreaded (lock);
356 pthread_mutex_unlock (&lock->guard);
360 err = pthread_mutex_unlock (&lock->guard);
364 return pthread_mutex_lock (&lock->recmutex);
368 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
370 if (!lock->initialized)
372 return pthread_mutex_unlock (&lock->recmutex);
376 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
380 if (!lock->initialized)
382 err = pthread_mutex_destroy (&lock->recmutex);
385 lock->initialized = 0;
394 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
398 err = pthread_mutex_init (&lock->mutex, NULL);
401 lock->owner = (pthread_t) 0;
407 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
409 pthread_t self = pthread_self ();
410 if (lock->owner != self)
414 err = pthread_mutex_lock (&lock->mutex);
419 if (++(lock->depth) == 0) /* wraparound? */
428 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
430 if (lock->owner != pthread_self ())
432 if (lock->depth == 0)
434 if (--(lock->depth) == 0)
436 lock->owner = (pthread_t) 0;
437 return pthread_mutex_unlock (&lock->mutex);
444 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
446 if (lock->owner != (pthread_t) 0)
448 return pthread_mutex_destroy (&lock->mutex);
453 /* -------------------------- gl_once_t datatype -------------------------- */
455 static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;
458 glthread_once_singlethreaded (pthread_once_t *once_control)
460 /* We don't know whether pthread_once_t is an integer type, a floating-point
461 type, a pointer type, or a structure type. */
462 char *firstbyte = (char *)once_control;
463 if (*firstbyte == *(const char *)&fresh_once)
465 /* First time use of once_control. Invert the first byte. */
466 *firstbyte = ~ *(const char *)&fresh_once;
475 /* ========================================================================= */
479 /* Use the GNU Pth threads library. */
481 /* -------------------------- gl_lock_t datatype -------------------------- */
483 /* ------------------------- gl_rwlock_t datatype ------------------------- */
485 /* --------------------- gl_recursive_lock_t datatype --------------------- */
487 /* -------------------------- gl_once_t datatype -------------------------- */
490 glthread_once_call (void *arg)
492 void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
493 void (*initfunction) (void) = *gl_once_temp_addr;
498 glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
500 void (*temp) (void) = initfunction;
501 return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
505 glthread_once_singlethreaded (pth_once_t *once_control)
507 /* We know that pth_once_t is an integer type. */
508 if (*once_control == PTH_ONCE_INIT)
510 /* First time use of once_control. Invert the marker. */
511 *once_control = ~ PTH_ONCE_INIT;
520 /* ========================================================================= */
522 #if USE_SOLARIS_THREADS
524 /* Use the old Solaris threads library. */
526 /* -------------------------- gl_lock_t datatype -------------------------- */
528 /* ------------------------- gl_rwlock_t datatype ------------------------- */
530 /* --------------------- gl_recursive_lock_t datatype --------------------- */
533 glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
537 err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
540 lock->owner = (thread_t) 0;
546 glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
548 thread_t self = thr_self ();
549 if (lock->owner != self)
553 err = mutex_lock (&lock->mutex);
558 if (++(lock->depth) == 0) /* wraparound? */
567 glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
569 if (lock->owner != thr_self ())
571 if (lock->depth == 0)
573 if (--(lock->depth) == 0)
575 lock->owner = (thread_t) 0;
576 return mutex_unlock (&lock->mutex);
583 glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
585 if (lock->owner != (thread_t) 0)
587 return mutex_destroy (&lock->mutex);
590 /* -------------------------- gl_once_t datatype -------------------------- */
593 glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
595 if (!once_control->inited)
599 /* Use the mutex to guarantee that if another thread is already calling
600 the initfunction, this thread waits until it's finished. */
601 err = mutex_lock (&once_control->mutex);
604 if (!once_control->inited)
606 once_control->inited = 1;
609 return mutex_unlock (&once_control->mutex);
616 glthread_once_singlethreaded (gl_once_t *once_control)
618 /* We know that gl_once_t contains an integer type. */
619 if (!once_control->inited)
621 /* First time use of once_control. Invert the marker. */
622 once_control->inited = ~ 0;
631 /* ========================================================================= */
633 #if USE_WIN32_THREADS
635 /* -------------------------- gl_lock_t datatype -------------------------- */
638 glthread_lock_init_func (gl_lock_t *lock)
640 InitializeCriticalSection (&lock->lock);
641 lock->guard.done = 1;
645 glthread_lock_lock_func (gl_lock_t *lock)
647 if (!lock->guard.done)
649 if (InterlockedIncrement (&lock->guard.started) == 0)
650 /* This thread is the first one to need this lock. Initialize it. */
651 glthread_lock_init (lock);
653 /* Yield the CPU while waiting for another thread to finish
654 initializing this lock. */
655 while (!lock->guard.done)
658 EnterCriticalSection (&lock->lock);
663 glthread_lock_unlock_func (gl_lock_t *lock)
665 if (!lock->guard.done)
667 LeaveCriticalSection (&lock->lock);
672 glthread_lock_destroy_func (gl_lock_t *lock)
674 if (!lock->guard.done)
676 DeleteCriticalSection (&lock->lock);
677 lock->guard.done = 0;
681 /* ------------------------- gl_rwlock_t datatype ------------------------- */
683 /* In this file, the waitqueues are implemented as circular arrays. */
684 #define gl_waitqueue_t gl_carray_waitqueue_t
687 gl_waitqueue_init (gl_waitqueue_t *wq)
695 /* Enqueues the current thread, represented by an event, in a wait queue.
696 Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */
698 gl_waitqueue_add (gl_waitqueue_t *wq)
703 if (wq->count == wq->alloc)
705 unsigned int new_alloc = 2 * wq->alloc + 1;
707 (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
708 if (new_array == NULL)
709 /* No more memory. */
710 return INVALID_HANDLE_VALUE;
711 /* Now is a good opportunity to rotate the array so that its contents
712 starts at offset 0. */
715 unsigned int old_count = wq->count;
716 unsigned int old_alloc = wq->alloc;
717 unsigned int old_offset = wq->offset;
719 if (old_offset + old_count > old_alloc)
721 unsigned int limit = old_offset + old_count - old_alloc;
722 for (i = 0; i < limit; i++)
723 new_array[old_alloc + i] = new_array[i];
725 for (i = 0; i < old_count; i++)
726 new_array[i] = new_array[old_offset + i];
729 wq->array = new_array;
730 wq->alloc = new_alloc;
732 /* Whether the created event is a manual-reset one or an auto-reset one,
733 does not matter, since we will wait on it only once. */
734 event = CreateEvent (NULL, TRUE, FALSE, NULL);
735 if (event == INVALID_HANDLE_VALUE)
736 /* No way to allocate an event. */
737 return INVALID_HANDLE_VALUE;
738 index = wq->offset + wq->count;
739 if (index >= wq->alloc)
741 wq->array[index] = event;
746 /* Notifies the first thread from a wait queue and dequeues it. */
748 gl_waitqueue_notify_first (gl_waitqueue_t *wq)
750 SetEvent (wq->array[wq->offset + 0]);
753 if (wq->count == 0 || wq->offset == wq->alloc)
757 /* Notifies all threads from a wait queue and dequeues them all. */
759 gl_waitqueue_notify_all (gl_waitqueue_t *wq)
763 for (i = 0; i < wq->count; i++)
765 unsigned int index = wq->offset + i;
766 if (index >= wq->alloc)
768 SetEvent (wq->array[index]);
775 glthread_rwlock_init_func (gl_rwlock_t *lock)
777 InitializeCriticalSection (&lock->lock);
778 gl_waitqueue_init (&lock->waiting_readers);
779 gl_waitqueue_init (&lock->waiting_writers);
781 lock->guard.done = 1;
785 glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
787 if (!lock->guard.done)
789 if (InterlockedIncrement (&lock->guard.started) == 0)
790 /* This thread is the first one to need this lock. Initialize it. */
791 glthread_rwlock_init (lock);
793 /* Yield the CPU while waiting for another thread to finish
794 initializing this lock. */
795 while (!lock->guard.done)
798 EnterCriticalSection (&lock->lock);
799 /* Test whether only readers are currently running, and whether the runcount
800 field will not overflow. */
801 if (!(lock->runcount + 1 > 0))
803 /* This thread has to wait for a while. Enqueue it among the
805 HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
806 if (event != INVALID_HANDLE_VALUE)
809 LeaveCriticalSection (&lock->lock);
810 /* Wait until another thread signals this event. */
811 result = WaitForSingleObject (event, INFINITE);
812 if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
815 /* The thread which signalled the event already did the bookkeeping:
816 removed us from the waiting_readers, incremented lock->runcount. */
817 if (!(lock->runcount > 0))
823 /* Allocation failure. Weird. */
826 LeaveCriticalSection (&lock->lock);
828 EnterCriticalSection (&lock->lock);
830 while (!(lock->runcount + 1 > 0));
834 LeaveCriticalSection (&lock->lock);
839 glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
841 if (!lock->guard.done)
843 if (InterlockedIncrement (&lock->guard.started) == 0)
844 /* This thread is the first one to need this lock. Initialize it. */
845 glthread_rwlock_init (lock);
847 /* Yield the CPU while waiting for another thread to finish
848 initializing this lock. */
849 while (!lock->guard.done)
852 EnterCriticalSection (&lock->lock);
853 /* Test whether no readers or writers are currently running. */
854 if (!(lock->runcount == 0))
856 /* This thread has to wait for a while. Enqueue it among the
858 HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
859 if (event != INVALID_HANDLE_VALUE)
862 LeaveCriticalSection (&lock->lock);
863 /* Wait until another thread signals this event. */
864 result = WaitForSingleObject (event, INFINITE);
865 if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
868 /* The thread which signalled the event already did the bookkeeping:
869 removed us from the waiting_writers, set lock->runcount = -1. */
870 if (!(lock->runcount == -1))
876 /* Allocation failure. Weird. */
879 LeaveCriticalSection (&lock->lock);
881 EnterCriticalSection (&lock->lock);
883 while (!(lock->runcount == 0));
886 lock->runcount--; /* runcount becomes -1 */
887 LeaveCriticalSection (&lock->lock);
892 glthread_rwlock_unlock_func (gl_rwlock_t *lock)
894 if (!lock->guard.done)
896 EnterCriticalSection (&lock->lock);
897 if (lock->runcount < 0)
899 /* Drop a writer lock. */
900 if (!(lock->runcount == -1))
906 /* Drop a reader lock. */
907 if (!(lock->runcount > 0))
909 LeaveCriticalSection (&lock->lock);
914 if (lock->runcount == 0)
916 /* POSIX recommends that "write locks shall take precedence over read
917 locks", to avoid "writer starvation". */
918 if (lock->waiting_writers.count > 0)
920 /* Wake up one of the waiting writers. */
922 gl_waitqueue_notify_first (&lock->waiting_writers);
926 /* Wake up all waiting readers. */
927 lock->runcount += lock->waiting_readers.count;
928 gl_waitqueue_notify_all (&lock->waiting_readers);
931 LeaveCriticalSection (&lock->lock);
936 glthread_rwlock_destroy_func (gl_rwlock_t *lock)
938 if (!lock->guard.done)
940 if (lock->runcount != 0)
942 DeleteCriticalSection (&lock->lock);
943 if (lock->waiting_readers.array != NULL)
944 free (lock->waiting_readers.array);
945 if (lock->waiting_writers.array != NULL)
946 free (lock->waiting_writers.array);
947 lock->guard.done = 0;
951 /* --------------------- gl_recursive_lock_t datatype --------------------- */
954 glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
958 InitializeCriticalSection (&lock->lock);
959 lock->guard.done = 1;
963 glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
965 if (!lock->guard.done)
967 if (InterlockedIncrement (&lock->guard.started) == 0)
968 /* This thread is the first one to need this lock. Initialize it. */
969 glthread_recursive_lock_init (lock);
971 /* Yield the CPU while waiting for another thread to finish
972 initializing this lock. */
973 while (!lock->guard.done)
977 DWORD self = GetCurrentThreadId ();
978 if (lock->owner != self)
980 EnterCriticalSection (&lock->lock);
983 if (++(lock->depth) == 0) /* wraparound? */
993 glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
995 if (lock->owner != GetCurrentThreadId ())
997 if (lock->depth == 0)
999 if (--(lock->depth) == 0)
1002 LeaveCriticalSection (&lock->lock);
1008 glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
1010 if (lock->owner != 0)
1012 DeleteCriticalSection (&lock->lock);
1013 lock->guard.done = 0;
1017 /* -------------------------- gl_once_t datatype -------------------------- */
1020 glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
1022 if (once_control->inited <= 0)
1024 if (InterlockedIncrement (&once_control->started) == 0)
1026 /* This thread is the first one to come to this once_control. */
1027 InitializeCriticalSection (&once_control->lock);
1028 EnterCriticalSection (&once_control->lock);
1029 once_control->inited = 0;
1031 once_control->inited = 1;
1032 LeaveCriticalSection (&once_control->lock);
1036 /* Undo last operation. */
1037 InterlockedDecrement (&once_control->started);
1038 /* Some other thread has already started the initialization.
1039 Yield the CPU while waiting for the other thread to finish
1040 initializing and taking the lock. */
1041 while (once_control->inited < 0)
1043 if (once_control->inited <= 0)
1045 /* Take the lock. This blocks until the other thread has
1046 finished calling the initfunction. */
1047 EnterCriticalSection (&once_control->lock);
1048 LeaveCriticalSection (&once_control->lock);
1049 if (!(once_control->inited > 0))
1058 /* ========================================================================= */