/*
 * Copyright (C) 2008-2012 Tobias Brunner
 * Copyright (C) 2008 Martin Willi
 * HSR Hochschule fuer Technik Rapperswil
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#define _GNU_SOURCE
#include <pthread.h>

#include <library.h>
#include <utils/debug.h>

#include "rwlock.h"
#include "rwlock_condvar.h"
#include "thread.h"
#include "condvar.h"
#include "mutex.h"
#include "lock_profiler.h"

#ifdef __APPLE__
/* while pthread_rwlock_rdlock(3) says that it supports multiple read locks,
 * this does not seem to be true. After releasing a recursive rdlock,
 * a subsequent wrlock fails... */
# undef HAVE_PTHREAD_RWLOCK_INIT
#endif

typedef struct private_rwlock_t private_rwlock_t;
typedef struct private_rwlock_condvar_t private_rwlock_condvar_t;

/**
 * private data of rwlock
 */
struct private_rwlock_t {

	/**
	 * public functions
	 */
	rwlock_t public;

#ifdef HAVE_PTHREAD_RWLOCK_INIT

	/**
	 * wrapped pthread rwlock
	 */
	pthread_rwlock_t rwlock;

#else

	/**
	 * mutex to emulate a native rwlock
	 */
	mutex_t *mutex;

	/**
	 * condvar to handle writers
	 */
	condvar_t *writers;

	/**
	 * condvar to handle readers
	 */
	condvar_t *readers;

	/**
	 * number of waiting writers
	 */
	u_int waiting_writers;

	/**
	 * number of readers holding the lock
	 */
	u_int reader_count;

	/**
	 * TRUE, if a writer is holding the lock currently
	 */
	bool writer;

#endif /* HAVE_PTHREAD_RWLOCK_INIT */

	/**
	 * profiling info, if enabled
	 */
	lock_profile_t profile;
};

/**
 * private data of condvar
 */
struct private_rwlock_condvar_t {

	/**
	 * public interface
	 */
	rwlock_condvar_t public;

	/**
	 * mutex used to implement rwlock condvar
	 */
	mutex_t *mutex;

	/**
	 * regular condvar to implement rwlock condvar
	 */
	condvar_t *condvar;
};


#ifdef HAVE_PTHREAD_RWLOCK_INIT

METHOD(rwlock_t, read_lock, void,
	private_rwlock_t *this)
{
	int err;

	profiler_start(&this->profile);
	err = pthread_rwlock_rdlock(&this->rwlock);
	if (err != 0)
	{
		DBG1(DBG_LIB, "!!! RWLOCK READ LOCK ERROR: %s !!!", strerror(err));
	}
	profiler_end(&this->profile);
}

METHOD(rwlock_t, write_lock, void,
	private_rwlock_t *this)
{
	int err;

	profiler_start(&this->profile);
	err = pthread_rwlock_wrlock(&this->rwlock);
	if (err != 0)
	{
		DBG1(DBG_LIB, "!!! RWLOCK WRITE LOCK ERROR: %s !!!", strerror(err));
	}
	profiler_end(&this->profile);
}

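/**
 * Try to acquire the write lock without blocking; returns TRUE only if it
 * could be obtained immediately.
 */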
METHOD(rwlock_t, try_write_lock, bool,
	private_rwlock_t *this)
{
	return pthread_rwlock_trywrlock(&this->rwlock) == 0;
}

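/**
 * Release either a read or the write lock held by the calling thread; the
 * pthread implementation determines which one is held.
 */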
METHOD(rwlock_t, unlock, void,
	private_rwlock_t *this)
{
	int err;

	err = pthread_rwlock_unlock(&this->rwlock);
	if (err != 0)
	{
		DBG1(DBG_LIB, "!!! RWLOCK UNLOCK ERROR: %s !!!", strerror(err));
	}
}

METHOD(rwlock_t, destroy, void,
	private_rwlock_t *this)
{
	pthread_rwlock_destroy(&this->rwlock);
	profiler_cleanup(&this->profile);
	free(this);
}

/*
 * see header file
 */
rwlock_t *rwlock_create(rwlock_type_t type)
{
	switch (type)
	{
		case RWLOCK_TYPE_DEFAULT:
		default:
		{
			private_rwlock_t *this;

			INIT(this,
				.public = {
					.read_lock = _read_lock,
					.write_lock = _write_lock,
					.try_write_lock = _try_write_lock,
					.unlock = _unlock,
					.destroy = _destroy,
				}
			);

			pthread_rwlock_init(&this->rwlock, NULL);
			profiler_init(&this->profile);

			return &this->public;
		}
	}
}

#else /* HAVE_PTHREAD_RWLOCK_INIT */

/**
 * This implementation of the rwlock_t interface uses mutex_t and condvar_t
 * primitives, if the pthread_rwlock_* group of functions is not available or
 * doesn't allow recursive locking for readers.
 *
 * The following constraints are enforced:
 * - Multiple readers can hold the lock at the same time.
 * - Only a single writer can hold the lock at any given time.
 * - A writer must block until all readers have released the lock before
 *   obtaining the lock exclusively.
 * - Readers that don't hold any read lock and arrive while a writer is
 *   waiting to acquire the lock will block until after the writer has
 *   obtained and released the lock.
 * These constraints allow for read sharing, prevent write sharing, prevent
 * read-write sharing and (largely) prevent starvation of writers by a steady
 * stream of incoming readers. Reader starvation is not prevented (this could
 * happen if there are more writers than readers).
 *
 * The implementation supports recursive locking of the read lock, but not of
 * the write lock. A thread holding the read lock must not acquire the lock
 * exclusively at the same time, and vice-versa (this is not checked or
 * enforced, so behave yourself to prevent deadlocks).
 *
 * Since writers are preferred, a thread currently holding the read lock that
 * tries to acquire the read lock recursively while a writer is waiting would
 * result in a deadlock. In order to avoid having to use a thread-specific
 * value for each rwlock_t (or a list of threads) to keep track of whether a
 * thread already acquired the read lock, we use a single thread-specific
 * value for all rwlock_t objects that keeps track of how many read locks a
 * thread currently holds. Preferring readers that already hold ANY read
 * locks prevents this deadlock, while it still largely avoids writer
 * starvation (for locks that can only be acquired while holding another read
 * lock this will obviously not work).
 */
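
/*
 * For illustration only (not part of the original build): the recursive
 * read-lock case the thread-specific counter supports looks roughly like
 * this, using only the public rwlock_t interface:
 *
 *     rwlock_t *lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
 *
 *     lock->read_lock(lock);
 *     lock->read_lock(lock);   // recursive; granted even if a writer waits
 *     lock->unlock(lock);
 *     lock->unlock(lock);      // last unlock wakes a waiting writer, if any
 *
 *     lock->destroy(lock);
 */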

/**
 * Keep track of how many read locks a thread holds.
 */
static pthread_key_t is_reader;

/**
 * Only initialize the read lock counter once.
 */
static pthread_once_t is_reader_initialized = PTHREAD_ONCE_INIT;

/**
 * Initialize the read lock counter.
 */
static void initialize_is_reader()
{
	pthread_key_create(&is_reader, NULL);
}

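/**
 * Acquire the read lock. A thread that already holds any read lock is let
 * through immediately; otherwise the caller blocks while a writer holds the
 * lock or is waiting for it.
 */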
METHOD(rwlock_t, read_lock, void,
	private_rwlock_t *this)
{
	uintptr_t reading;
	bool old;

	reading = (uintptr_t)pthread_getspecific(is_reader);
	profiler_start(&this->profile);
	this->mutex->lock(this->mutex);
	if (!this->writer && reading > 0)
	{
		/* directly allow threads that hold ANY read locks, to avoid a deadlock
		 * caused by preferring writers in the loop below */
	}
	else
	{
		old = thread_cancelability(FALSE);
		while (this->writer || this->waiting_writers)
		{
			this->readers->wait(this->readers, this->mutex);
		}
		thread_cancelability(old);
	}
	this->reader_count++;
	profiler_end(&this->profile);
	this->mutex->unlock(this->mutex);
	pthread_setspecific(is_reader, (void*)(reading + 1));
}

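/**
 * Acquire the write lock. Registering as a waiting writer first blocks newly
 * arriving readers; the caller then waits until neither a writer nor any
 * reader holds the lock.
 */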
METHOD(rwlock_t, write_lock, void,
	private_rwlock_t *this)
{
	bool old;

	profiler_start(&this->profile);
	this->mutex->lock(this->mutex);
	this->waiting_writers++;
	old = thread_cancelability(FALSE);
	while (this->writer || this->reader_count)
	{
		this->writers->wait(this->writers, this->mutex);
	}
	thread_cancelability(old);
	this->waiting_writers--;
	this->writer = TRUE;
	profiler_end(&this->profile);
	this->mutex->unlock(this->mutex);
}

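/**
 * Try to acquire the write lock without blocking; succeeds only if neither a
 * writer nor any reader currently holds the lock.
 */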
METHOD(rwlock_t, try_write_lock, bool,
	private_rwlock_t *this)
{
	bool res = FALSE;
	this->mutex->lock(this->mutex);
	if (!this->writer && !this->reader_count)
	{
		res = this->writer = TRUE;
	}
	this->mutex->unlock(this->mutex);
	return res;
}

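/**
 * Release the write lock or one read lock held by the calling thread. Once
 * no readers are left, one waiting writer is signalled, or all waiting
 * readers are woken up if no writer is waiting.
 */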
METHOD(rwlock_t, unlock, void,
	private_rwlock_t *this)
{
	this->mutex->lock(this->mutex);
	if (this->writer)
	{
		this->writer = FALSE;
	}
	else
	{
		uintptr_t reading;

		this->reader_count--;
		reading = (uintptr_t)pthread_getspecific(is_reader);
		pthread_setspecific(is_reader, (void*)(reading - 1));
	}
	if (!this->reader_count)
	{
		if (this->waiting_writers)
		{
			this->writers->signal(this->writers);
		}
		else
		{
			this->readers->broadcast(this->readers);
		}
	}
	this->mutex->unlock(this->mutex);
}

METHOD(rwlock_t, destroy, void,
	private_rwlock_t *this)
{
	this->mutex->destroy(this->mutex);
	this->writers->destroy(this->writers);
	this->readers->destroy(this->readers);
	profiler_cleanup(&this->profile);
	free(this);
}

/*
 * see header file
 */
rwlock_t *rwlock_create(rwlock_type_t type)
{
	pthread_once(&is_reader_initialized, initialize_is_reader);

	switch (type)
	{
		case RWLOCK_TYPE_DEFAULT:
		default:
		{
			private_rwlock_t *this;

			INIT(this,
				.public = {
					.read_lock = _read_lock,
					.write_lock = _write_lock,
					.try_write_lock = _try_write_lock,
					.unlock = _unlock,
					.destroy = _destroy,
				},
				.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
				.writers = condvar_create(CONDVAR_TYPE_DEFAULT),
				.readers = condvar_create(CONDVAR_TYPE_DEFAULT),
			);

			profiler_init(&this->profile);

			return &this->public;
		}
	}
}

#endif /* HAVE_PTHREAD_RWLOCK_INIT */


METHOD(rwlock_condvar_t, wait_, void,
	private_rwlock_condvar_t *this, rwlock_t *lock)
{
	/* at this point we have the write lock locked; to make signals more
	 * predictable we try to prevent other threads from signaling by acquiring
	 * the mutex while we still hold the write lock (this assumes they will
	 * hold the write lock themselves when signaling, which is not mandatory) */
	this->mutex->lock(this->mutex);
	/* unlock the rwlock and wait for a signal */
	lock->unlock(lock);
	/* if the calling thread enabled thread cancelability, we want to replicate
	 * the behavior of the regular condvar, i.e. the lock will be held again
	 * before executing cleanup functions registered by the calling thread */
	thread_cleanup_push((thread_cleanup_t)lock->write_lock, lock);
	thread_cleanup_push((thread_cleanup_t)this->mutex->unlock, this->mutex);
	this->condvar->wait(this->condvar, this->mutex);
	/* we release the mutex to allow other threads into the condvar (might even
	 * be required so we can acquire the lock again below) */
	thread_cleanup_pop(TRUE);
	/* finally we reacquire the lock we held previously */
	thread_cleanup_pop(TRUE);
}

METHOD(rwlock_condvar_t, timed_wait_abs, bool,
	private_rwlock_condvar_t *this, rwlock_t *lock, timeval_t time)
{
	bool timed_out;

	/* see wait() above for details on what is going on here */
	this->mutex->lock(this->mutex);
	lock->unlock(lock);
	thread_cleanup_push((thread_cleanup_t)lock->write_lock, lock);
	thread_cleanup_push((thread_cleanup_t)this->mutex->unlock, this->mutex);
	timed_out = this->condvar->timed_wait_abs(this->condvar, this->mutex, time);
	thread_cleanup_pop(TRUE);
	thread_cleanup_pop(TRUE);
	return timed_out;
}

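/**
 * Relative variant of timed_wait_abs(): convert the timeout in milliseconds
 * to an absolute monotonic time and delegate to the absolute version.
 */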
METHOD(rwlock_condvar_t, timed_wait, bool,
	private_rwlock_condvar_t *this, rwlock_t *lock, u_int timeout)
{
	timeval_t tv;
	u_int s, ms;

	time_monotonic(&tv);

	s = timeout / 1000;
	ms = timeout % 1000;

	tv.tv_sec += s;
	timeval_add_ms(&tv, ms);

	return timed_wait_abs(this, lock, tv);
}

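/**
 * Signalling happens with the internal mutex held, so a thread that is
 * between releasing the rwlock and blocking on the condvar in wait() cannot
 * miss the wakeup. The same applies to broadcast() below.
 */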
METHOD(rwlock_condvar_t, signal_, void,
	private_rwlock_condvar_t *this)
{
	this->mutex->lock(this->mutex);
	this->condvar->signal(this->condvar);
	this->mutex->unlock(this->mutex);
}

METHOD(rwlock_condvar_t, broadcast, void,
	private_rwlock_condvar_t *this)
{
	this->mutex->lock(this->mutex);
	this->condvar->broadcast(this->condvar);
	this->mutex->unlock(this->mutex);
}

METHOD(rwlock_condvar_t, condvar_destroy, void,
	private_rwlock_condvar_t *this)
{
	this->condvar->destroy(this->condvar);
	this->mutex->destroy(this->mutex);
	free(this);
}

/*
 * see header file
 */
rwlock_condvar_t *rwlock_condvar_create()
{
	private_rwlock_condvar_t *this;

	INIT(this,
		.public = {
			.wait = _wait_,
			.timed_wait = _timed_wait,
			.timed_wait_abs = _timed_wait_abs,
			.signal = _signal_,
			.broadcast = _broadcast,
			.destroy = _condvar_destroy,
		},
		.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
		.condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
	);
	return &this->public;
}
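
/*
 * For illustration only (not part of the original build): the typical usage
 * pattern mirrors a regular condvar, except that the predicate is checked
 * while holding the write lock:
 *
 *     lock->write_lock(lock);
 *     while (!predicate)
 *     {
 *         condvar->wait(condvar, lock);
 *     }
 *     lock->unlock(lock);
 *
 *     // signalling side, preferably also holding the write lock:
 *     lock->write_lock(lock);
 *     predicate = TRUE;
 *     condvar->signal(condvar);
 *     lock->unlock(lock);
 *
 * Here "predicate", "lock" and "condvar" are placeholders for the caller's
 * shared state, an rwlock_t and an rwlock_condvar_t, respectively.
 */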