Annotation of embedaddon/strongswan/src/libstrongswan/tests/suites/test_threading.c, revision 1.1.1.1
1.1 misho 1: /*
2: * Copyright (C) 2013-2018 Tobias Brunner
3: * Copyright (C) 2008 Martin Willi
4: * HSR Hochschule fuer Technik Rapperswil
5: *
6: * This program is free software; you can redistribute it and/or modify it
7: * under the terms of the GNU General Public License as published by the
8: * Free Software Foundation; either version 2 of the License, or (at your
9: * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
10: *
11: * This program is distributed in the hope that it will be useful, but
12: * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13: * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14: * for more details.
15: */
16:
17: #include "test_suite.h"
18:
19: #include <unistd.h>
20:
21: #include <threading/thread.h>
22: #include <threading/mutex.h>
23: #include <threading/condvar.h>
24: #include <threading/rwlock.h>
25: #include <threading/rwlock_condvar.h>
26: #include <threading/spinlock.h>
27: #include <threading/semaphore.h>
28: #include <threading/thread_value.h>
29:
#ifdef WIN32
/* when running on AppVeyor the wait functions seem to frequently trigger a bit
 * early, allow this if the difference is within 5ms. */
/**
 * Assert that *actual is strictly later than *expected.
 *
 * On Windows a shortfall of up to 5ms is only warned about (CI timers fire
 * early); anything larger fails the test.
 */
static inline void time_is_at_least(timeval_t *expected, timeval_t *actual)
{
	if (!timercmp(actual, expected, >))
	{
		timeval_t diff;

		timersub(expected, actual, &diff);
		if (!diff.tv_sec && diff.tv_usec <= 5000)
		{
			warn("allow timer event %dus too early on Windows (expected: %u.%u, "
				 "actual: %u.%u)", diff.tv_usec, expected->tv_sec,
				 expected->tv_usec, actual->tv_sec, actual->tv_usec);
			return;
		}
		fail("expected: %u.%u, actual: %u.%u", expected->tv_sec,
			 expected->tv_usec, actual->tv_sec, actual->tv_usec);
	}
}
#else /* WIN32 */
/**
 * Assert that *actual is strictly later than *expected (no tolerance).
 */
static inline void time_is_at_least(timeval_t *expected, timeval_t *actual)
{
	ck_assert_msg(timercmp(actual, expected, >), "expected: %u.%u, actual: "
				  "%u.%u", expected->tv_sec, expected->tv_usec, actual->tv_sec,
				  actual->tv_usec);
}
#endif /* WIN32 */
59:
/*******************************************************************************
 * recursive mutex test
 */

/** number of worker threads spawned by the tests below */
#define THREADS 20

/**
 * Thread barrier data
 */
typedef struct {
	/** protects count/current/active */
	mutex_t *mutex;
	/** signaled when the last thread arrives */
	condvar_t *cond;
	/** number of threads the barrier waits for */
	int count;
	/** number of threads that arrived in the current round */
	int current;
	/** TRUE while a round is in progress; cleared by the round's winner */
	bool active;
} barrier_t;

/**
 * Create a thread barrier for count threads
 */
static barrier_t* barrier_create(int count)
{
	barrier_t *this;

	INIT(this,
		.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
		.cond = condvar_create(CONDVAR_TYPE_DEFAULT),
		.count = count,
	);

	return this;
}

/**
 * Destroy a thread barrier
 */
static void barrier_destroy(barrier_t *this)
{
	this->mutex->destroy(this->mutex);
	this->cond->destroy(this->cond);
	free(this);
}

/**
 * Wait to have configured number of threads in barrier.
 *
 * Blocks until count threads called this function; exactly one thread per
 * round returns TRUE (the first to leave the wait loop), all others FALSE.
 */
static bool barrier_wait(barrier_t *this)
{
	bool winner = FALSE;

	this->mutex->lock(this->mutex);
	if (!this->active)
	{	/* first, reset */
		this->active = TRUE;
		this->current = 0;
	}

	this->current++;
	while (this->current < this->count)
	{
		this->cond->wait(this->cond, this->mutex);
	}
	if (this->active)
	{	/* first, win */
		winner = TRUE;
		this->active = FALSE;
	}
	this->mutex->unlock(this->mutex);
	/* wake the other waiters; done after unlock so they can proceed
	 * immediately */
	this->cond->broadcast(this->cond);
	sched_yield();

	return winner;
}

/**
 * Barrier for some tests
 */
static barrier_t *barrier;

/**
 * A mutex for tests requiring one
 */
static mutex_t *mutex;

/**
 * A condvar for tests requiring one
 */
static condvar_t *condvar;

/**
 * A counter for signaling
 */
static int sigcount;
153:
/**
 * Worker: recursively lock the shared mutex three times and verify mutual
 * exclusion via a non-atomic counter that must never exceed 1.
 */
static void *mutex_run(void *data)
{
	int locked = 0;
	int i;

	/* wait for all threads before getting in action */
	barrier_wait(barrier);

	for (i = 0; i < 100; i++)
	{
		/* recursive locking: the mutex is created MUTEX_TYPE_RECURSIVE */
		mutex->lock(mutex);
		mutex->lock(mutex);
		mutex->lock(mutex);
		locked++;
		sched_yield();
		if (locked > 1)
		{
			fail("two threads locked the mutex concurrently");
		}
		locked--;
		mutex->unlock(mutex);
		mutex->unlock(mutex);
		mutex->unlock(mutex);
	}
	return NULL;
}

START_TEST(test_mutex)
{
	thread_t *threads[THREADS];
	int i;

	barrier = barrier_create(THREADS);
	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);

	/* single-threaded sanity checks: paired and deeply nested lock/unlock */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
		mutex->unlock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->unlock(mutex);
	}

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(mutex_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	barrier_destroy(barrier);
}
END_TEST

/**
 * Spinlock for testing
 */
static spinlock_t *spinlock;

/**
 * Worker: hammer the spinlock and verify mutual exclusion on the shared
 * counter passed via data.
 */
static void *spinlock_run(void *data)
{
	int i, *locked = (int*)data;

	barrier_wait(barrier);

	for (i = 0; i < 1000; i++)
	{
		spinlock->lock(spinlock);
		(*locked)++;
		ck_assert_int_eq(*locked, 1);
		(*locked)--;
		spinlock->unlock(spinlock);
	}
	return NULL;
}

START_TEST(test_spinlock)
{
	thread_t *threads[THREADS];
	int i, locked = 0;

	barrier = barrier_create(THREADS);
	spinlock = spinlock_create();

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(spinlock_run, &locked);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	spinlock->destroy(spinlock);
	barrier_destroy(barrier);
}
END_TEST
260:
/**
 * Worker: increment sigcount under the mutex and signal the condvar once.
 */
static void *condvar_run(void *data)
{
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run, NULL);
	}

	/* wait until every worker has signaled once */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST

/**
 * Worker: like condvar_run, but locks the recursive mutex three levels deep.
 */
static void *condvar_recursive_run(void *data)
{
	mutex->lock(mutex);
	mutex->lock(mutex);
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* hold one lock level across thread creation */
	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	/* wait() on a recursive mutex held three levels deep must release all
	 * levels and reacquire them on wakeup */
	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST

/**
 * Worker: block on the condvar until sigcount becomes positive.
 */
static void *condvar_run_broad(void *data)
{
	mutex->lock(mutex);
	while (sigcount < 0)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run_broad, NULL);
	}

	sched_yield();

	/* a single broadcast must wake all waiters */
	mutex->lock(mutex);
	sigcount = 1;
	condvar->broadcast(condvar);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
391:
START_TEST(test_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* no one signals: the relative timed wait must run into its 50ms
	 * timeout; loop until timed_wait() reports TRUE (timeout), restarting
	 * the clock on spurious wakeups */
	mutex->lock(mutex);
	while (TRUE)
	{
		time_monotonic(&start);
		if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
		{
			break;
		}
	}
	time_monotonic(&end);
	mutex->unlock(mutex);
	timersub(&end, &start, &end);
	/* the elapsed time must be at least the requested timeout */
	time_is_at_least(&diff, &end);

	/* now a worker signals: timed_wait() must return FALSE (signaled)
	 * before the generous 1s timeout */
	thread = thread_create(condvar_run, NULL);

	mutex->lock(mutex);
	while (sigcount == 0)
	{
		ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
	}
	mutex->unlock(mutex);

	thread->join(thread);
	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
429:
430: START_TEST(test_condvar_timed_abs)
431: {
432: thread_t *thread;
433: timeval_t start, end, abso, diff = { .tv_usec = 50000 };
434:
435: mutex = mutex_create(MUTEX_TYPE_DEFAULT);
436: condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
437: sigcount = 0;
438:
439: mutex->lock(mutex);
440: while (TRUE)
441: {
442: time_monotonic(&start);
443: timeradd(&start, &diff, &abso);
444: if (condvar->timed_wait_abs(condvar, mutex, abso))
445: {
446: break;
447: }
448: }
449: time_monotonic(&end);
450: mutex->unlock(mutex);
451: time_is_at_least(&diff, &end);
452:
453: thread = thread_create(condvar_run, NULL);
454:
455: time_monotonic(&start);
456: diff.tv_sec = 1;
457: timeradd(&start, &diff, &abso);
458: mutex->lock(mutex);
459: while (sigcount == 0)
460: {
461: ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
462: }
463: mutex->unlock(mutex);
464:
465: thread->join(thread);
466: mutex->destroy(mutex);
467: condvar->destroy(condvar);
468: }
469: END_TEST
470:
/**
 * Worker: announce readiness, then wait on the condvar forever with
 * cancellation enabled; the pushed cleanup handler releases the mutex
 * when the thread is cancelled inside wait().
 */
static void *condvar_cancel_run(void *data)
{
	/* don't get cancelled before the cleanup handler is in place */
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	sigcount++;
	condvar->broadcast(condvar);

	/* wait() is a cancellation point and reacquires the mutex before the
	 * cancel propagates, so the handler must unlock it */
	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}

START_TEST(test_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
526:
/**
 * RWlock for different tests
 */
static rwlock_t *rwlock;

/**
 * Worker: exercise read, try-write, write and nested read locking; refs
 * counts concurrent readers and must be 0 whenever a write lock is held.
 */
static void *rwlock_run(refcount_t *refs)
{
	/* shared read access: multiple readers may hold refs > 0 */
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	/* non-blocking write attempt: if acquired, no reader may be active */
	if (rwlock->try_write_lock(rwlock))
	{
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	/* blocking write access: exclusive, so no reader may be active */
	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	/* recursive read locking by the same thread */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}

START_TEST(test_rwlock)
{
	thread_t *threads[THREADS];
	refcount_t refs = 0;
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create((void*)rwlock_run, &refs);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
}
END_TEST

/**
 * Worker: return param if try_write_lock() succeeds, NULL otherwise.
 */
static void *rwlock_try_run(void *param)
{
	if (rwlock->try_write_lock(rwlock))
	{
		rwlock->unlock(rwlock);
		return param;
	}
	return NULL;
}

START_TEST(test_rwlock_try)
{
	uintptr_t magic = 0xcafebabe;
	thread_t *thread;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* unheld lock: try_write_lock() must succeed */
	thread = thread_create(rwlock_try_run, (void*)magic);
	ck_assert_int_eq((uintptr_t)thread->join(thread), magic);

	/* held by one reader: must fail */
	rwlock->read_lock(rwlock);
	thread = thread_create(rwlock_try_run, (void*)magic);
	ck_assert(thread->join(thread) == NULL);
	rwlock->unlock(rwlock);

	/* held recursively by readers: must fail */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	thread = thread_create(rwlock_try_run, (void*)magic);
	ck_assert(thread->join(thread) == NULL);
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	/* held by a writer: must fail */
	rwlock->write_lock(rwlock);
	thread = thread_create(rwlock_try_run, (void*)magic);
	ck_assert(thread->join(thread) == NULL);
	rwlock->unlock(rwlock);

	rwlock->destroy(rwlock);
}
END_TEST
626:
/**
 * Rwlock condvar
 */
static rwlock_condvar_t *rwcond;

/**
 * Worker: increment sigcount under the write lock and signal the rwlock
 * condvar once.
 */
static void *rwlock_condvar_run(void *data)
{
	rwlock->write_lock(rwlock);
	sigcount++;
	rwcond->signal(rwcond);
	rwlock->unlock(rwlock);
	return NULL;
}

START_TEST(test_rwlock_condvar)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run, NULL);
	}

	/* wait until every worker has signaled once */
	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST

/**
 * Worker: block on the rwlock condvar until sigcount becomes positive.
 */
static void *rwlock_condvar_run_broad(void *data)
{
	rwlock->write_lock(rwlock);
	while (sigcount < 0)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);
	return NULL;
}

START_TEST(test_rwlock_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run_broad, NULL);
	}

	sched_yield();

	/* a single broadcast must wake all waiters */
	rwlock->write_lock(rwlock);
	sigcount = 1;
	rwcond->broadcast(rwcond);
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
713:
START_TEST(test_rwlock_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	/* no one signals: the relative timed wait must run into its 50ms
	 * timeout; restart the clock on spurious wakeups */
	rwlock->write_lock(rwlock);
	while (TRUE)
	{
		time_monotonic(&start);
		if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000))
		{
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	timersub(&end, &start, &end);
	/* the elapsed time must be at least the requested timeout */
	time_is_at_least(&diff, &end);

	/* now a worker signals: timed_wait() must return FALSE (signaled)
	 * before the 1s timeout */
	thread = thread_create(rwlock_condvar_run, NULL);

	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST

START_TEST(test_rwlock_condvar_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	/* no one signals: the wait must run into the absolute deadline;
	 * recompute the deadline on spurious wakeups */
	rwlock->write_lock(rwlock);
	while (TRUE)
	{
		time_monotonic(&start);
		timeradd(&start, &diff, &abso);
		if (rwcond->timed_wait_abs(rwcond, rwlock, abso))
		{
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	/* the current time must be past the absolute deadline */
	time_is_at_least(&abso, &end);

	/* now a worker signals: timed_wait_abs() must return FALSE (signaled)
	 * before the 1s deadline */
	thread = thread_create(rwlock_condvar_run, NULL);

	time_monotonic(&start);
	diff.tv_sec = 1;
	timeradd(&start, &diff, &abso);
	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait_abs(rwcond, rwlock, abso));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
792:
/**
 * Worker: announce readiness, then wait on the rwlock condvar forever with
 * cancellation enabled; the pushed cleanup handler releases the write lock
 * when the thread is cancelled inside wait().
 */
static void *rwlock_condvar_cancel_run(void *data)
{
	/* don't get cancelled before the cleanup handler is in place */
	thread_cancelability(FALSE);

	rwlock->write_lock(rwlock);

	sigcount++;
	rwcond->broadcast(rwcond);

	/* wait() reacquires the lock before the cancel propagates, so the
	 * handler must unlock it */
	thread_cleanup_push((void*)rwlock->unlock, rwlock);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		rwcond->wait(rwcond, rwlock);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}

START_TEST(test_rwlock_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
848:
/**
 * Semaphore for different tests
 */
static semaphore_t *semaphore;

/**
 * Worker: post the semaphore once.
 */
static void *semaphore_run(void *data)
{
	semaphore->post(semaphore);
	return NULL;
}

START_TEST(test_semaphore)
{
	thread_t *threads[THREADS];
	int i, initial = 5;

	semaphore = semaphore_create(initial);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(semaphore_run, NULL);
	}
	/* consume the initial value plus one post per worker; the last waits
	 * only succeed once the workers have posted */
	for (i = 0; i < THREADS + initial; i++)
	{
		semaphore->wait(semaphore);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	semaphore->destroy(semaphore);
}
END_TEST

START_TEST(test_semaphore_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	semaphore = semaphore_create(0);

	/* no one posts: timed_wait() must time out after at least 50ms */
	time_monotonic(&start);
	ck_assert(semaphore->timed_wait(semaphore, diff.tv_usec / 1000));
	time_monotonic(&end);
	timersub(&end, &start, &end);
	time_is_at_least(&diff, &end);

	/* a worker posts: timed_wait() must succeed before the 1s timeout */
	thread = thread_create(semaphore_run, NULL);

	ck_assert(!semaphore->timed_wait(semaphore, 1000));

	thread->join(thread);
	semaphore->destroy(semaphore);
}
END_TEST

START_TEST(test_semaphore_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	semaphore = semaphore_create(0);

	/* no one posts: timed_wait_abs() must run past the absolute deadline */
	time_monotonic(&start);
	timeradd(&start, &diff, &abso);
	ck_assert(semaphore->timed_wait_abs(semaphore, abso));
	time_monotonic(&end);
	time_is_at_least(&abso, &end);

	/* a worker posts: timed_wait_abs() must succeed before the deadline */
	thread = thread_create(semaphore_run, NULL);

	time_monotonic(&start);
	diff.tv_sec = 1;
	timeradd(&start, &diff, &abso);
	ck_assert(!semaphore->timed_wait_abs(semaphore, abso));

	thread->join(thread);
	semaphore->destroy(semaphore);
}
END_TEST

/**
 * Worker: signal readiness, then block forever in wait(), which is a
 * cancellation point; the thread must never return normally.
 */
static void *semaphore_cancel_run(void *data)
{
	refcount_t *ready = (refcount_t*)data;

	thread_cancelability(FALSE);
	ref_get(ready);

	thread_cancelability(TRUE);
	semaphore->wait(semaphore);

	/* not reached: the wait is cancelled */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_semaphore_cancel)
{
	thread_t *threads[THREADS];
	refcount_t ready = 0;
	int i;

	semaphore = semaphore_create(0);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(semaphore_cancel_run, &ready);
	}
	/* spin until all workers are about to block */
	while (ready < THREADS)
	{
		sched_yield();
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	semaphore->destroy(semaphore);
}
END_TEST
973:
/**
 * Worker: return its argument plus THREADS so the joiner can verify the
 * value round-trips through join().
 */
static void *join_run(void *data)
{
	/* force some context switches */
	sched_yield();
	return (void*)((uintptr_t)data + THREADS);
}

START_TEST(test_join)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST

/**
 * Worker: terminate via thread_exit() instead of returning; the exit value
 * must still be delivered to join().
 */
static void *exit_join_run(void *data)
{
	sched_yield();
	thread_exit((void*)((uintptr_t)data + THREADS));
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_join_exit)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST

/**
 * Worker: decrement the running counter and return.
 */
static void *detach_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	return NULL;
}

START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. give the threads time to clean up. */
	usleep(10000);
}
END_TEST

/**
 * Worker: like detach_run, but terminate via thread_exit().
 */
static void *detach_exit_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. give the threads time to clean up. */
	usleep(10000);
}
END_TEST
1090:
/**
 * Worker: sleep forever; sleep() is a cancellation point, so cancel()
 * terminates the thread.
 */
static void *cancel_run(void *data)
{
	/* default cancelability should be TRUE, so don't change it */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cancel)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST

/**
 * Worker: disable cancelability for a while, then enable it; the cancel
 * issued in between must only take effect after re-enabling.
 */
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancelability */
		while (cancellable[i])
		{
			sched_yield();
		}
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* TRUE again proves the thread survived the uncancellable phase */
		ck_assert(cancellable[i]);
	}
}
END_TEST

/**
 * Worker: spin on thread_cancellation_point() with cancelability otherwise
 * disabled; only the explicit cancellation point may terminate it.
 */
static void *cancel_point_run(void *data)
{
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancelability */
		thread_cancellation_point();
	}
	return NULL;
}

START_TEST(test_cancel_point)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_point_run, NULL);
	}
	sched_yield();
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
1201:
/**
 * Cleanup handler: close the file descriptor that fd points to.
 */
static void close_fd_ptr(void *fd)
{
	int *fd_ptr = (int*)fd;

	close(*fd_ptr);
}
1206:
/**
 * Block forever in recv() on an idle socketpair; used to verify recv() acts
 * as a cancellation point. Cleanup handlers close both descriptors.
 */
static void cancellation_recv()
{
	int sv[2];
	char buf[1];

	ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);

	thread_cleanup_push(close_fd_ptr, &sv[0]);
	thread_cleanup_push(close_fd_ptr, &sv[1]);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		/* nothing is ever sent, so this blocks until cancelled */
		ck_assert(recv(sv[0], buf, sizeof(buf), 0) == 1);
	}
}

/**
 * Same as cancellation_recv(), but blocking in read().
 */
static void cancellation_read()
{
	int sv[2];
	char buf[1];

	ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);

	thread_cleanup_push(close_fd_ptr, &sv[0]);
	thread_cleanup_push(close_fd_ptr, &sv[1]);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		ck_assert(read(sv[0], buf, sizeof(buf)) == 1);
	}
}

/**
 * Same idea, blocking in select() with no timeout.
 */
static void cancellation_select()
{
	int sv[2];
	fd_set set;

	ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);

	thread_cleanup_push(close_fd_ptr, &sv[0]);
	thread_cleanup_push(close_fd_ptr, &sv[1]);

	FD_ZERO(&set);
	FD_SET(sv[0], &set);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		ck_assert(select(sv[0] + 1, &set, NULL, NULL, NULL) == 1);
	}
}

/**
 * Same idea, blocking in poll() with an infinite timeout.
 */
static void cancellation_poll()
{
	int sv[2];
	struct pollfd pfd;

	ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);

	thread_cleanup_push(close_fd_ptr, &sv[0]);
	thread_cleanup_push(close_fd_ptr, &sv[1]);

	pfd.fd = sv[0];
	pfd.events = POLLIN;
	thread_cancelability(TRUE);
	while (TRUE)
	{
		ck_assert(poll(&pfd, 1, -1) == 1);
	}
}

/**
 * Same idea, blocking in accept() on a loopback TCP listener that never
 * receives a connection.
 */
static void cancellation_accept()
{
	host_t *host;
	int fd, c;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	ck_assert(fd >= 0);
	/* port 0: let the kernel pick a free port */
	host = host_create_from_string("127.0.0.1", 0);
	ck_assert_msg(bind(fd, host->get_sockaddr(host),
				  *host->get_sockaddr_len(host)) == 0, "%m");
	host->destroy(host);
	ck_assert(listen(fd, 5) == 0);

	thread_cleanup_push(close_fd_ptr, &fd);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		c = accept(fd, NULL, NULL);
		ck_assert(c >= 0);
		close(c);
	}
}
1302:
/**
 * Block forever in condvar wait(); cleanup handlers destroy the locally
 * created mutex and condvar when the thread is cancelled.
 */
static void cancellation_cond()
{
	mutex_t *mutex;
	condvar_t *cond;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	cond = condvar_create(CONDVAR_TYPE_DEFAULT);
	mutex->lock(mutex);

	thread_cleanup_push((void*)mutex->destroy, mutex);
	thread_cleanup_push((void*)cond->destroy, cond);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		cond->wait(cond, mutex);
	}
}

/**
 * Same as cancellation_cond(), but with an rwlock/rwlock-condvar pair.
 */
static void cancellation_rwcond()
{
	rwlock_t *lock;
	rwlock_condvar_t *cond;

	lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	cond = rwlock_condvar_create();
	lock->write_lock(lock);

	thread_cleanup_push((void*)lock->destroy, lock);
	thread_cleanup_push((void*)cond->destroy, cond);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		cond->wait(cond, lock);
	}
}

/**
 * All blocking functions exercised as cancellation points; indexed by the
 * looped-test counter _i below.
 */
static void (*cancellation_points[])() = {
	cancellation_read,
	cancellation_recv,
	cancellation_select,
	cancellation_poll,
	cancellation_accept,
	cancellation_cond,
	cancellation_rwcond,
};

/**
 * Thread entry: run the given cancellation point immediately.
 */
static void* run_cancellation_point(void (*fn)())
{
	fn();
	return NULL;
}

/**
 * Thread entry: delay before entering the cancellation point, so cancel()
 * is likely issued before the thread blocks.
 */
static void* run_cancellation_point_pre(void (*fn)())
{
	usleep(5000);
	fn();
	return NULL;
}

START_TEST(test_cancellation_point)
{
	thread_t *thread;

	/* cancel while the thread is (likely) blocked in the point */
	thread = thread_create((void*)run_cancellation_point,
						   cancellation_points[_i]);
	usleep(5000);
	thread->cancel(thread);
	thread->join(thread);
}
END_TEST

START_TEST(test_cancellation_point_pre)
{
	thread_t *thread;

	/* cancel before the thread reaches the point */
	thread = thread_create((void*)run_cancellation_point_pre,
						   cancellation_points[_i]);
	thread->cancel(thread);
	thread->join(thread);
}
END_TEST
1386:
1387: static void cleanup1(void *data)
1388: {
1389: uintptr_t *value = (uintptr_t*)data;
1390:
1391: ck_assert_int_eq(*value, 1);
1392: (*value)++;
1393: }
1394:
1395: static void cleanup2(void *data)
1396: {
1397: uintptr_t *value = (uintptr_t*)data;
1398:
1399: ck_assert_int_eq(*value, 2);
1400: (*value)++;
1401: }
1402:
1403: static void cleanup3(void *data)
1404: {
1405: uintptr_t *value = (uintptr_t*)data;
1406:
1407: ck_assert_int_eq(*value, 3);
1408: (*value)++;
1409: }
1410:
1411: static void *cleanup_run(void *data)
1412: {
1413: thread_cleanup_push(cleanup3, data);
1414: thread_cleanup_push(cleanup2, data);
1415: thread_cleanup_push(cleanup1, data);
1416: return NULL;
1417: }
1418:
1419: START_TEST(test_cleanup)
1420: {
1421: thread_t *threads[THREADS];
1422: uintptr_t values[THREADS];
1423: int i;
1424:
1425: for (i = 0; i < THREADS; i++)
1426: {
1427: values[i] = 1;
1428: threads[i] = thread_create(cleanup_run, &values[i]);
1429: }
1430: for (i = 0; i < THREADS; i++)
1431: {
1432: threads[i]->join(threads[i]);
1433: ck_assert_int_eq(values[i], 4);
1434: }
1435: }
1436: END_TEST
1437:
1438: static void *cleanup_exit_run(void *data)
1439: {
1440: thread_cleanup_push(cleanup3, data);
1441: thread_cleanup_push(cleanup2, data);
1442: thread_cleanup_push(cleanup1, data);
1443: thread_exit(NULL);
1444: ck_assert(FALSE);
1445: return NULL;
1446: }
1447:
1448: START_TEST(test_cleanup_exit)
1449: {
1450: thread_t *threads[THREADS];
1451: uintptr_t values[THREADS];
1452: int i;
1453:
1454: for (i = 0; i < THREADS; i++)
1455: {
1456: values[i] = 1;
1457: threads[i] = thread_create(cleanup_exit_run, &values[i]);
1458: }
1459: for (i = 0; i < THREADS; i++)
1460: {
1461: threads[i]->join(threads[i]);
1462: ck_assert_int_eq(values[i], 4);
1463: }
1464: }
1465: END_TEST
1466:
/**
 * Thread main for the cancellation test: the handlers are pushed while
 * cancellation is disabled, so a cancel issued by the main thread after the
 * barrier cannot take effect before all three handlers are registered.
 */
static void *cleanup_cancel_run(void *data)
{
	/* keep the cancel pending until the handlers below are in place */
	thread_cancelability(FALSE);

	/* sync with the main thread, which cancels us afterwards */
	barrier_wait(barrier);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	/* sleep until the (possibly already pending) cancel terminates us */
	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}
1485:
1486: START_TEST(test_cleanup_cancel)
1487: {
1488: thread_t *threads[THREADS];
1489: uintptr_t values[THREADS];
1490: int i;
1491:
1492: barrier = barrier_create(THREADS+1);
1493: for (i = 0; i < THREADS; i++)
1494: {
1495: values[i] = 1;
1496: threads[i] = thread_create(cleanup_cancel_run, &values[i]);
1497: }
1498: barrier_wait(barrier);
1499: for (i = 0; i < THREADS; i++)
1500: {
1501: threads[i]->cancel(threads[i]);
1502: }
1503: for (i = 0; i < THREADS; i++)
1504: {
1505: threads[i]->join(threads[i]);
1506: ck_assert_int_eq(values[i], 4);
1507: }
1508: barrier_destroy(barrier);
1509: }
1510: END_TEST
1511:
/**
 * Thread main exercising thread_cleanup_pop(): one handler is popped without
 * executing it, one is popped and executed inline, and the remaining two run
 * when the thread terminates, advancing the counter 1 -> 4 overall.
 */
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	/* push an extra handler and remove it again without running it */
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	/* pop cleanup1 and execute it right away */
	thread_cleanup_pop(TRUE);
	return NULL;
}
1524:
1525: START_TEST(test_cleanup_pop)
1526: {
1527: thread_t *threads[THREADS];
1528: uintptr_t values[THREADS];
1529: int i;
1530:
1531: for (i = 0; i < THREADS; i++)
1532: {
1533: values[i] = 1;
1534: threads[i] = thread_create(cleanup_pop_run, &values[i]);
1535: }
1536: for (i = 0; i < THREADS; i++)
1537: {
1538: threads[i]->join(threads[i]);
1539: ck_assert_int_eq(values[i], 4);
1540: }
1541: }
1542: END_TEST
1543:
1544: static void *cleanup_popall_run(void *data)
1545: {
1546: thread_cleanup_push(cleanup3, data);
1547: thread_cleanup_push(cleanup2, data);
1548: thread_cleanup_push(cleanup1, data);
1549:
1550: thread_cleanup_popall();
1551: return NULL;
1552: }
1553:
1554: START_TEST(test_cleanup_popall)
1555: {
1556: thread_t *threads[THREADS];
1557: uintptr_t values[THREADS];
1558: int i;
1559:
1560: for (i = 0; i < THREADS; i++)
1561: {
1562: values[i] = 1;
1563: threads[i] = thread_create(cleanup_popall_run, &values[i]);
1564: }
1565: for (i = 0; i < THREADS; i++)
1566: {
1567: threads[i]->join(threads[i]);
1568: ck_assert_int_eq(values[i], 4);
1569: }
1570: }
1571: END_TEST
1572:
1573:
/**
 * Thread-specific value slots shared by the TLS tests below.
 */
static thread_value_t *tls[10];
1575:
1576: static void *tls_run(void *data)
1577: {
1578: uintptr_t value = (uintptr_t)data;
1579: int i, j;
1580:
1581: for (i = 0; i < countof(tls); i++)
1582: {
1583: ck_assert(tls[i]->get(tls[i]) == NULL);
1584: }
1585: for (i = 0; i < countof(tls); i++)
1586: {
1587: tls[i]->set(tls[i], (void*)(value * i));
1588: }
1589: for (j = 0; j < 1000; j++)
1590: {
1591: for (i = 0; i < countof(tls); i++)
1592: {
1593: tls[i]->set(tls[i], (void*)(value * i));
1594: ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
1595: }
1596: sched_yield();
1597: }
1598: for (i = 0; i < countof(tls); i++)
1599: {
1600: ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
1601: }
1602: return (void*)(value + 1);
1603: }
1604:
1605: START_TEST(test_tls)
1606: {
1607: thread_t *threads[THREADS];
1608: int i;
1609:
1610: for (i = 0; i < countof(tls); i++)
1611: {
1612: tls[i] = thread_value_create(NULL);
1613: }
1614: for (i = 0; i < THREADS; i++)
1615: {
1616: threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
1617: }
1618:
1619: ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
1620: THREADS + 2);
1621:
1622: for (i = 0; i < THREADS; i++)
1623: {
1624: ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
1625: }
1626: for (i = 0; i < countof(tls); i++)
1627: {
1628: tls[i]->destroy(tls[i]);
1629: }
1630: }
1631: END_TEST
1632:
/**
 * TLS destructor: decrements the per-thread counter once per value slot.
 */
static void tls_cleanup(void *data)
{
	uintptr_t *counter = data;

	--*counter;
}
1639:
1640: static void *tls_cleanup_run(void *data)
1641: {
1642: int i;
1643:
1644: for (i = 0; i < countof(tls); i++)
1645: {
1646: tls[i]->set(tls[i], data);
1647: }
1648: return NULL;
1649: }
1650:
1651: START_TEST(test_tls_cleanup)
1652: {
1653: thread_t *threads[THREADS];
1654: uintptr_t values[THREADS], main_value = countof(tls);
1655: int i;
1656:
1657: for (i = 0; i < countof(tls); i++)
1658: {
1659: tls[i] = thread_value_create(tls_cleanup);
1660: }
1661: for (i = 0; i < THREADS; i++)
1662: {
1663: values[i] = countof(tls);
1664: threads[i] = thread_create(tls_cleanup_run, &values[i]);
1665: }
1666:
1667: tls_cleanup_run(&main_value);
1668:
1669: for (i = 0; i < THREADS; i++)
1670: {
1671: threads[i]->join(threads[i]);
1672: ck_assert_int_eq(values[i], 0);
1673: }
1674: for (i = 0; i < countof(tls); i++)
1675: {
1676: tls[i]->destroy(tls[i]);
1677: }
1678: ck_assert_int_eq(main_value, 0);
1679: }
1680: END_TEST
1681:
/**
 * Create the test suite covering the threading primitives: mutexes,
 * spinlocks, condvars, rwlocks, semaphores, thread lifecycle, cancellation,
 * cleanup handlers and thread-local storage.
 *
 * @return		suite registered with all threading test cases
 */
Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("spinlock");
	tcase_add_test(tc, test_spinlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("condvar");
	tcase_add_test(tc, test_condvar);
	tcase_add_test(tc, test_condvar_recursive);
	tcase_add_test(tc, test_condvar_broad);
	tcase_add_test(tc, test_condvar_timed);
	tcase_add_test(tc, test_condvar_timed_abs);
	tcase_add_test(tc, test_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock");
	tcase_add_test(tc, test_rwlock);
	tcase_add_test(tc, test_rwlock_try);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock condvar");
	tcase_add_test(tc, test_rwlock_condvar);
	tcase_add_test(tc, test_rwlock_condvar_broad);
	tcase_add_test(tc, test_rwlock_condvar_timed);
	tcase_add_test(tc, test_rwlock_condvar_timed_abs);
	tcase_add_test(tc, test_rwlock_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("semaphore");
	tcase_add_test(tc, test_semaphore);
	tcase_add_test(tc, test_semaphore_timed);
	tcase_add_test(tc, test_semaphore_timed_abs);
	tcase_add_test(tc, test_semaphore_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	/* loop tests run once per entry in cancellation_points[] */
	tc = tcase_create("thread cancellation point");
	tcase_add_loop_test(tc, test_cancellation_point,
						0, countof(cancellation_points));
	tcase_add_loop_test(tc, test_cancellation_point_pre,
						0, countof(cancellation_points));
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	tcase_add_test(tc, test_cleanup_pop);
	tcase_add_test(tc, test_cleanup_popall);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread local storage");
	tcase_add_test(tc, test_tls);
	tcase_add_test(tc, test_tls_cleanup);
	suite_add_tcase(s, tc);

	return s;
}
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>