Return to test_threading.c CVS log | Up to [ELWIX - Embedded LightWeight unIX -] / embedaddon / strongswan / src / libstrongswan / tests / suites |
1.1 misho 1: /* 2: * Copyright (C) 2013-2018 Tobias Brunner 3: * Copyright (C) 2008 Martin Willi 4: * HSR Hochschule fuer Technik Rapperswil 5: * 6: * This program is free software; you can redistribute it and/or modify it 7: * under the terms of the GNU General Public License as published by the 8: * Free Software Foundation; either version 2 of the License, or (at your 9: * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. 10: * 11: * This program is distributed in the hope that it will be useful, but 12: * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 13: * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14: * for more details. 15: */ 16: 17: #include "test_suite.h" 18: 19: #include <unistd.h> 20: 21: #include <threading/thread.h> 22: #include <threading/mutex.h> 23: #include <threading/condvar.h> 24: #include <threading/rwlock.h> 25: #include <threading/rwlock_condvar.h> 26: #include <threading/spinlock.h> 27: #include <threading/semaphore.h> 28: #include <threading/thread_value.h> 29: 30: #ifdef WIN32 31: /* when running on AppVeyor the wait functions seem to frequently trigger a bit 32: * early, allow this if the difference is within 5ms. 
*/ 33: static inline void time_is_at_least(timeval_t *expected, timeval_t *actual) 34: { 35: if (!timercmp(actual, expected, >)) 36: { 37: timeval_t diff; 38: 39: timersub(expected, actual, &diff); 40: if (!diff.tv_sec && diff.tv_usec <= 5000) 41: { 42: warn("allow timer event %dus too early on Windows (expected: %u.%u, " 43: "actual: %u.%u)", diff.tv_usec, expected->tv_sec, 44: expected->tv_usec, actual->tv_sec, actual->tv_usec); 45: return; 46: } 47: fail("expected: %u.%u, actual: %u.%u", expected->tv_sec, 48: expected->tv_usec, actual->tv_sec, actual->tv_usec); 49: } 50: } 51: #else /* WIN32 */ 52: static inline void time_is_at_least(timeval_t *expected, timeval_t *actual) 53: { 54: ck_assert_msg(timercmp(actual, expected, >), "expected: %u.%u, actual: " 55: "%u.%u", expected->tv_sec, expected->tv_usec, actual->tv_sec, 56: actual->tv_usec); 57: } 58: #endif /* WIN32 */ 59: 60: /******************************************************************************* 61: * recursive mutex test 62: */ 63: 64: #define THREADS 20 65: 66: /** 67: * Thread barrier data 68: */ 69: typedef struct { 70: mutex_t *mutex; 71: condvar_t *cond; 72: int count; 73: int current; 74: bool active; 75: } barrier_t; 76: 77: /** 78: * Create a thread barrier for count threads 79: */ 80: static barrier_t* barrier_create(int count) 81: { 82: barrier_t *this; 83: 84: INIT(this, 85: .mutex = mutex_create(MUTEX_TYPE_DEFAULT), 86: .cond = condvar_create(CONDVAR_TYPE_DEFAULT), 87: .count = count, 88: ); 89: 90: return this; 91: } 92: 93: /** 94: * Destroy a thread barrier 95: */ 96: static void barrier_destroy(barrier_t *this) 97: { 98: this->mutex->destroy(this->mutex); 99: this->cond->destroy(this->cond); 100: free(this); 101: } 102: 103: /** 104: * Wait to have configured number of threads in barrier 105: */ 106: static bool barrier_wait(barrier_t *this) 107: { 108: bool winner = FALSE; 109: 110: this->mutex->lock(this->mutex); 111: if (!this->active) 112: { /* first, reset */ 113: this->active = 
TRUE; 114: this->current = 0; 115: } 116: 117: this->current++; 118: while (this->current < this->count) 119: { 120: this->cond->wait(this->cond, this->mutex); 121: } 122: if (this->active) 123: { /* first, win */ 124: winner = TRUE; 125: this->active = FALSE; 126: } 127: this->mutex->unlock(this->mutex); 128: this->cond->broadcast(this->cond); 129: sched_yield(); 130: 131: return winner; 132: } 133: 134: /** 135: * Barrier for some tests 136: */ 137: static barrier_t *barrier; 138: 139: /** 140: * A mutex for tests requiring one 141: */ 142: static mutex_t *mutex; 143: 144: /** 145: * A condvar for tests requiring one 146: */ 147: static condvar_t *condvar; 148: 149: /** 150: * A counter for signaling 151: */ 152: static int sigcount; 153: 154: static void *mutex_run(void *data) 155: { 156: int locked = 0; 157: int i; 158: 159: /* wait for all threads before getting in action */ 160: barrier_wait(barrier); 161: 162: for (i = 0; i < 100; i++) 163: { 164: mutex->lock(mutex); 165: mutex->lock(mutex); 166: mutex->lock(mutex); 167: locked++; 168: sched_yield(); 169: if (locked > 1) 170: { 171: fail("two threads locked the mutex concurrently"); 172: } 173: locked--; 174: mutex->unlock(mutex); 175: mutex->unlock(mutex); 176: mutex->unlock(mutex); 177: } 178: return NULL; 179: } 180: 181: START_TEST(test_mutex) 182: { 183: thread_t *threads[THREADS]; 184: int i; 185: 186: barrier = barrier_create(THREADS); 187: mutex = mutex_create(MUTEX_TYPE_RECURSIVE); 188: 189: for (i = 0; i < 10; i++) 190: { 191: mutex->lock(mutex); 192: mutex->unlock(mutex); 193: } 194: for (i = 0; i < 10; i++) 195: { 196: mutex->lock(mutex); 197: } 198: for (i = 0; i < 10; i++) 199: { 200: mutex->unlock(mutex); 201: } 202: 203: for (i = 0; i < THREADS; i++) 204: { 205: threads[i] = thread_create(mutex_run, NULL); 206: } 207: for (i = 0; i < THREADS; i++) 208: { 209: threads[i]->join(threads[i]); 210: } 211: 212: mutex->destroy(mutex); 213: barrier_destroy(barrier); 214: } 215: END_TEST 216: 217: /** 
218: * Spinlock for testing 219: */ 220: static spinlock_t *spinlock; 221: 222: static void *spinlock_run(void *data) 223: { 224: int i, *locked = (int*)data; 225: 226: barrier_wait(barrier); 227: 228: for (i = 0; i < 1000; i++) 229: { 230: spinlock->lock(spinlock); 231: (*locked)++; 232: ck_assert_int_eq(*locked, 1); 233: (*locked)--; 234: spinlock->unlock(spinlock); 235: } 236: return NULL; 237: } 238: 239: START_TEST(test_spinlock) 240: { 241: thread_t *threads[THREADS]; 242: int i, locked = 0; 243: 244: barrier = barrier_create(THREADS); 245: spinlock = spinlock_create(); 246: 247: for (i = 0; i < THREADS; i++) 248: { 249: threads[i] = thread_create(spinlock_run, &locked); 250: } 251: for (i = 0; i < THREADS; i++) 252: { 253: threads[i]->join(threads[i]); 254: } 255: 256: spinlock->destroy(spinlock); 257: barrier_destroy(barrier); 258: } 259: END_TEST 260: 261: static void *condvar_run(void *data) 262: { 263: mutex->lock(mutex); 264: sigcount++; 265: condvar->signal(condvar); 266: mutex->unlock(mutex); 267: return NULL; 268: } 269: 270: START_TEST(test_condvar) 271: { 272: thread_t *threads[THREADS]; 273: int i; 274: 275: mutex = mutex_create(MUTEX_TYPE_DEFAULT); 276: condvar = condvar_create(CONDVAR_TYPE_DEFAULT); 277: sigcount = 0; 278: 279: for (i = 0; i < THREADS; i++) 280: { 281: threads[i] = thread_create(condvar_run, NULL); 282: } 283: 284: mutex->lock(mutex); 285: while (sigcount < THREADS) 286: { 287: condvar->wait(condvar, mutex); 288: } 289: mutex->unlock(mutex); 290: 291: for (i = 0; i < THREADS; i++) 292: { 293: threads[i]->join(threads[i]); 294: } 295: 296: mutex->destroy(mutex); 297: condvar->destroy(condvar); 298: } 299: END_TEST 300: 301: static void *condvar_recursive_run(void *data) 302: { 303: mutex->lock(mutex); 304: mutex->lock(mutex); 305: mutex->lock(mutex); 306: sigcount++; 307: condvar->signal(condvar); 308: mutex->unlock(mutex); 309: mutex->unlock(mutex); 310: mutex->unlock(mutex); 311: return NULL; 312: } 313: 314: 
START_TEST(test_condvar_recursive) 315: { 316: thread_t *threads[THREADS]; 317: int i; 318: 319: mutex = mutex_create(MUTEX_TYPE_RECURSIVE); 320: condvar = condvar_create(CONDVAR_TYPE_DEFAULT); 321: sigcount = 0; 322: 323: mutex->lock(mutex); 324: 325: for (i = 0; i < THREADS; i++) 326: { 327: threads[i] = thread_create(condvar_recursive_run, NULL); 328: } 329: 330: mutex->lock(mutex); 331: mutex->lock(mutex); 332: while (sigcount < THREADS) 333: { 334: condvar->wait(condvar, mutex); 335: } 336: mutex->unlock(mutex); 337: mutex->unlock(mutex); 338: mutex->unlock(mutex); 339: 340: for (i = 0; i < THREADS; i++) 341: { 342: threads[i]->join(threads[i]); 343: } 344: 345: mutex->destroy(mutex); 346: condvar->destroy(condvar); 347: } 348: END_TEST 349: 350: static void *condvar_run_broad(void *data) 351: { 352: mutex->lock(mutex); 353: while (sigcount < 0) 354: { 355: condvar->wait(condvar, mutex); 356: } 357: mutex->unlock(mutex); 358: return NULL; 359: } 360: 361: START_TEST(test_condvar_broad) 362: { 363: thread_t *threads[THREADS]; 364: int i; 365: 366: mutex = mutex_create(MUTEX_TYPE_DEFAULT); 367: condvar = condvar_create(CONDVAR_TYPE_DEFAULT); 368: sigcount = 0; 369: 370: for (i = 0; i < THREADS; i++) 371: { 372: threads[i] = thread_create(condvar_run_broad, NULL); 373: } 374: 375: sched_yield(); 376: 377: mutex->lock(mutex); 378: sigcount = 1; 379: condvar->broadcast(condvar); 380: mutex->unlock(mutex); 381: 382: for (i = 0; i < THREADS; i++) 383: { 384: threads[i]->join(threads[i]); 385: } 386: 387: mutex->destroy(mutex); 388: condvar->destroy(condvar); 389: } 390: END_TEST 391: 392: START_TEST(test_condvar_timed) 393: { 394: thread_t *thread; 395: timeval_t start, end, diff = { .tv_usec = 50000 }; 396: 397: mutex = mutex_create(MUTEX_TYPE_DEFAULT); 398: condvar = condvar_create(CONDVAR_TYPE_DEFAULT); 399: sigcount = 0; 400: 401: mutex->lock(mutex); 402: while (TRUE) 403: { 404: time_monotonic(&start); 405: if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 
1000)) 406: { 407: break; 408: } 409: } 410: time_monotonic(&end); 411: mutex->unlock(mutex); 412: timersub(&end, &start, &end); 413: time_is_at_least(&diff, &end); 414: 415: thread = thread_create(condvar_run, NULL); 416: 417: mutex->lock(mutex); 418: while (sigcount == 0) 419: { 420: ck_assert(!condvar->timed_wait(condvar, mutex, 1000)); 421: } 422: mutex->unlock(mutex); 423: 424: thread->join(thread); 425: mutex->destroy(mutex); 426: condvar->destroy(condvar); 427: } 428: END_TEST 429: 430: START_TEST(test_condvar_timed_abs) 431: { 432: thread_t *thread; 433: timeval_t start, end, abso, diff = { .tv_usec = 50000 }; 434: 435: mutex = mutex_create(MUTEX_TYPE_DEFAULT); 436: condvar = condvar_create(CONDVAR_TYPE_DEFAULT); 437: sigcount = 0; 438: 439: mutex->lock(mutex); 440: while (TRUE) 441: { 442: time_monotonic(&start); 443: timeradd(&start, &diff, &abso); 444: if (condvar->timed_wait_abs(condvar, mutex, abso)) 445: { 446: break; 447: } 448: } 449: time_monotonic(&end); 450: mutex->unlock(mutex); 451: time_is_at_least(&diff, &end); 452: 453: thread = thread_create(condvar_run, NULL); 454: 455: time_monotonic(&start); 456: diff.tv_sec = 1; 457: timeradd(&start, &diff, &abso); 458: mutex->lock(mutex); 459: while (sigcount == 0) 460: { 461: ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso)); 462: } 463: mutex->unlock(mutex); 464: 465: thread->join(thread); 466: mutex->destroy(mutex); 467: condvar->destroy(condvar); 468: } 469: END_TEST 470: 471: static void *condvar_cancel_run(void *data) 472: { 473: thread_cancelability(FALSE); 474: 475: mutex->lock(mutex); 476: 477: sigcount++; 478: condvar->broadcast(condvar); 479: 480: thread_cleanup_push((void*)mutex->unlock, mutex); 481: thread_cancelability(TRUE); 482: while (TRUE) 483: { 484: condvar->wait(condvar, mutex); 485: } 486: thread_cleanup_pop(TRUE); 487: 488: return NULL; 489: } 490: 491: START_TEST(test_condvar_cancel) 492: { 493: thread_t *threads[THREADS]; 494: int i; 495: 496: mutex = 
mutex_create(MUTEX_TYPE_DEFAULT); 497: condvar = condvar_create(CONDVAR_TYPE_DEFAULT); 498: sigcount = 0; 499: 500: for (i = 0; i < THREADS; i++) 501: { 502: threads[i] = thread_create(condvar_cancel_run, NULL); 503: } 504: 505: /* wait for all threads */ 506: mutex->lock(mutex); 507: while (sigcount < THREADS) 508: { 509: condvar->wait(condvar, mutex); 510: } 511: mutex->unlock(mutex); 512: 513: for (i = 0; i < THREADS; i++) 514: { 515: threads[i]->cancel(threads[i]); 516: } 517: for (i = 0; i < THREADS; i++) 518: { 519: threads[i]->join(threads[i]); 520: } 521: 522: mutex->destroy(mutex); 523: condvar->destroy(condvar); 524: } 525: END_TEST 526: 527: /** 528: * RWlock for different tests 529: */ 530: static rwlock_t *rwlock; 531: 532: static void *rwlock_run(refcount_t *refs) 533: { 534: rwlock->read_lock(rwlock); 535: ref_get(refs); 536: sched_yield(); 537: ignore_result(ref_put(refs)); 538: rwlock->unlock(rwlock); 539: 540: if (rwlock->try_write_lock(rwlock)) 541: { 542: ck_assert_int_eq(*refs, 0); 543: sched_yield(); 544: rwlock->unlock(rwlock); 545: } 546: 547: rwlock->write_lock(rwlock); 548: ck_assert_int_eq(*refs, 0); 549: sched_yield(); 550: rwlock->unlock(rwlock); 551: 552: rwlock->read_lock(rwlock); 553: rwlock->read_lock(rwlock); 554: ref_get(refs); 555: sched_yield(); 556: ignore_result(ref_put(refs)); 557: rwlock->unlock(rwlock); 558: rwlock->unlock(rwlock); 559: 560: return NULL; 561: } 562: 563: START_TEST(test_rwlock) 564: { 565: thread_t *threads[THREADS]; 566: refcount_t refs = 0; 567: int i; 568: 569: rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT); 570: 571: for (i = 0; i < THREADS; i++) 572: { 573: threads[i] = thread_create((void*)rwlock_run, &refs); 574: } 575: for (i = 0; i < THREADS; i++) 576: { 577: threads[i]->join(threads[i]); 578: } 579: 580: rwlock->destroy(rwlock); 581: } 582: END_TEST 583: 584: static void *rwlock_try_run(void *param) 585: { 586: if (rwlock->try_write_lock(rwlock)) 587: { 588: rwlock->unlock(rwlock); 589: return param; 
590: } 591: return NULL; 592: } 593: 594: START_TEST(test_rwlock_try) 595: { 596: uintptr_t magic = 0xcafebabe; 597: thread_t *thread; 598: 599: rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT); 600: 601: thread = thread_create(rwlock_try_run, (void*)magic); 602: ck_assert_int_eq((uintptr_t)thread->join(thread), magic); 603: 604: rwlock->read_lock(rwlock); 605: thread = thread_create(rwlock_try_run, (void*)magic); 606: ck_assert(thread->join(thread) == NULL); 607: rwlock->unlock(rwlock); 608: 609: rwlock->read_lock(rwlock); 610: rwlock->read_lock(rwlock); 611: rwlock->read_lock(rwlock); 612: thread = thread_create(rwlock_try_run, (void*)magic); 613: ck_assert(thread->join(thread) == NULL); 614: rwlock->unlock(rwlock); 615: rwlock->unlock(rwlock); 616: rwlock->unlock(rwlock); 617: 618: rwlock->write_lock(rwlock); 619: thread = thread_create(rwlock_try_run, (void*)magic); 620: ck_assert(thread->join(thread) == NULL); 621: rwlock->unlock(rwlock); 622: 623: rwlock->destroy(rwlock); 624: } 625: END_TEST 626: 627: /** 628: * Rwlock condvar 629: */ 630: static rwlock_condvar_t *rwcond; 631: 632: static void *rwlock_condvar_run(void *data) 633: { 634: rwlock->write_lock(rwlock); 635: sigcount++; 636: rwcond->signal(rwcond); 637: rwlock->unlock(rwlock); 638: return NULL; 639: } 640: 641: START_TEST(test_rwlock_condvar) 642: { 643: thread_t *threads[THREADS]; 644: int i; 645: 646: rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT); 647: rwcond = rwlock_condvar_create(); 648: sigcount = 0; 649: 650: for (i = 0; i < THREADS; i++) 651: { 652: threads[i] = thread_create(rwlock_condvar_run, NULL); 653: } 654: 655: rwlock->write_lock(rwlock); 656: while (sigcount < THREADS) 657: { 658: rwcond->wait(rwcond, rwlock); 659: } 660: rwlock->unlock(rwlock); 661: 662: for (i = 0; i < THREADS; i++) 663: { 664: threads[i]->join(threads[i]); 665: } 666: 667: rwlock->destroy(rwlock); 668: rwcond->destroy(rwcond); 669: } 670: END_TEST 671: 672: static void *rwlock_condvar_run_broad(void *data) 673: { 674: 
rwlock->write_lock(rwlock); 675: while (sigcount < 0) 676: { 677: rwcond->wait(rwcond, rwlock); 678: } 679: rwlock->unlock(rwlock); 680: return NULL; 681: } 682: 683: START_TEST(test_rwlock_condvar_broad) 684: { 685: thread_t *threads[THREADS]; 686: int i; 687: 688: rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT); 689: rwcond = rwlock_condvar_create(); 690: sigcount = 0; 691: 692: for (i = 0; i < THREADS; i++) 693: { 694: threads[i] = thread_create(rwlock_condvar_run_broad, NULL); 695: } 696: 697: sched_yield(); 698: 699: rwlock->write_lock(rwlock); 700: sigcount = 1; 701: rwcond->broadcast(rwcond); 702: rwlock->unlock(rwlock); 703: 704: for (i = 0; i < THREADS; i++) 705: { 706: threads[i]->join(threads[i]); 707: } 708: 709: rwlock->destroy(rwlock); 710: rwcond->destroy(rwcond); 711: } 712: END_TEST 713: 714: START_TEST(test_rwlock_condvar_timed) 715: { 716: thread_t *thread; 717: timeval_t start, end, diff = { .tv_usec = 50000 }; 718: 719: rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT); 720: rwcond = rwlock_condvar_create(); 721: sigcount = 0; 722: 723: rwlock->write_lock(rwlock); 724: while (TRUE) 725: { 726: time_monotonic(&start); 727: if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000)) 728: { 729: break; 730: } 731: } 732: rwlock->unlock(rwlock); 733: time_monotonic(&end); 734: timersub(&end, &start, &end); 735: time_is_at_least(&diff, &end); 736: 737: thread = thread_create(rwlock_condvar_run, NULL); 738: 739: rwlock->write_lock(rwlock); 740: while (sigcount == 0) 741: { 742: ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000)); 743: } 744: rwlock->unlock(rwlock); 745: 746: thread->join(thread); 747: rwlock->destroy(rwlock); 748: rwcond->destroy(rwcond); 749: } 750: END_TEST 751: 752: START_TEST(test_rwlock_condvar_timed_abs) 753: { 754: thread_t *thread; 755: timeval_t start, end, abso, diff = { .tv_usec = 50000 }; 756: 757: rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT); 758: rwcond = rwlock_condvar_create(); 759: sigcount = 0; 760: 761: 
rwlock->write_lock(rwlock); 762: while (TRUE) 763: { 764: time_monotonic(&start); 765: timeradd(&start, &diff, &abso); 766: if (rwcond->timed_wait_abs(rwcond, rwlock, abso)) 767: { 768: break; 769: } 770: } 771: rwlock->unlock(rwlock); 772: time_monotonic(&end); 773: time_is_at_least(&abso, &end); 774: 775: thread = thread_create(rwlock_condvar_run, NULL); 776: 777: time_monotonic(&start); 778: diff.tv_sec = 1; 779: timeradd(&start, &diff, &abso); 780: rwlock->write_lock(rwlock); 781: while (sigcount == 0) 782: { 783: ck_assert(!rwcond->timed_wait_abs(rwcond, rwlock, abso)); 784: } 785: rwlock->unlock(rwlock); 786: 787: thread->join(thread); 788: rwlock->destroy(rwlock); 789: rwcond->destroy(rwcond); 790: } 791: END_TEST 792: 793: static void *rwlock_condvar_cancel_run(void *data) 794: { 795: thread_cancelability(FALSE); 796: 797: rwlock->write_lock(rwlock); 798: 799: sigcount++; 800: rwcond->broadcast(rwcond); 801: 802: thread_cleanup_push((void*)rwlock->unlock, rwlock); 803: thread_cancelability(TRUE); 804: while (TRUE) 805: { 806: rwcond->wait(rwcond, rwlock); 807: } 808: thread_cleanup_pop(TRUE); 809: 810: return NULL; 811: } 812: 813: START_TEST(test_rwlock_condvar_cancel) 814: { 815: thread_t *threads[THREADS]; 816: int i; 817: 818: rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT); 819: rwcond = rwlock_condvar_create(); 820: sigcount = 0; 821: 822: for (i = 0; i < THREADS; i++) 823: { 824: threads[i] = thread_create(rwlock_condvar_cancel_run, NULL); 825: } 826: 827: /* wait for all threads */ 828: rwlock->write_lock(rwlock); 829: while (sigcount < THREADS) 830: { 831: rwcond->wait(rwcond, rwlock); 832: } 833: rwlock->unlock(rwlock); 834: 835: for (i = 0; i < THREADS; i++) 836: { 837: threads[i]->cancel(threads[i]); 838: } 839: for (i = 0; i < THREADS; i++) 840: { 841: threads[i]->join(threads[i]); 842: } 843: 844: rwlock->destroy(rwlock); 845: rwcond->destroy(rwcond); 846: } 847: END_TEST 848: 849: /** 850: * Semaphore for different tests 851: */ 852: static 
semaphore_t *semaphore; 853: 854: static void *semaphore_run(void *data) 855: { 856: semaphore->post(semaphore); 857: return NULL; 858: } 859: 860: START_TEST(test_semaphore) 861: { 862: thread_t *threads[THREADS]; 863: int i, initial = 5; 864: 865: semaphore = semaphore_create(initial); 866: 867: for (i = 0; i < THREADS; i++) 868: { 869: threads[i] = thread_create(semaphore_run, NULL); 870: } 871: for (i = 0; i < THREADS + initial; i++) 872: { 873: semaphore->wait(semaphore); 874: } 875: for (i = 0; i < THREADS; i++) 876: { 877: threads[i]->join(threads[i]); 878: } 879: 880: semaphore->destroy(semaphore); 881: } 882: END_TEST 883: 884: START_TEST(test_semaphore_timed) 885: { 886: thread_t *thread; 887: timeval_t start, end, diff = { .tv_usec = 50000 }; 888: 889: semaphore = semaphore_create(0); 890: 891: time_monotonic(&start); 892: ck_assert(semaphore->timed_wait(semaphore, diff.tv_usec / 1000)); 893: time_monotonic(&end); 894: timersub(&end, &start, &end); 895: time_is_at_least(&diff, &end); 896: 897: thread = thread_create(semaphore_run, NULL); 898: 899: ck_assert(!semaphore->timed_wait(semaphore, 1000)); 900: 901: thread->join(thread); 902: semaphore->destroy(semaphore); 903: } 904: END_TEST 905: 906: START_TEST(test_semaphore_timed_abs) 907: { 908: thread_t *thread; 909: timeval_t start, end, abso, diff = { .tv_usec = 50000 }; 910: 911: semaphore = semaphore_create(0); 912: 913: time_monotonic(&start); 914: timeradd(&start, &diff, &abso); 915: ck_assert(semaphore->timed_wait_abs(semaphore, abso)); 916: time_monotonic(&end); 917: time_is_at_least(&abso, &end); 918: 919: thread = thread_create(semaphore_run, NULL); 920: 921: time_monotonic(&start); 922: diff.tv_sec = 1; 923: timeradd(&start, &diff, &abso); 924: ck_assert(!semaphore->timed_wait_abs(semaphore, abso)); 925: 926: thread->join(thread); 927: semaphore->destroy(semaphore); 928: } 929: END_TEST 930: 931: static void *semaphore_cancel_run(void *data) 932: { 933: refcount_t *ready = (refcount_t*)data; 
934: 935: thread_cancelability(FALSE); 936: ref_get(ready); 937: 938: thread_cancelability(TRUE); 939: semaphore->wait(semaphore); 940: 941: ck_assert(FALSE); 942: return NULL; 943: } 944: 945: START_TEST(test_semaphore_cancel) 946: { 947: thread_t *threads[THREADS]; 948: refcount_t ready = 0; 949: int i; 950: 951: semaphore = semaphore_create(0); 952: 953: for (i = 0; i < THREADS; i++) 954: { 955: threads[i] = thread_create(semaphore_cancel_run, &ready); 956: } 957: while (ready < THREADS) 958: { 959: sched_yield(); 960: } 961: for (i = 0; i < THREADS; i++) 962: { 963: threads[i]->cancel(threads[i]); 964: } 965: for (i = 0; i < THREADS; i++) 966: { 967: threads[i]->join(threads[i]); 968: } 969: 970: semaphore->destroy(semaphore); 971: } 972: END_TEST 973: 974: static void *join_run(void *data) 975: { 976: /* force some context switches */ 977: sched_yield(); 978: return (void*)((uintptr_t)data + THREADS); 979: } 980: 981: START_TEST(test_join) 982: { 983: thread_t *threads[THREADS]; 984: int i; 985: 986: for (i = 0; i < THREADS; i++) 987: { 988: threads[i] = thread_create(join_run, (void*)(uintptr_t)i); 989: } 990: for (i = 0; i < THREADS; i++) 991: { 992: ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS); 993: } 994: } 995: END_TEST 996: 997: static void *exit_join_run(void *data) 998: { 999: sched_yield(); 1000: thread_exit((void*)((uintptr_t)data + THREADS)); 1001: /* not reached */ 1002: ck_assert(FALSE); 1003: return NULL; 1004: } 1005: 1006: START_TEST(test_join_exit) 1007: { 1008: thread_t *threads[THREADS]; 1009: int i; 1010: 1011: for (i = 0; i < THREADS; i++) 1012: { 1013: threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i); 1014: } 1015: for (i = 0; i < THREADS; i++) 1016: { 1017: ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS); 1018: } 1019: } 1020: END_TEST 1021: 1022: static void *detach_run(void *data) 1023: { 1024: refcount_t *running = (refcount_t*)data; 1025: 1026: 
ignore_result(ref_put(running)); 1027: return NULL; 1028: } 1029: 1030: START_TEST(test_detach) 1031: { 1032: thread_t *threads[THREADS]; 1033: int i; 1034: refcount_t running = 0; 1035: 1036: for (i = 0; i < THREADS; i++) 1037: { 1038: ref_get(&running); 1039: threads[i] = thread_create(detach_run, &running); 1040: } 1041: for (i = 0; i < THREADS; i++) 1042: { 1043: threads[i]->detach(threads[i]); 1044: } 1045: while (running > 0) 1046: { 1047: sched_yield(); 1048: } 1049: /* no checks done here, but we check that thread state gets cleaned 1050: * up with leak detective. give the threads time to clean up. */ 1051: usleep(10000); 1052: } 1053: END_TEST 1054: 1055: static void *detach_exit_run(void *data) 1056: { 1057: refcount_t *running = (refcount_t*)data; 1058: 1059: ignore_result(ref_put(running)); 1060: thread_exit(NULL); 1061: /* not reached */ 1062: ck_assert(FALSE); 1063: return NULL; 1064: } 1065: 1066: START_TEST(test_detach_exit) 1067: { 1068: thread_t *threads[THREADS]; 1069: int i; 1070: refcount_t running = 0; 1071: 1072: for (i = 0; i < THREADS; i++) 1073: { 1074: ref_get(&running); 1075: threads[i] = thread_create(detach_exit_run, &running); 1076: } 1077: for (i = 0; i < THREADS; i++) 1078: { 1079: threads[i]->detach(threads[i]); 1080: } 1081: while (running > 0) 1082: { 1083: sched_yield(); 1084: } 1085: /* no checks done here, but we check that thread state gets cleaned 1086: * up with leak detective. give the threads time to clean up. 
*/ 1087: usleep(10000); 1088: } 1089: END_TEST 1090: 1091: static void *cancel_run(void *data) 1092: { 1093: /* default cancelability should be TRUE, so don't change it */ 1094: while (TRUE) 1095: { 1096: sleep(10); 1097: } 1098: return NULL; 1099: } 1100: 1101: START_TEST(test_cancel) 1102: { 1103: thread_t *threads[THREADS]; 1104: int i; 1105: 1106: for (i = 0; i < THREADS; i++) 1107: { 1108: threads[i] = thread_create(cancel_run, NULL); 1109: } 1110: for (i = 0; i < THREADS; i++) 1111: { 1112: threads[i]->cancel(threads[i]); 1113: } 1114: for (i = 0; i < THREADS; i++) 1115: { 1116: threads[i]->join(threads[i]); 1117: } 1118: } 1119: END_TEST 1120: 1.1.1.2 ! misho 1121: typedef struct { ! 1122: semaphore_t *sem; ! 1123: bool cancellable; ! 1124: } cancel_onoff_data_t; ! 1125: ! 1126: static void *cancel_onoff_run(void *data_in) 1.1 misho 1127: { 1.1.1.2 ! misho 1128: cancel_onoff_data_t *data = (cancel_onoff_data_t*)data_in; 1.1 misho 1129: 1130: thread_cancelability(FALSE); 1.1.1.2 ! misho 1131: data->cancellable = FALSE; 1.1 misho 1132: 1133: /* we should not get cancelled here */ 1.1.1.2 ! misho 1134: data->sem->wait(data->sem); 1.1 misho 1135: 1.1.1.2 ! misho 1136: data->cancellable = TRUE; 1.1 misho 1137: thread_cancelability(TRUE); 1138: 1139: /* but here */ 1140: while (TRUE) 1141: { 1142: sleep(10); 1143: } 1144: return NULL; 1145: } 1146: 1147: START_TEST(test_cancel_onoff) 1148: { 1149: thread_t *threads[THREADS]; 1.1.1.2 ! misho 1150: cancel_onoff_data_t data[THREADS]; ! 1151: semaphore_t *sem; 1.1 misho 1152: int i; 1153: 1.1.1.2 ! misho 1154: sem = semaphore_create(0); 1.1 misho 1155: for (i = 0; i < THREADS; i++) 1156: { 1.1.1.2 ! misho 1157: data[i].sem = sem; ! 1158: data[i].cancellable = TRUE; ! 1159: threads[i] = thread_create(cancel_onoff_run, &data[i]); 1.1 misho 1160: /* wait until thread has cleared its cancelability */ 1.1.1.2 ! misho 1161: while (data[i].cancellable) 1.1 misho 1162: { 1163: sched_yield(); 1164: } 1.1.1.2 ! misho 1165: } ! 
1166: for (i = 0; i < THREADS; i++) ! 1167: { 1.1 misho 1168: threads[i]->cancel(threads[i]); 1169: } 1.1.1.2 ! misho 1170: /* let all threads continue */ ! 1171: for (i = 0; i < THREADS; i++) ! 1172: { ! 1173: sem->post(sem); ! 1174: } 1.1 misho 1175: for (i = 0; i < THREADS; i++) 1176: { 1177: threads[i]->join(threads[i]); 1.1.1.2 ! misho 1178: ck_assert(data[i].cancellable); 1.1 misho 1179: } 1.1.1.2 ! misho 1180: sem->destroy(sem); 1.1 misho 1181: } 1182: END_TEST 1183: 1184: static void *cancel_point_run(void *data) 1185: { 1186: thread_cancelability(FALSE); 1187: while (TRUE) 1188: { 1189: /* implicitly enables cancelability */ 1190: thread_cancellation_point(); 1191: } 1192: return NULL; 1193: } 1194: 1195: START_TEST(test_cancel_point) 1196: { 1197: thread_t *threads[THREADS]; 1198: int i; 1199: 1200: for (i = 0; i < THREADS; i++) 1201: { 1202: threads[i] = thread_create(cancel_point_run, NULL); 1203: } 1204: sched_yield(); 1205: for (i = 0; i < THREADS; i++) 1206: { 1207: threads[i]->cancel(threads[i]); 1208: } 1209: for (i = 0; i < THREADS; i++) 1210: { 1211: threads[i]->join(threads[i]); 1212: } 1213: } 1214: END_TEST 1215: 1216: static void close_fd_ptr(void *fd) 1217: { 1218: close(*(int*)fd); 1219: } 1220: 1221: static void cancellation_recv() 1222: { 1223: int sv[2]; 1224: char buf[1]; 1225: 1226: ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0); 1227: 1228: thread_cleanup_push(close_fd_ptr, &sv[0]); 1229: thread_cleanup_push(close_fd_ptr, &sv[1]); 1230: 1231: thread_cancelability(TRUE); 1232: while (TRUE) 1233: { 1234: ck_assert(recv(sv[0], buf, sizeof(buf), 0) == 1); 1235: } 1236: } 1237: 1238: static void cancellation_read() 1239: { 1240: int sv[2]; 1241: char buf[1]; 1242: 1243: ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0); 1244: 1245: thread_cleanup_push(close_fd_ptr, &sv[0]); 1246: thread_cleanup_push(close_fd_ptr, &sv[1]); 1247: 1248: thread_cancelability(TRUE); 1249: while (TRUE) 1250: { 1251: ck_assert(read(sv[0], buf, 
sizeof(buf)) == 1); 1252: } 1253: } 1254: 1255: static void cancellation_select() 1256: { 1257: int sv[2]; 1258: fd_set set; 1259: 1260: ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0); 1261: 1262: thread_cleanup_push(close_fd_ptr, &sv[0]); 1263: thread_cleanup_push(close_fd_ptr, &sv[1]); 1264: 1265: FD_ZERO(&set); 1266: FD_SET(sv[0], &set); 1267: thread_cancelability(TRUE); 1268: while (TRUE) 1269: { 1270: ck_assert(select(sv[0] + 1, &set, NULL, NULL, NULL) == 1); 1271: } 1272: } 1273: 1274: static void cancellation_poll() 1275: { 1276: int sv[2]; 1277: struct pollfd pfd; 1278: 1279: ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0); 1280: 1281: thread_cleanup_push(close_fd_ptr, &sv[0]); 1282: thread_cleanup_push(close_fd_ptr, &sv[1]); 1283: 1284: pfd.fd = sv[0]; 1285: pfd.events = POLLIN; 1286: thread_cancelability(TRUE); 1287: while (TRUE) 1288: { 1289: ck_assert(poll(&pfd, 1, -1) == 1); 1290: } 1291: } 1292: 1293: static void cancellation_accept() 1294: { 1295: host_t *host; 1296: int fd, c; 1297: 1298: fd = socket(AF_INET, SOCK_STREAM, 0); 1299: ck_assert(fd >= 0); 1300: host = host_create_from_string("127.0.0.1", 0); 1301: ck_assert_msg(bind(fd, host->get_sockaddr(host), 1302: *host->get_sockaddr_len(host)) == 0, "%m"); 1303: host->destroy(host); 1304: ck_assert(listen(fd, 5) == 0); 1305: 1306: thread_cleanup_push(close_fd_ptr, &fd); 1307: 1308: thread_cancelability(TRUE); 1309: while (TRUE) 1310: { 1311: c = accept(fd, NULL, NULL); 1312: ck_assert(c >= 0); 1313: close(c); 1314: } 1315: } 1316: 1317: static void cancellation_cond() 1318: { 1319: mutex_t *mutex; 1320: condvar_t *cond; 1321: 1322: mutex = mutex_create(MUTEX_TYPE_DEFAULT); 1323: cond = condvar_create(CONDVAR_TYPE_DEFAULT); 1324: mutex->lock(mutex); 1325: 1326: thread_cleanup_push((void*)mutex->destroy, mutex); 1327: thread_cleanup_push((void*)cond->destroy, cond); 1328: 1329: thread_cancelability(TRUE); 1330: while (TRUE) 1331: { 1332: cond->wait(cond, mutex); 1333: } 1334: } 1335: 
/**
 * Cancellation point: waiting on a rwlock condvar that is never signaled.
 */
static void cancellation_rwcond()
{
	rwlock_t *lock;
	rwlock_condvar_t *cond;

	lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	cond = rwlock_condvar_create();
	lock->write_lock(lock);

	/* destroy lock/condvar when the thread gets canceled inside wait() */
	thread_cleanup_push((void*)lock->destroy, lock);
	thread_cleanup_push((void*)cond->destroy, cond);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		cond->wait(cond, lock);
	}
}

/**
 * All cancellation point functions exercised by the loop tests below
 */
static void (*cancellation_points[])() = {
	cancellation_read,
	cancellation_recv,
	cancellation_select,
	cancellation_poll,
	cancellation_accept,
	cancellation_cond,
	cancellation_rwcond,
};

/**
 * Thread main function that directly enters the given cancellation point
 */
static void* run_cancellation_point(void (*fn)())
{
	fn();
	return NULL;
}

/**
 * Like run_cancellation_point(), but sleeps first so the cancel issued by
 * the test arrives before the cancellation point is entered
 */
static void* run_cancellation_point_pre(void (*fn)())
{
	usleep(5000);
	fn();
	return NULL;
}

/* cancel a thread while it is (most likely) blocked in the cancellation
 * point; the sleep gives the thread time to enter the blocking call */
START_TEST(test_cancellation_point)
{
	thread_t *thread;

	thread = thread_create((void*)run_cancellation_point,
						   cancellation_points[_i]);
	usleep(5000);
	thread->cancel(thread);
	thread->join(thread);
}
END_TEST

/* cancel a thread before it enters the cancellation point (the thread
 * sleeps before calling it), so the pending cancel must take effect on
 * entering the blocking call */
START_TEST(test_cancellation_point_pre)
{
	thread_t *thread;

	thread = thread_create((void*)run_cancellation_point_pre,
						   cancellation_points[_i]);
	thread->cancel(thread);
	thread->join(thread);
}
END_TEST

/**
 * Cleanup handler expected to run first: asserts the counter is still 1,
 * then increments it.  Together with cleanup2/cleanup3 this verifies that
 * handlers run in reverse order of registration.
 */
static void cleanup1(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 1);
	(*value)++;
}

/**
 * Cleanup handler expected to run second (counter 2 -> 3)
 */
static void cleanup2(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 2);
	(*value)++;
}

/**
 * Cleanup handler expected to run third (counter 3 -> 4)
 */
static void cleanup3(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 3);
	(*value)++;
}

/**
 * Thread main function: push three cleanup handlers (cleanup1 last, so it
 * runs first) and return normally, which should pop and run all of them
 */
static void *cleanup_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}

/* cleanup handlers run in reverse push order when a thread returns */
START_TEST(test_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* 1 incremented once by each of the three handlers */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

/**
 * Like cleanup_run(), but terminate via thread_exit() instead of returning;
 * the assertion after thread_exit() must never be reached
 */
static void *cleanup_exit_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	ck_assert(FALSE);
	return NULL;
}

/* cleanup handlers also run when a thread calls thread_exit() */
START_TEST(test_cleanup_exit)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_exit_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

/**
 * Thread main function: register cleanup handlers with cancelability
 * disabled, then enable it and block in sleep() until canceled
 */
static void *cleanup_cancel_run(void *data)
{
	/* don't get canceled before the handlers are registered */
	thread_cancelability(FALSE);

	barrier_wait(barrier);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}

/* cleanup handlers also run when a thread gets canceled */
START_TEST(test_cleanup_cancel)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	/* +1 as the main thread joins the barrier, ensuring all threads have
	 * registered their handlers before any cancel is issued */
	barrier = barrier_create(THREADS+1);
	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_cancel_run, &values[i]);
	}
	barrier_wait(barrier);
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
	barrier_destroy(barrier);
}
END_TEST

/**
 * Thread main function exercising thread_cleanup_pop(): a handler popped
 * with FALSE is discarded without running, one popped with TRUE runs
 * immediately; the remaining two run on return
 */
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	/* push and discard without executing */
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	/* pop and execute cleanup1 right here (counter 1 -> 2) */
	thread_cleanup_pop(TRUE);
	return NULL;
}

/* popped handlers behave as requested, remaining ones run on return */
START_TEST(test_cleanup_pop)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_pop_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

/**
 * Thread main function exercising thread_cleanup_popall(): all three
 * registered handlers are popped and executed in reverse push order
 */
static void *cleanup_popall_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cleanup_popall();
	return NULL;
}

/* popall executes every registered handler */
START_TEST(test_cleanup_popall)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_popall_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST


/**
 * Thread-specific value slots shared by the TLS tests below
 */
static thread_value_t *tls[10];

/**
 * Thread main function: store per-thread values (derived from the thread's
 * argument) in each slot and verify they are not clobbered by other
 * threads; returns value + 1 to also test join() return values
 */
static void *tls_run(void *data)
{
	uintptr_t value = (uintptr_t)data;
	int i, j;

	/* slots must initially be unset in every thread */
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == NULL);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], (void*)(value * i));
	}
	for (j = 0; j < 1000; j++)
	{
		for (i = 0; i < countof(tls); i++)
		{
			tls[i]->set(tls[i], (void*)(value * i));
			ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
		}
		/* yield to give other threads a chance to interfere */
		sched_yield();
	}
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
	}
	return (void*)(value + 1);
}

/* thread-specific values are isolated between threads, including main */
START_TEST(test_tls)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
	}

	/* the main thread participates with its own distinct value */
	ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
					 THREADS + 2);

	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
}
END_TEST

/**
 * TLS destructor: decrement the per-thread counter once per slot
 */
static void tls_cleanup(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	(*value)--;
}

/**
 * Thread main function: store the same counter in every slot, so the
 * destructor runs countof(tls) times for this thread
 */
static void *tls_cleanup_run(void *data)
{
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], data);
	}
	return NULL;
}

/* TLS destructors run once per slot and thread; counters initialized to
 * countof(tls) must end up at 0 (main thread's on destroy()) */
START_TEST(test_tls_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS], main_value = countof(tls);
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(tls_cleanup);
	}
	for (i = 0; i < THREADS; i++)
	{
		values[i] = countof(tls);
		threads[i] = thread_create(tls_cleanup_run, &values[i]);
	}

	tls_cleanup_run(&main_value);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 0);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
	ck_assert_int_eq(main_value, 0);
}
END_TEST

/**
 * Create the threading test suite, grouping the tests defined in this file
 * into test cases
 */
Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("spinlock");
	tcase_add_test(tc, test_spinlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("condvar");
	tcase_add_test(tc, test_condvar);
	tcase_add_test(tc, test_condvar_recursive);
	tcase_add_test(tc, test_condvar_broad);
	tcase_add_test(tc, test_condvar_timed);
	tcase_add_test(tc, test_condvar_timed_abs);
	tcase_add_test(tc, test_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock");
	tcase_add_test(tc, test_rwlock);
	tcase_add_test(tc, test_rwlock_try);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock condvar");
	tcase_add_test(tc, test_rwlock_condvar);
	tcase_add_test(tc, test_rwlock_condvar_broad);
	tcase_add_test(tc, test_rwlock_condvar_timed);
	tcase_add_test(tc, test_rwlock_condvar_timed_abs);
	tcase_add_test(tc, test_rwlock_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("semaphore");
	tcase_add_test(tc, test_semaphore);
	tcase_add_test(tc, test_semaphore_timed);
	tcase_add_test(tc, test_semaphore_timed_abs);
	tcase_add_test(tc, test_semaphore_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation point");
	tcase_add_loop_test(tc, test_cancellation_point,
						0, countof(cancellation_points));
	tcase_add_loop_test(tc, test_cancellation_point_pre,
						0, countof(cancellation_points));
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	tcase_add_test(tc, test_cleanup_pop);
	tcase_add_test(tc, test_cleanup_popall);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread local storage");
	tcase_add_test(tc, test_tls);
	tcase_add_test(tc, test_tls_cleanup);
	suite_add_tcase(s, tc);

	return s;
}