| version 1.14, 2012/08/23 02:33:12 | version 1.15, 2012/09/10 15:07:53 |
|---|---|
| Line 118 _sched_threadCleanup(sched_task_t *t) | Line 118 _sched_threadCleanup(sched_task_t *t) |
| if (TASK_FLAG(t) == PTHREAD_CREATE_JOINABLE) | if (TASK_FLAG(t) == PTHREAD_CREATE_JOINABLE) |
| pthread_detach(pthread_self()); | pthread_detach(pthread_self()); |
| | pthread_mutex_lock(&TASK_ROOT(t)->root_mtx[taskTHREAD]); |
| | TAILQ_REMOVE(&TASK_ROOT(t)->root_thread, t, task_node); |
| | pthread_mutex_unlock(&TASK_ROOT(t)->root_mtx[taskTHREAD]); |
| sched_unuseTask(t); | sched_unuseTask(t); |
| } | } |
| void * | void * |
| _sched_threadWrapper(sched_task_t *t) | _sched_threadWrapper(sched_task_t *t) |
| { | { |
| void *ret = NULL; | void *ret = NULL; |
| | sem_t *s = NULL; |
| if (!t \|\| !TASK_ROOT(t)) | if (!t \|\| !TASK_ROOT(t) \|\| !TASK_RET(t)) |
| pthread_exit(ret); | pthread_exit(ret); |
| | else |
| | s = (sem_t*) TASK_RET(t); |
| pthread_cleanup_push((void (*)(void*)) _sched_threadCleanup, t); | pthread_cleanup_push((void (*)(void*)) _sched_threadCleanup, t); |
| pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); | pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); |
| pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); | pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); |
| | /* notify parent, thread is ready for execution */ |
| | sem_post(s); |
| pthread_testcancel(); | pthread_testcancel(); |
| ret = TASK_FUNC(t)(t); | ret = TASK_FUNC(t)(t); |
| Line 1206 schedCallOnce(sched_root_task_t * __restrict root, sch | Line 1216 schedCallOnce(sched_root_task_t * __restrict root, sch |
| * @func = task execution function | * @func = task execution function |
| * @arg = 1st func argument | * @arg = 1st func argument |
| * @detach = Detach thread from scheduler, if !=0 | * @detach = Detach thread from scheduler, if !=0 |
| | * @ss = stack size |
| * @opt_data = Optional data | * @opt_data = Optional data |
| * @opt_dlen = Optional data length | * @opt_dlen = Optional data length |
| * return: NULL error or !=NULL new queued task | * return: NULL error or !=NULL new queued task |
| */ | */ |
| sched_task_t * | sched_task_t * |
| schedThread(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int detach, | schedThread(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int detach, |
| void *opt_data, size_t opt_dlen) | size_t ss, void *opt_data, size_t opt_dlen) |
| { | { |
| #ifndef HAVE_LIBPTHREAD | #ifndef HAVE_LIBPTHREAD |
| sched_SetErr(ENOTSUP, "Not supported thread tasks"); | sched_SetErr(ENOTSUP, "Not supported thread tasks"); |
| Line 1221 schedThread(sched_root_task_t * __restrict root, sched | Line 1232 schedThread(sched_root_task_t * __restrict root, sched |
| sched_task_t *task; | sched_task_t *task; |
| void *ptr; | void *ptr; |
| pthread_attr_t attr; | pthread_attr_t attr; |
| | sem_t *s = NULL; |
| if (!root \|\| !func) | if (!root \|\| !func) |
| return NULL; | return NULL; |
| | else { |
| | /* normalizing stack size & detach state */ |
| | if (ss) |
| | ss &= 0x7FFFFFFF; |
| | detach = detach ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE; |
| | } |
| | if (!(s = (sem_t*) malloc(sizeof(sem_t)))) { |
| | LOGERR; |
| | return NULL; |
| | } |
| | if (sem_init(s, 0, 1)) { |
| | LOGERR; |
| | free(s); |
| | return NULL; |
| | } |
| /* get new task */ | /* get new task */ |
| if (!(task = sched_useTask(root))) | if (!(task = sched_useTask(root))) { |
| | sem_destroy(s); |
| | free(s); |
| return NULL; | return NULL; |
| | } |
| task->task_func = func; | task->task_func = func; |
| TASK_TYPE(task) = taskTHREAD; | TASK_TYPE(task) = taskTHREAD; |
| TASK_ROOT(task) = root; | TASK_ROOT(task) = root; |
| TASK_ARG(task) = arg; | TASK_ARG(task) = arg; |
| TASK_FLAG(task) = detach ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE; | TASK_FLAG(task) = detach; |
| | TASK_RET(task) = (intptr_t) s; |
| TASK_DATA(task) = opt_data; | TASK_DATA(task) = opt_data; |
| TASK_DATLEN(task) = opt_dlen; | TASK_DATLEN(task) = opt_dlen; |
| pthread_attr_init(&attr); | pthread_attr_init(&attr); |
| pthread_attr_setdetachstate(&attr, TASK_FLAG(task)); | pthread_attr_setdetachstate(&attr, detach); |
| | if (ss && (errno = pthread_attr_setstacksize(&attr, ss))) { |
| | LOGERR; |
| | pthread_attr_destroy(&attr); |
| | sem_destroy(s); |
| | free(s); |
| | return sched_unuseTask(task); |
| | } |
| | if ((errno = pthread_attr_getstacksize(&attr, &ss))) { |
| | LOGERR; |
| | pthread_attr_destroy(&attr); |
| | sem_destroy(s); |
| | free(s); |
| | return sched_unuseTask(task); |
| | } else |
| | TASK_FLAG(task) \|= (ss << 1); |
| | if ((errno = pthread_attr_setguardsize(&attr, ss))) { |
| | LOGERR; |
| | pthread_attr_destroy(&attr); |
| | sem_destroy(s); |
| | free(s); |
| | return sched_unuseTask(task); |
| | } |
| | #ifdef SCHED_RR |
| | pthread_attr_setschedpolicy(&attr, SCHED_RR); |
| | #else |
| | pthread_attr_setschedpolicy(&attr, SCHED_OTHER); |
| | #endif |
| if (root->root_hooks.hook_add.thread) | if (root->root_hooks.hook_add.thread) |
| ptr = root->root_hooks.hook_add.thread(task, &attr); | ptr = root->root_hooks.hook_add.thread(task, &attr); |
| else | else |
| Line 1251 schedThread(sched_root_task_t * __restrict root, sched | Line 1311 schedThread(sched_root_task_t * __restrict root, sched |
| pthread_mutex_lock(&root->root_mtx[taskTHREAD]); | pthread_mutex_lock(&root->root_mtx[taskTHREAD]); |
| TAILQ_INSERT_TAIL(&root->root_thread, TASK_ID(task), task_node); | TAILQ_INSERT_TAIL(&root->root_thread, TASK_ID(task), task_node); |
| pthread_mutex_unlock(&root->root_mtx[taskTHREAD]); | pthread_mutex_unlock(&root->root_mtx[taskTHREAD]); |
| | /* wait for init thread actions */ |
| | sem_wait(s); |
| } else | } else |
| task = sched_unuseTask(task); | task = sched_unuseTask(task); |
| | sem_destroy(s); |
| | free(s); |
| return task; | return task; |
| } | } |
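
The core of this revision is the parent/child handshake added in 1.15: `schedThread()` allocates a semaphore, stores it in `TASK_RET()`, and waits on it after queueing the task, while `_sched_threadWrapper()` posts it once the cancellation state and type are set. Below is a minimal standalone sketch of that pattern, assuming only POSIX threads and semaphores; the names (`worker`, `worker_arg`) are illustrative, and the semaphore is initialized to 0 here so the parent genuinely blocks, whereas the library code above keeps its semaphore on the heap, reaches it through `TASK_RET()`, and initializes it to 1.

```c
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>

struct worker_arg {
	sem_t *ready;		/* handshake semaphore, analogous to TASK_RET() above */
};

/* child side: mirrors _sched_threadWrapper()'s setup-then-post sequence */
static void *
worker(void *p)
{
	struct worker_arg *wa = p;

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	/* notify parent, thread is ready for execution */
	sem_post(wa->ready);

	/* ... the scheduled task function would run here ... */
	return NULL;
}

/* parent side: mirrors schedThread()'s create-then-wait sequence */
int
main(void)
{
	pthread_t tid;
	sem_t s;
	struct worker_arg wa = { .ready = &s };

	if (sem_init(&s, 0, 0))		/* 0 so the parent blocks until the post */
		return EXIT_FAILURE;
	if (pthread_create(&tid, NULL, worker, &wa))
		return EXIT_FAILURE;

	sem_wait(&s);			/* wait for init thread actions */
	pthread_join(tid, NULL);
	sem_destroy(&s);
	return EXIT_SUCCESS;
}
```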
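The other visible change is the `schedThread()` prototype itself, which gains a stack-size argument `ss` between `detach` and the optional data: zero keeps the pthread default, a non-zero value is masked to 31 bits, applied via `pthread_attr_setstacksize()` and `pthread_attr_setguardsize()`, and folded into `TASK_FLAG()` as `ss << 1` alongside the detach state. A hedged call sketch under that signature follows; the header name and the exact shape of the task callback are assumptions based on the identifiers in this diff, not something the diff itself shows.

```c
#include <aitsched.h>		/* assumed public header of libaitsched */

/* task body: per the wrapper above, TASK_FUNC() is invoked with the task itself
 * and its return value becomes the thread's result */
static void *
my_thread_task(sched_task_t *task)
{
	/* opt_data / opt_dlen are reachable via TASK_DATA() / TASK_DATLEN() */
	return NULL;
}

/* queue a joinable thread task with a 256 KiB stack on an existing root */
static sched_task_t *
queue_example(sched_root_task_t *root)
{
	return schedThread(root, my_thread_task, /* arg */ NULL,
	    /* detach */ 0, /* ss */ 256 * 1024,
	    /* opt_data */ NULL, /* opt_dlen */ 0);
}
```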