version 1.4, 2012/01/08 00:51:17 vs. version 1.4.2.1, 2012/01/08 03:50:11
Line 55 (1.4) / Line 55 (1.4.2.1): _sched_useTask(sched_root_task_t * __restrict root)

     TAILQ_FOREACH(task, &root->root_unuse, task_node) {
         if (!TASK_ISLOCKED(task)) {
+#ifdef HAVE_LIBPTHREAD
+            pthread_mutex_lock(&root->root_mtx[taskUNUSE]);
+#endif
             TAILQ_REMOVE(&root->root_unuse, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+            pthread_mutex_unlock(&root->root_mtx[taskUNUSE]);
+#endif
             break;
         }
     }
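The lock array indexed by taskUNUSE is not declared in this hunk. A plausible shape, assumed from the usage above (the member name root_mtx comes from the diff; the array-length macro taskMAX is hypothetical), would be:

    #ifdef HAVE_LIBPTHREAD
        /* assumed: one mutex per task queue, indexed by task type
         * (taskUNUSE, taskREAD, taskWRITE, taskTIMER, ...) */
        pthread_mutex_t    root_mtx[taskMAX];
    #endif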
Line 75 (1.4) / Line 81 (1.4.2.1): inline sched_task_t *

 _sched_unuseTask(sched_task_t * __restrict task)
 {
     TASK_UNLOCK(task);
-    task->task_type = taskUNUSE;
-    TAILQ_INSERT_TAIL(&task->task_root->root_unuse, task, task_node);
+    TASK_TYPE(task) = taskUNUSE;
+#ifdef HAVE_LIBPTHREAD
+    pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
+#endif
+    TAILQ_INSERT_TAIL(&TASK_ROOT(task)->root_unuse, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+    pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
+#endif
     task = NULL;

     return task;
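The rewrite also switches from direct member access to the TASK_TYPE() and TASK_ROOT() accessors. Their definitions are not part of this diff; the sketch below assumes they simply wrap the fields that version 1.4 touched directly:

    /* assumed, illustrative definitions only (the real ones live in the
     * library header, not in this diff) */
    #define TASK_TYPE(x)    ((x)->task_type)
    #define TASK_ROOT(x)    ((x)->task_root)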
Line 109 (1.4) / Line 121 (1.4.2.1): schedRead(sched_root_task_t * __restrict root, sched_t

     memset(task, 0, sizeof(sched_task_t));
     task->task_id = 0;
     task->task_lock = 0;
-    task->task_type = taskREAD;
-    task->task_root = root;
     task->task_func = func;

+    TASK_TYPE(task) = taskREAD;
+    TASK_ROOT(task) = root;
+
     TASK_ARG(task) = arg;
     TASK_FD(task) = fd;
Line 121 (1.4) / Line 133 (1.4.2.1): schedRead(sched_root_task_t * __restrict root, sched_t

     else
         ptr = NULL;

-    if (!ptr)
+    if (!ptr) {
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&root->root_mtx[taskREAD]);
+#endif
         TAILQ_INSERT_TAIL(&root->root_read, task, task_node);
-    else
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&root->root_mtx[taskREAD]);
+#endif
+    } else
         task = _sched_unuseTask(task);

     return task;
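Every sched*() routine below repeats this guarded insertion, changing only the target queue and the mutex index. The macro sketched here does not exist in the source; it is only an illustration of the shared pattern, assuming <sys/queue.h> and <pthread.h> are included:

    #ifdef HAVE_LIBPTHREAD
    #define QUEUE_TASK_LOCKED(root, queue, idx, task) do {            \
            pthread_mutex_lock(&(root)->root_mtx[(idx)]);             \
            TAILQ_INSERT_TAIL(&(root)->queue, (task), task_node);     \
            pthread_mutex_unlock(&(root)->root_mtx[(idx)]);           \
    } while (0)
    #else
    #define QUEUE_TASK_LOCKED(root, queue, idx, task)                 \
            TAILQ_INSERT_TAIL(&(root)->queue, (task), task_node)
    #endif

    /* with it, the schedRead() hunk above would collapse to:
     *     if (!ptr)
     *         QUEUE_TASK_LOCKED(root, root_read, taskREAD, task);
     *     else
     *         task = _sched_unuseTask(task);
     */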
Line 153 (1.4) / Line 171 (1.4.2.1): schedWrite(sched_root_task_t * __restrict root, sched_

     memset(task, 0, sizeof(sched_task_t));
     task->task_id = 0;
     task->task_lock = 0;
-    task->task_type = taskWRITE;
-    task->task_root = root;
     task->task_func = func;

+    TASK_TYPE(task) = taskWRITE;
+    TASK_ROOT(task) = root;
+
     TASK_ARG(task) = arg;
     TASK_FD(task) = fd;
Line 165 (1.4) / Line 183 (1.4.2.1): schedWrite(sched_root_task_t * __restrict root, sched_

     else
         ptr = NULL;

-    if (!ptr)
+    if (!ptr) {
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&root->root_mtx[taskWRITE]);
+#endif
         TAILQ_INSERT_TAIL(&root->root_write, task, task_node);
-    else
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&root->root_mtx[taskWRITE]);
+#endif
+    } else
         task = _sched_unuseTask(task);

     return task;
Line 199 (1.4) / Line 223 (1.4.2.1): schedTimer(sched_root_task_t * __restrict root, sched_

     memset(task, 0, sizeof(sched_task_t));
     task->task_id = 0;
     task->task_lock = 0;
-    task->task_type = taskTIMER;
-    task->task_root = root;
     task->task_func = func;

+    TASK_TYPE(task) = taskTIMER;
+    TASK_ROOT(task) = root;
+
     TASK_ARG(task) = arg;
Line 224 (1.4) / Line 248 (1.4.2.1): schedTimer(sched_root_task_t * __restrict root, sched_

         ptr = NULL;

     if (!ptr) {
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&root->root_mtx[taskTIMER]);
+#endif
 #ifdef TIMER_WITHOUT_SORT
         TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
 #else
Line 235 (1.4) / Line 262 (1.4.2.1): schedTimer(sched_root_task_t * __restrict root, sched_

         else
             TAILQ_INSERT_BEFORE(t, task, task_node);
 #endif
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&root->root_mtx[taskTIMER]);
+#endif
     } else
         task = _sched_unuseTask(task);
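Taken together, the two schedTimer hunks keep the taskTIMER mutex held across the whole insertion, with or without TIMER_WITHOUT_SORT. A sketch of the resulting block, with the sorted-walk loop (not shown in this diff) abridged to a comment:

    if (!ptr) {
    #ifdef HAVE_LIBPTHREAD
        pthread_mutex_lock(&root->root_mtx[taskTIMER]);
    #endif
    #ifdef TIMER_WITHOUT_SORT
        TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
    #else
        /* ... walk root_timer for the first entry t that expires later
         * than task (loop omitted here) ... */
        if (!t)
            TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
        else
            TAILQ_INSERT_BEFORE(t, task, task_node);
    #endif
    #ifdef HAVE_LIBPTHREAD
        pthread_mutex_unlock(&root->root_mtx[taskTIMER]);
    #endif
    } else
        task = _sched_unuseTask(task);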
Line 265 (1.4) / Line 295 (1.4.2.1): schedEvent(sched_root_task_t * __restrict root, sched_

     memset(task, 0, sizeof(sched_task_t));
     task->task_id = 0;
     task->task_lock = 0;
-    task->task_type = taskEVENT;
-    task->task_root = root;
     task->task_func = func;

+    TASK_TYPE(task) = taskEVENT;
+    TASK_ROOT(task) = root;
+
     TASK_ARG(task) = arg;
     TASK_VAL(task) = val;
Line 277 (1.4) / Line 307 (1.4.2.1): schedEvent(sched_root_task_t * __restrict root, sched_

     else
         ptr = NULL;

-    if (!ptr)
+    if (!ptr) {
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&root->root_mtx[taskEVENT]);
+#endif
         TAILQ_INSERT_TAIL(&root->root_event, task, task_node);
-    else
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&root->root_mtx[taskEVENT]);
+#endif
+    } else
         task = _sched_unuseTask(task);

     return task;
Line 310 (1.4) / Line 346 (1.4.2.1): schedEventLo(sched_root_task_t * __restrict root, sche

     memset(task, 0, sizeof(sched_task_t));
     task->task_id = 0;
     task->task_lock = 0;
-    task->task_type = taskEVENT;
-    task->task_root = root;
     task->task_func = func;

+    TASK_TYPE(task) = taskEVENT;
+    TASK_ROOT(task) = root;
+
     TASK_ARG(task) = arg;
     TASK_VAL(task) = val;
Line 322 (1.4) / Line 358 (1.4.2.1): schedEventLo(sched_root_task_t * __restrict root, sche

     else
         ptr = NULL;

-    if (!ptr)
+    if (!ptr) {
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_lock(&root->root_mtx[taskEVENTLO]);
+#endif
         TAILQ_INSERT_TAIL(&root->root_eventlo, task, task_node);
-    else
+#ifdef HAVE_LIBPTHREAD
+        pthread_mutex_unlock(&root->root_mtx[taskEVENTLO]);
+#endif
+    } else
         task = _sched_unuseTask(task);

     return task;
Line 354 (1.4) / Line 396 (1.4.2.1): schedCallOnce(sched_root_task_t * __restrict root, sch

     memset(task, 0, sizeof(sched_task_t));
     task->task_id = 0;
     task->task_lock = 0;
-    task->task_type = taskEVENT;
-    task->task_root = root;
     task->task_func = func;

+    TASK_TYPE(task) = taskEVENT;
+    TASK_ROOT(task) = root;
+
     TASK_ARG(task) = arg;
     TASK_VAL(task) = val;