version 1.1.1.1.2.2, 2011/08/12 23:06:56 | version 1.4.2.6, 2012/01/24 14:51:03
Line 104 schedRegisterHooks(sched_root_task_t * __restrict root

    root->root_hooks.hook_exec.cancel = sched_hook_cancel;
    root->root_hooks.hook_exec.fetch = sched_hook_fetch;
|   root->root_hooks.hook_exec.exception = sched_hook_exception;

    root->root_hooks.hook_root.init = sched_hook_init;
    root->root_hooks.hook_root.fini = sched_hook_fini;
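The newly registered hook_exec.exception slot gives callers a place to plug in their own error handler after the defaults are installed. A minimal sketch of overriding it, assuming sched_hook_func_t is compatible with void *(*)(void *, void *) (the typedef itself is not shown in this diff, and my_exception_hook is hypothetical):

#include <syslog.h>

/* hypothetical replacement exception hook: log and report "handled" */
static void *
my_exception_hook(void *root, void *arg)
{
    syslog(LOG_WARNING, "scheduler exception on root %p (arg %p)", root, arg);
    return NULL;
}

static void
install_custom_hooks(sched_root_task_t *root)
{
    schedRegisterHooks(root);       /* install the library defaults first */
    root->root_hooks.hook_exec.exception = my_exception_hook;
}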
Line 121 schedInit(void ** __restrict data, size_t datlen) | Line 122 schedInit(void ** __restrict data, size_t datlen)
{
    sched_root_task_t *root = NULL;
    int (*func)(sched_root_task_t *);
|#ifdef HAVE_LIBPTHREAD
|   register int i;
|#endif

    root = malloc(sizeof(sched_root_task_t));
    if (!root) {
        LOGERR;
    } else {
        memset(root, 0, sizeof(sched_root_task_t));
|
|       /* INFINIT polling period by default */
|       root->root_poll.tv_sec = root->root_poll.tv_nsec = -1;
|
|#ifdef HAVE_LIBPTHREAD
|       for (i = 0; i < taskMAX; i++)
|           if (pthread_mutex_init(&root->root_mtx[i], NULL)) {
|               LOGERR;
|               while (i)
|                   pthread_mutex_destroy(&root->root_mtx[--i]);
|               free(root);
|               return NULL;
|           }
|
|       for (i = 0; i < taskMAX; i++)
|           pthread_mutex_lock(&root->root_mtx[i]);
|#endif

        TAILQ_INIT(&root->root_read);
        TAILQ_INIT(&root->root_write);
        TAILQ_INIT(&root->root_timer);
Line 135 schedInit(void ** __restrict data, size_t datlen) | Line 157 schedInit(void ** __restrict data, size_t datlen)
        TAILQ_INIT(&root->root_ready);
        TAILQ_INIT(&root->root_unuse);
|
|#ifdef HAVE_LIBPTHREAD
|       for (i = 0; i < taskMAX; i++)
|           pthread_mutex_unlock(&root->root_mtx[i]);
|#endif

        if (data && *data) {
            if (datlen) {
                root->root_data.iov_base = *data;
                root->root_data.iov_len = datlen;
            } else { |             } else { /* if datlen == 0, switch to callbacks init mode */
|               /* little hack :) for correct initialization of scheduler */
                func = (int(*)(sched_root_task_t*)) data;
                func(root);
            }
        }
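The tail of schedInit() keeps both initialization modes: a real data buffer when datlen is non-zero, and the "little hack" callback mode when datlen == 0, in which the data argument itself is reinterpreted as an int (*)(sched_root_task_t *) and invoked on the fresh root. A usage sketch under that reading (my_sched_setup and init_examples are hypothetical; the scheduler header is assumed to be included):

/* hypothetical callback for the datlen == 0 mode: schedInit() casts the
 * data argument itself to this signature and calls it on the new root */
static int
my_sched_setup(sched_root_task_t *root)
{
    /* e.g. override hooks or tune root fields here */
    return 0;
}

static void
init_examples(void)
{
    static char buf[256];
    void *data = buf;

    /* data mode: root_data keeps an iovec pointing at the caller's buffer */
    sched_root_task_t *root_buf = schedInit(&data, sizeof(buf));

    /* callback mode: the function pointer rides in place of the data pointer */
    sched_root_task_t *root_cb = schedInit((void **) my_sched_setup, 0);

    schedEnd(&root_buf);    /* new-style teardown, see schedEnd() below */
    schedEnd(&root_cb);
}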
Line 158 schedInit(void ** __restrict data, size_t datlen) | Line 186 schedInit(void ** __restrict data, size_t datlen)
 * return: -1 error or 0 ok
 */
int
schedEnd(sched_root_task_t * __restrict root) | schedEnd(sched_root_task_t ** __restrict root)
{
    sched_task_t *task;
|#ifdef HAVE_LIBPTHREAD
|   register int i;
|#endif

    if (!root) |    if (!root || !*root)
        return -1;

    TAILQ_FOREACH(task, &root->root_read, task_node) { |   TAILQ_FOREACH(task, &(*root)->root_read, task_node) {
        schedCancel(task);
    }
    TAILQ_FOREACH(task, &root->root_write, task_node) { |  TAILQ_FOREACH(task, &(*root)->root_write, task_node) {
        schedCancel(task);
    }
    TAILQ_FOREACH(task, &root->root_timer, task_node) { |  TAILQ_FOREACH(task, &(*root)->root_timer, task_node) {
        schedCancel(task);
    }
    TAILQ_FOREACH(task, &root->root_event, task_node) { |  TAILQ_FOREACH(task, &(*root)->root_event, task_node) {
        schedCancel(task);
    }
    TAILQ_FOREACH(task, &root->root_ready, task_node) { |  TAILQ_FOREACH(task, &(*root)->root_eventlo, task_node) {
        schedCancel(task);
    }
|   TAILQ_FOREACH(task, &(*root)->root_ready, task_node) {
|       schedCancel(task);
|   }

    while ((task = TAILQ_FIRST(&root->root_unuse))) { | #ifdef HAVE_LIBPTHREAD
        TAILQ_REMOVE(&root->root_unuse, task, task_node); |    pthread_mutex_lock(&(*root)->root_mtx[taskUNUSE]);
|#endif
|   while ((task = TAILQ_FIRST(&(*root)->root_unuse))) {
|       TAILQ_REMOVE(&(*root)->root_unuse, task, task_node);
        free(task);
    }
|#ifdef HAVE_LIBPTHREAD
|   pthread_mutex_unlock(&(*root)->root_mtx[taskUNUSE]);
|#endif

    if (root->root_hooks.hook_root.fini) |  if ((*root)->root_hooks.hook_root.fini)
        root->root_hooks.hook_root.fini(root, NULL); |     (*root)->root_hooks.hook_root.fini(*root, NULL);

    free(root); | #ifdef HAVE_LIBPTHREAD
|   for (i = 0; i < taskMAX; i++)
|       pthread_mutex_destroy(&(*root)->root_mtx[i]);
|#endif
|
|   free(*root);
|   *root = NULL;
    return 0;
}

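The reworked schedEnd() takes the address of the caller's root pointer so it can clear it after freeing; existing callers need only pass &root. A minimal sketch of the new calling convention (teardown_example is illustrative):

#include <assert.h>

static void
teardown_example(void)
{
    sched_root_task_t *root = schedInit(NULL, 0);

    /* old API: schedEnd(root) freed the root but left the pointer dangling */
    schedEnd(&root);    /* new API: cancels queued tasks, frees *root, sets it to NULL */
    assert(root == NULL);
}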
Line 201 schedEnd(sched_root_task_t * __restrict root) | Line 247 schedEnd(sched_root_task_t * __restrict root)
inline void *
schedCall(sched_task_t * __restrict task)
{
|   void *ptr = (void*) -1;

    if (!task)
        return (void*) -1; |        return ptr;

|   if (!TASK_ISLOCKED(task))
|       TASK_LOCK(task);

    task->task_id++;
    return task->task_func(task); |     ptr = task->task_func(task);
|
|   TASK_UNLOCK(task);
|   return ptr;
}

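schedCall() now brackets the callback with TASK_LOCK()/TASK_UNLOCK() and forwards whatever the callback returns instead of returning it directly. A sketch of a task callback it would invoke, assuming the void *(*)(sched_task_t *) shape implied by ptr = task->task_func(task); on_readable and its use of TASK_FD() are illustrative only:

#include <unistd.h>

/* hypothetical read-task callback: schedCall() bumps task_id, runs this
 * under the task lock, and hands this return value back to its caller */
static void *
on_readable(sched_task_t *task)
{
    char tmp[512];

    if (read(TASK_FD(task), tmp, sizeof(tmp)) < 0)
        return (void*) -1;
    return NULL;
}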
/*
Line 239 schedCancel(sched_task_t * __restrict task) | Line 293 schedCancel(sched_task_t * __restrict task)
{
    sched_queue_t *queue;

    if (!task || !task->task_root) |    if (!task || !TASK_ROOT(task))
        return -1;

    if (task->task_root->root_hooks.hook_exec.cancel) |    if (TASK_ROOT(task)->root_hooks.hook_exec.cancel)
        if (task->task_root->root_hooks.hook_exec.cancel(task, NULL)) |        if (TASK_ROOT(task)->root_hooks.hook_exec.cancel(task, NULL))
            return -1;

    switch (task->task_type) { |    switch (TASK_TYPE(task)) {
        case taskREAD:
            queue = &task->task_root->root_read; |             queue = &TASK_ROOT(task)->root_read;
            break;
        case taskWRITE:
            queue = &task->task_root->root_write; |            queue = &TASK_ROOT(task)->root_write;
            break;
        case taskTIMER:
            queue = &task->task_root->root_timer; |            queue = &TASK_ROOT(task)->root_timer;
            break;
        case taskEVENT:
            queue = &task->task_root->root_event; |            queue = &TASK_ROOT(task)->root_event;
            break;
|       case taskEVENTLO:
|           queue = &TASK_ROOT(task)->root_eventlo;
|           break;
        case taskREADY:
            queue = &task->task_root->root_ready; |            queue = &TASK_ROOT(task)->root_ready;
            break;
        default:
            queue = NULL;
    }
    if (queue) |    if (queue) {
|#ifdef HAVE_LIBPTHREAD
|       pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[TASK_TYPE(task)]);
|#endif
        TAILQ_REMOVE(queue, task, task_node);
    if (task->task_type != taskUNUSE) { | #ifdef HAVE_LIBPTHREAD
        task->task_type = taskUNUSE; |      pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[TASK_TYPE(task)]);
        TAILQ_INSERT_TAIL(&task->task_root->root_unuse, task, task_node); | #endif
    }
|   if (TASK_TYPE(task) != taskUNUSE)
|       _sched_unuseTask(task);

    return 0;
}
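The inline "park it on the unuse queue" code has been folded into _sched_unuseTask(). That helper is not part of this hunk; judging from the removed lines and the per-queue locking pattern used elsewhere in the diff, it presumably amounts to the sketch below (a reconstruction, not the actual function):

static void
sched_unuse_sketch(sched_task_t * __restrict task)
{
#ifdef HAVE_LIBPTHREAD
    pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
#endif
    task->task_type = taskUNUSE;
    TAILQ_INSERT_TAIL(&TASK_ROOT(task)->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
    pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
#endif
}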
Line 278 schedCancel(sched_task_t * __restrict task) | Line 340 schedCancel(sched_task_t * __restrict task)
/*
 * schedCancelby() - Cancel task from scheduler by criteria
 * @root = root task
 * @queue = cancel from queue, if =NULL cancel same task from all queues | * @type = cancel from queue type, if =taskMAX cancel same task from all queues
 * @criteria = find task by criteria [CRITERIA_CALL|CRITERIA_ARG|CRITERIA_FD|CRITERIA_VAL|CRITERIA_TV]
 * @param = search parameter
 * @hook = custom cleanup hook function, may be NULL
 * return: -1 error or 0 ok | * return: -1 error, -2 error in sub-stage cancel execution, -3 error from custom hook or 0 ok
 */
int
schedCancelby(sched_root_task_t * __restrict root, sched_queue_t * __restrict queue, | schedCancelby(sched_root_task_t * __restrict root, sched_task_type_t type,
        u_char criteria, void *param, sched_hook_func_t hook)
{
    sched_task_t *task;
|   sched_queue_t *queue;
    int flg = 0;

    if (!root)
        return -1;
    if (!queue) { |     if (type == taskMAX) {
        if (schedCancelby(root, &root->root_read, criteria, param, hook)) |        if (schedCancelby(root, taskREAD, criteria, param, hook))
            return -2;
        if (schedCancelby(root, &root->root_write, criteria, param, hook)) |       if (schedCancelby(root, taskWRITE, criteria, param, hook))
            return -2;
        if (schedCancelby(root, &root->root_timer, criteria, param, hook)) |       if (schedCancelby(root, taskTIMER, criteria, param, hook))
            return -2;
        if (schedCancelby(root, &root->root_event, criteria, param, hook)) |       if (schedCancelby(root, taskEVENT, criteria, param, hook))
            return -2;
        if (schedCancelby(root, &root->root_ready, criteria, param, hook)) |       if (schedCancelby(root, taskEVENTLO, criteria, param, hook))
            return -2;
        if (schedCancelby(root, &root->root_read, criteria, param, hook)) |        if (schedCancelby(root, taskREADY, criteria, param, hook))
            return -2;
        return 0;
    }
|   switch (type) {
|       case taskREAD:
|           queue = &root->root_read;
|           break;
|       case taskWRITE:
|           queue = &root->root_write;
|           break;
|       case taskTIMER:
|           queue = &root->root_timer;
|           break;
|       case taskEVENT:
|           queue = &root->root_event;
|           break;
|       case taskEVENTLO:
|           queue = &root->root_eventlo;
|           break;
|       case taskREADY:
|           queue = &root->root_ready;
|           break;
|       default:
|           return 0;
|   }
|
|#ifdef HAVE_LIBPTHREAD
|   pthread_mutex_lock(&root->root_mtx[type]);
|#endif
    TAILQ_FOREACH(task, queue, task_node)
        if (criteria == CRITERIA_CALL) {
            if (task->task_func == (sched_task_func_t) param) {
Line 321 schedCancelby(sched_root_task_t * __restrict root, sch | Line 409 schedCancelby(sched_root_task_t * __restrict root, sch
                break;
            }
        } else if (criteria == CRITERIA_FD) {
            if (TASK_FD(task) == (u_long) param) { |            if (TASK_FD(task) == (intptr_t) param) {
                flg++;
                break;
            }
Line 331 schedCancelby(sched_root_task_t * __restrict root, sch | Line 419 schedCancelby(sched_root_task_t * __restrict root, sch
                break;
            }
        } else if (criteria == CRITERIA_TV) {
            if (&TASK_TV(task) == (struct timeval*) param) { |            if (!timespeccmp(&TASK_TS(task), (struct timespec*) param, -)) {
                flg++;
                break;
            }
        } else {
|#ifdef HAVE_LIBPTHREAD
|           pthread_mutex_unlock(&root->root_mtx[type]);
|#endif
            sched_SetErr(EINVAL, "Invalid parameter criteria %d", criteria);
            return -1;
        }
|#ifdef HAVE_LIBPTHREAD
|   pthread_mutex_unlock(&root->root_mtx[type]);
|#endif
    if (!flg || !task)  /* task not found */
        return 0;

    if (task->task_root->root_hooks.hook_exec.cancel) |    if (TASK_ROOT(task)->root_hooks.hook_exec.cancel)
        if (task->task_root->root_hooks.hook_exec.cancel(task, NULL)) |        if (TASK_ROOT(task)->root_hooks.hook_exec.cancel(task, NULL))
            return -1;
    if (hook)
        if (hook(task, NULL))
            return -3;

|#ifdef HAVE_LIBPTHREAD
|   pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[type]);
|#endif
    TAILQ_REMOVE(queue, task, task_node);
|#ifdef HAVE_LIBPTHREAD
|   pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[type]);
|#endif

    if (task->task_type != taskUNUSE) { |   if (TASK_TYPE(task) != taskUNUSE)
        task->task_type = taskUNUSE; |      _sched_unuseTask(task);
        TAILQ_INSERT_TAIL(&task->task_root->root_unuse, task, task_node); |
    } |
    return 0;
}

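With the queue pointer replaced by a sched_task_type_t, cancelling by criteria now names the queue symbolically, and taskMAX fans the search out across every queue as shown above. A short usage sketch (task_cb, cancel_examples, and fd are placeholders):

#include <stdint.h>

static void *
task_cb(sched_task_t *task)     /* placeholder callback */
{
    (void) task;
    return NULL;
}

static void
cancel_examples(sched_root_task_t *root, int fd)
{
    /* drop every timer task bound to task_cb */
    schedCancelby(root, taskTIMER, CRITERIA_CALL, (void *) task_cb, NULL);

    /* drop whichever task watches fd, wherever it is queued */
    schedCancelby(root, taskMAX, CRITERIA_FD, (void *)(intptr_t) fd, NULL);
}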
/*
 * schedRun() - Scheduler *run loop*
 * @root = root task
| * @killState = kill condition variable, if !=0 stop scheduler loop
 * return: -1 error or 0 ok
 */
int
schedRun(sched_root_task_t * __restrict root) | schedRun(sched_root_task_t * __restrict root, volatile intptr_t * __restrict killState)
{
    sched_task_t *task;

Line 374 schedRun(sched_root_task_t * __restrict root) | Line 473 schedRun(sched_root_task_t * __restrict root)
    if (root->root_hooks.hook_exec.run)
        if (root->root_hooks.hook_exec.run(root, NULL))
            return -1;
    if (root->root_hooks.hook_exec.fetch) |    if (root->root_hooks.hook_exec.fetch) {
        while ((task = root->root_hooks.hook_exec.fetch(root, NULL))) |        if (killState)
            schedCall(task); |          while (!*killState) {
|               if ((task = root->root_hooks.hook_exec.fetch(root, NULL)))
|                   schedCall(task);
|           }
|       else
|           while ((task = root->root_hooks.hook_exec.fetch(root, NULL)))
|               schedCall(task);
|   }
|
|   return 0;
|}
|
|/*
| * schedPolling() - Polling timeout period if no timer task is present
| * @root = root task
| * @ts = timeout polling period, if ==NULL INFINIT timeout
| * @tsold = old timeout polling if !=NULL
| * return: -1 error or 0 ok
| */
|inline int
|schedPolling(sched_root_task_t * __restrict root, struct timespec * __restrict ts,
|       struct timespec * __restrict tsold)
|{
|   if (!root)
|       return -1;
|
|   if (tsold)
|       *tsold = root->root_poll;
|
|   if (!ts)
|       root->root_poll.tv_sec = root->root_poll.tv_nsec = -1;
|   else
|       root->root_poll = *ts;

    return 0;
}
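Taken together, the killState argument and the new schedPolling() make a stoppable run loop with a bounded idle poll straightforward. A minimal sketch (the SIGINT handler, run_until_interrupted, and the one-second period are arbitrary choices, not part of the library):

#include <signal.h>
#include <stdint.h>
#include <time.h>

static volatile intptr_t stop_flag = 0;     /* killState: loop ends once non-zero */

static void
on_sigint(int sig)
{
    (void) sig;
    stop_flag = 1;
}

static int
run_until_interrupted(sched_root_task_t *root)
{
    struct timespec ts = { 1, 0 };          /* wake at least once a second when idle */

    signal(SIGINT, on_sigint);
    schedPolling(root, &ts, NULL);
    return schedRun(root, &stop_flag);
}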