|
version 1.25, 2014/04/27 16:20:37
|
version 1.25.2.6, 2014/06/03 20:39:54
|
|
Line 82 sched_SetErr(int eno, char *estr, ...)
|
Line 82 sched_SetErr(int eno, char *estr, ...)
|
| va_end(lst); |
va_end(lst); |
| } |
} |
| |
|
| |
|
| |
/* string support functions directly imported from OpenBSD */ |
| |
|
| |
#ifndef HAVE_STRLCAT
/*
 * Append src to the NUL-terminated string in dst, where siz is the
 * TOTAL size of the dst buffer (not the space remaining).  At most
 * siz-1 bytes end up in dst and the result is always NUL-terminated,
 * unless siz <= strlen(dst) on entry (dst is then left untouched).
 * Returns strlen(src) + MIN(siz, strlen(initial dst)); a return value
 * >= siz means the output was truncated.
 */
size_t
strlcat(char * __restrict dst, const char * __restrict src, size_t siz)
{
	const char *s;
	char *d;
	size_t left, dlen;

	/* Locate dst's terminating NUL, scanning no more than siz bytes. */
	d = dst;
	left = siz;
	while (left != 0 && *d != '\0') {
		d++;
		left--;
	}
	dlen = (size_t)(d - dst);
	left = siz - dlen;

	if (left == 0)	/* no room at all: just report the needed length */
		return (dlen + strlen(src));

	/* Append bytes while reserving one byte for the terminator. */
	for (s = src; *s != '\0'; s++) {
		if (left != 1) {
			*d++ = *s;
			left--;
		}
	}
	*d = '\0';

	return (dlen + (size_t)(s - src));	/* count excludes the NUL */
}
#endif
| |
#ifndef HAVE_STRLCPY
/*
 * Copy src into the buffer dst of size siz.  At most siz-1 bytes are
 * copied and the result is always NUL-terminated (unless siz == 0,
 * in which case dst is not written at all).
 * Returns strlen(src); a return value >= siz means truncation occurred.
 */
size_t
strlcpy(char * __restrict dst, const char * __restrict src, size_t siz)
{
	const char *s = src;
	char *d = dst;
	size_t left = siz;

	/* Copy up to siz-1 bytes, stopping early at the source NUL. */
	if (left != 0) {
		while (--left != 0 && (*d++ = *s++) != '\0')
			;
	}

	/*
	 * Out of room: terminate dst (when there is a buffer at all) and
	 * keep walking src so the full source length can be reported.
	 */
	if (left == 0) {
		if (siz != 0)
			*d = '\0';	/* NUL-terminate dst */
		while (*s != '\0')
			s++;
		s++;	/* step past the NUL, matching the copy loop above */
	}

	return ((size_t)(s - src) - 1);	/* count excludes the NUL */
}
#endif
| |
|
| |
|
| /* Init and prepare scheduler functions */ |
/* Init and prepare scheduler functions */ |
| |
|
| /* |
/* |
|
Line 101 schedRegisterHooks(sched_root_task_t * __restrict root
|
Line 174 schedRegisterHooks(sched_root_task_t * __restrict root
|
| |
|
| root->root_hooks.hook_add.read = sched_hook_read; |
root->root_hooks.hook_add.read = sched_hook_read; |
| root->root_hooks.hook_add.write = sched_hook_write; |
root->root_hooks.hook_add.write = sched_hook_write; |
| root->root_hooks.hook_add.alarm = sched_hook_alarm; |
|
| #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE) |
#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE) |
| root->root_hooks.hook_add.rtc = sched_hook_rtc; |
root->root_hooks.hook_add.rtc = sched_hook_rtc; |
| #endif |
#endif |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
root->root_hooks.hook_add.alarm = sched_hook_alarm; |
| root->root_hooks.hook_add.node = sched_hook_node; |
root->root_hooks.hook_add.node = sched_hook_node; |
| root->root_hooks.hook_add.proc = sched_hook_proc; |
root->root_hooks.hook_add.proc = sched_hook_proc; |
| root->root_hooks.hook_add.signal = sched_hook_signal; |
root->root_hooks.hook_add.signal = sched_hook_signal; |
| #ifdef EVFILT_USER |
#ifdef EVFILT_USER |
| root->root_hooks.hook_add.user = sched_hook_user; |
root->root_hooks.hook_add.user = sched_hook_user; |
| #endif |
#endif |
| |
#endif /* KQ_SUPPORT */ |
| #ifdef HAVE_LIBPTHREAD |
#ifdef HAVE_LIBPTHREAD |
| root->root_hooks.hook_add.thread = sched_hook_thread; |
root->root_hooks.hook_add.thread = sched_hook_thread; |
| #endif |
#endif |
|
Line 224 schedEnd(sched_root_task_t ** __restrict root)
|
Line 299 schedEnd(sched_root_task_t ** __restrict root)
|
| if (!root || !*root) |
if (!root || !*root) |
| return -1; |
return -1; |
| |
|
| |
#if 0 |
| TAILQ_FOREACH_SAFE(task, &(*root)->root_read, task_node, tmp) |
TAILQ_FOREACH_SAFE(task, &(*root)->root_read, task_node, tmp) |
| |
printf("read=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_write, task_node, tmp) |
| |
printf("write=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_timer, task_node, tmp) |
| |
printf("timer=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_alarm, task_node, tmp) |
| |
printf("alarm=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_rtc, task_node, tmp) |
| |
printf("rtc=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_node, task_node, tmp) |
| |
printf("node=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_proc, task_node, tmp) |
| |
printf("proc=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_signal, task_node, tmp) |
| |
printf("signal=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_aio, task_node, tmp) |
| |
printf("aio=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_lio, task_node, tmp) |
| |
printf("lio=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_user, task_node, tmp) |
| |
printf("user=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_event, task_node, tmp) |
| |
printf("event=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_suspend, task_node, tmp) |
| |
printf("suspend=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_ready, task_node, tmp) |
| |
printf("ready=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_thread, task_node, tmp) |
| |
printf("thread=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_task, task_node, tmp) |
| |
printf("task=%p\n", task); |
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_unuse, task_node, tmp) |
| |
printf("unuse=%p\n", task); |
| |
fflush(stdout); |
| |
#endif |
| |
|
| |
TAILQ_FOREACH_SAFE(task, &(*root)->root_read, task_node, tmp) |
| schedCancel(task); |
schedCancel(task); |
| TAILQ_FOREACH_SAFE(task, &(*root)->root_write, task_node, tmp) |
TAILQ_FOREACH_SAFE(task, &(*root)->root_write, task_node, tmp) |
| schedCancel(task); |
schedCancel(task); |
|
Line 257 schedEnd(sched_root_task_t ** __restrict root)
|
Line 370 schedEnd(sched_root_task_t ** __restrict root)
|
| TAILQ_FOREACH_SAFE(task, &(*root)->root_task, task_node, tmp) |
TAILQ_FOREACH_SAFE(task, &(*root)->root_task, task_node, tmp) |
| schedCancel(task); |
schedCancel(task); |
| |
|
| #ifdef HAVE_LIBPTHREAD | SCHED_QLOCK((*root), taskUNUSE); |
| pthread_mutex_lock(&(*root)->root_mtx[taskUNUSE]); | |
| #endif | |
| TAILQ_FOREACH_SAFE(task, &(*root)->root_unuse, task_node, tmp) { |
TAILQ_FOREACH_SAFE(task, &(*root)->root_unuse, task_node, tmp) { |
| TAILQ_REMOVE(&(*root)->root_unuse, task, task_node); |
TAILQ_REMOVE(&(*root)->root_unuse, task, task_node); |
| free(task); |
free(task); |
| } |
} |
| #ifdef HAVE_LIBPTHREAD | SCHED_QUNLOCK((*root), taskUNUSE); |
| pthread_mutex_unlock(&(*root)->root_mtx[taskUNUSE]); | |
| #endif | |
| |
|
| if ((*root)->root_hooks.hook_root.fini) |
if ((*root)->root_hooks.hook_root.fini) |
| (*root)->root_hooks.hook_root.fini(*root, NULL); |
(*root)->root_hooks.hook_root.fini(*root, NULL); |
| |
|
| #ifdef HAVE_LIBPTHREAD |
#ifdef HAVE_LIBPTHREAD |
| for (i = 0; i < taskMAX; i++) | for (i = 0; i < taskMAX; i++) { |
| | SCHED_QUNLOCK(*root, i); |
| pthread_mutex_destroy(&(*root)->root_mtx[i]); |
pthread_mutex_destroy(&(*root)->root_mtx[i]); |
| |
} |
| #endif |
#endif |
| |
|
| free(*root); |
free(*root); |
|
Line 335 schedFetch(sched_root_task_t * __restrict root)
|
Line 446 schedFetch(sched_root_task_t * __restrict root)
|
| int |
int |
| schedTrigger(sched_task_t * __restrict task) |
schedTrigger(sched_task_t * __restrict task) |
| { |
{ |
| #ifdef KQ_DISABLE | #if SUP_ENABLE != KQ_SUPPORT |
| sched_SetErr(ENOTSUP, "disabled kqueue support"); |
sched_SetErr(ENOTSUP, "disabled kqueue support"); |
| return -1; |
return -1; |
| #else |
#else |
|
Line 361 schedTrigger(sched_task_t * __restrict task)
|
Line 472 schedTrigger(sched_task_t * __restrict task)
|
| |
|
| return 0; |
return 0; |
| #endif |
#endif |
| #endif /* KQ_DISABLE */ | #endif /* KQ_SUPPORT */ |
| } |
} |
| |
|
| /* |
/* |
|
Line 664 schedCancel(sched_task_t * __restrict task)
|
Line 775 schedCancel(sched_task_t * __restrict task)
|
| default: |
default: |
| queue = NULL; |
queue = NULL; |
| } |
} |
| if (queue) { | if (queue) |
| #ifdef HAVE_LIBPTHREAD | remove_task_from(task, queue); |
| pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[TASK_TYPE(task)]); | |
| #endif | |
| TAILQ_REMOVE(queue, TASK_ID(task), task_node); | |
| #ifdef HAVE_LIBPTHREAD | |
| pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[TASK_TYPE(task)]); | |
| #endif | |
| } | |
| if (TASK_TYPE(task) != taskUNUSE) |
if (TASK_TYPE(task) != taskUNUSE) |
| sched_unuseTask(task); |
sched_unuseTask(task); |
| |
|
|
Line 791 schedCancelby(sched_root_task_t * __restrict root, sch
|
Line 895 schedCancelby(sched_root_task_t * __restrict root, sch
|
| return 0; |
return 0; |
| } |
} |
| |
|
| #ifdef HAVE_LIBPTHREAD | SCHED_QLOCK(root, type); |
| pthread_mutex_lock(&root->root_mtx[type]); | |
| #endif | |
| TAILQ_FOREACH_SAFE(task, queue, task_node, tmp) { |
TAILQ_FOREACH_SAFE(task, queue, task_node, tmp) { |
| flg ^= flg; |
flg ^= flg; |
| switch (criteria) { |
switch (criteria) { |
|
Line 856 schedCancelby(sched_root_task_t * __restrict root, sch
|
Line 958 schedCancelby(sched_root_task_t * __restrict root, sch
|
| flg ^= flg; /* ok */ |
flg ^= flg; /* ok */ |
| } |
} |
| } |
} |
| #ifdef HAVE_LIBPTHREAD | SCHED_QUNLOCK(root, type); |
| pthread_mutex_unlock(&root->root_mtx[type]); | |
| #endif | |
| return flg; |
return flg; |
| } |
} |
| |
|
|
Line 939 schedPolling(sched_root_task_t * __restrict root, stru
|
Line 1040 schedPolling(sched_root_task_t * __restrict root, stru
|
| * return: -1 error or 0 ok |
* return: -1 error or 0 ok |
| */ |
*/ |
| int |
int |
| schedTermCondition(sched_root_task_t * __restrict root, intptr_t condValue) | schedTermCondition(sched_root_task_t * __restrict root, intptr_t * __restrict condValue) |
| { |
{ |
| if (!root) | if (!root && !condValue) |
| return -1; |
return -1; |
| |
|
| root->root_cond = condValue; | *root->root_cond = *condValue; |
| root->root_hooks.hook_exec.condition = sched_hook_condition; |
root->root_hooks.hook_exec.condition = sched_hook_condition; |
| return 0; |
return 0; |
| } |
} |
|
Line 967 schedResumeby(sched_root_task_t * __restrict root, u_c
|
Line 1068 schedResumeby(sched_root_task_t * __restrict root, u_c
|
| if (!root) |
if (!root) |
| return -1; |
return -1; |
| |
|
| #ifdef HAVE_LIBPTHREAD | SCHED_QLOCK(root, taskSUSPEND); |
| pthread_mutex_lock(&root->root_mtx[taskSUSPEND]); | |
| #endif | |
| TAILQ_FOREACH_SAFE(task, &root->root_suspend, task_node, tmp) { |
TAILQ_FOREACH_SAFE(task, &root->root_suspend, task_node, tmp) { |
| flg ^= flg; |
flg ^= flg; |
| switch (criteria) { |
switch (criteria) { |
|
Line 1002 schedResumeby(sched_root_task_t * __restrict root, u_c
|
Line 1101 schedResumeby(sched_root_task_t * __restrict root, u_c
|
| TAILQ_REMOVE(&root->root_suspend, task, task_node); |
TAILQ_REMOVE(&root->root_suspend, task, task_node); |
| |
|
| task->task_type = taskREADY; |
task->task_type = taskREADY; |
| #ifdef HAVE_LIBPTHREAD | insert_task_to(task, &root->root_ready); |
| pthread_mutex_lock(&root->root_mtx[taskREADY]); | |
| #endif | |
| TAILQ_INSERT_TAIL(&root->root_ready, task, task_node); | |
| #ifdef HAVE_LIBPTHREAD | |
| pthread_mutex_unlock(&root->root_mtx[taskREADY]); | |
| #endif | |
| |
|
| flg ^= flg; /* ok */ |
flg ^= flg; /* ok */ |
| } |
} |
| } |
} |
| #ifdef HAVE_LIBPTHREAD | SCHED_QUNLOCK(root, taskSUSPEND); |
| pthread_mutex_unlock(&root->root_mtx[taskSUSPEND]); | |
| #endif | |
| |
|
| return flg; |
return flg; |
| } |
} |