--- version 1.5, 2012/03/13 10:01:59
+++ version 1.6, 2012/05/14 12:09:13
@@ v1.5 line 51 / v1.6 line 51 @@ SUCH DAMAGE.
  * sched_hook_init() - Default INIT hook
  *
  * @root = root task
- * @data = optional data if !=NULL
+ * @arg = unused
  * return: <0 errors and 0 ok
  */
 void *
-sched_hook_init(void *root, void *data)
+sched_hook_init(void *root, void *arg __unused)
 {
     sched_root_task_t *r = root;
 
-    if (!r || r->root_data.iov_base || r->root_data.iov_len)
+    if (!r)
         return (void*) -1;
 
-    r->root_data.iov_base = malloc(sizeof(struct sched_IO));
-    if (!r->root_data.iov_base) {
-        LOGERR;
-        return (void*) -1;
-    } else {
-        r->root_data.iov_len = sizeof(struct sched_IO);
-        memset(r->root_data.iov_base, 0, r->root_data.iov_len);
-    }
 
     r->root_kq = kqueue();
     if (r->root_kq == -1) {
         LOGERR;
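With the malloc'd struct sched_IO gone, the init hook only has to create the kqueue descriptor: the kernel already remembers which descriptors and filters are registered, so no user-space fd_set shadow is needed. The following is a minimal standalone sketch of that bare setup; it is not libaitsched code, just an illustration under the assumption of a BSD system with <sys/event.h>:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct kevent chg, ev;
        int kq = kqueue();          /* all registration state lives in the kernel */

        if (kq == -1) {
            perror("kqueue");
            return 1;
        }

        /* watch stdin for readability; no user-space bookkeeping required */
        EV_SET(&chg, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, 0);
        if (kevent(kq, &chg, 1, &ev, 1, NULL) == -1)
            perror("kevent");
        else
            printf("fd %d is readable\n", (int) ev.ident);

        close(kq);
        return 0;
    }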
@@ v1.5 line 100 / v1.6 line 91 @@ sched_hook_fini(void *root, void *arg __unused)
         r->root_kq = 0;
     }
 
-    if (r->root_data.iov_base && r->root_data.iov_len) {
-        free(r->root_data.iov_base);
-        r->root_data.iov_base = NULL;
-        r->root_data.iov_len = 0;
-    }
 
     return NULL;
 }
 
@@ v1.5 line 119 / v1.6 line 104 @@ sched_hook_fini(void *root, void *arg __unused)
 void *
 sched_hook_cancel(void *task, void *arg __unused)
 {
-    struct sched_IO *io;
     sched_task_t *t = task;
     struct kevent chg[1];
     struct timespec timeout = { 0, 0 };
 
-    if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
+    if (!t || !TASK_ROOT(t))
         return (void*) -1;
-    else
-        io = ROOT_DATA(t->task_root);
 
     switch (TASK_TYPE(t)) {
     case taskREAD:
@@ v1.5 line 137 / v1.6 line 119 @@ sched_hook_cancel(void *task, void *arg __unused)
         EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
 #endif
         kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
-        FD_CLR(TASK_FD(t), &io->rfd);
         break;
     case taskWRITE:
 #ifdef __NetBSD__
@@ v1.5 line 146 / v1.6 line 127 @@ sched_hook_cancel(void *task, void *arg __unused)
         EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
 #endif
         kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
-        FD_CLR(TASK_FD(t), &io->wfd);
         break;
     default:
         break;
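Both cancel branches keep the same idiom: kevent() is called with a one-entry changelist, no output event array, and a zero timeout, so it only applies the EV_DELETE and returns at once. A hedged sketch of that idiom in isolation; drop_read_filter is an illustrative name, kq and fd are assumed to exist:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>

    /* Deregister a read filter without collecting events: with nevents == 0
     * kevent() just applies the changelist; the zero timeout mirrors the
     * hook code above. */
    static void
    drop_read_filter(int kq, int fd)
    {
        struct kevent chg;
        struct timespec ts = { 0, 0 };

        EV_SET(&chg, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
        (void) kevent(kq, &chg, 1, NULL, 0, &ts);
    }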
@@ v1.5 line 165 / v1.6 line 145 @@ sched_hook_cancel(void *task, void *arg __unused)
 void *
 sched_hook_read(void *task, void *arg __unused)
 {
-    struct sched_IO *io;
     sched_task_t *t = task;
     struct kevent chg[1];
     struct timespec timeout = { 0, 0 };
 
-    if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
+    if (!t || !TASK_ROOT(t))
         return (void*) -1;
-    else
-        io = ROOT_DATA(t->task_root);
-
-    if (FD_ISSET(TASK_FD(t), &io->rfd))
-        return NULL;
-    else
-        FD_SET(TASK_FD(t), &io->rfd);
 
 #ifdef __NetBSD__
     EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
 #else
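The FD_ISSET/FD_SET guard could be dropped because EV_ADD is effectively idempotent: re-adding an event for a descriptor and filter that are already registered modifies the existing kernel entry instead of creating a duplicate, so the hook does not need an "already watched" bitmap. A small hedged sketch of that property; watch_read is an illustrative helper, not part of the library:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>

    /* Calling this twice for the same fd is harmless: the second EV_ADD
     * updates the existing registration rather than adding a second one. */
    static int
    watch_read(int kq, int fd)
    {
        struct kevent chg;

        EV_SET(&chg, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
        return kevent(kq, &chg, 1, NULL, 0, NULL);
    }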
@@ v1.5 line 206 / v1.6 line 178 @@ sched_hook_read(void *task, void *arg __unused)
 void *
 sched_hook_write(void *task, void *arg __unused)
 {
-    struct sched_IO *io;
     sched_task_t *t = task;
     struct kevent chg[1];
     struct timespec timeout = { 0, 0 };
 
-    if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
+    if (!t || !TASK_ROOT(t))
         return (void*) -1;
-    else
-        io = ROOT_DATA(t->task_root);
-
-    if (FD_ISSET(TASK_FD(t), &io->wfd))
-        return NULL;
-    else
-        FD_SET(TASK_FD(t), &io->wfd);
 
 #ifdef __NetBSD__
     EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
 #else
@@ v1.5 line 247 / v1.6 line 211 @@ sched_hook_write(void *task, void *arg __unused)
 void *
 sched_hook_fetch(void *root, void *arg __unused)
 {
-    struct sched_IO *io;
     sched_root_task_t *r = root;
-    sched_task_t *task;
+    sched_task_t *task, *tmp;
     struct timespec now, m, mtmp;
     struct timespec *timeout;
     struct kevent evt[1], res[KQ_EVENTS];
     register int i;
     int en;
 
-    if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
+    if (!r)
         return NULL;
 
     /* get new task by queue priority */
-retry:
     while ((task = TAILQ_FIRST(&r->root_event))) {
 #ifdef HAVE_LIBPTHREAD
         pthread_mutex_lock(&r->root_mtx[taskEVENT]);
@@ v1.5 line 343 / v1.6 line 305 @@ retry:
         if (r->root_hooks.hook_exec.exception) {
             if (r->root_hooks.hook_exec.exception(r, NULL))
                 return NULL;
-        } else
+        } else if (errno != EINTR)
             LOGERR;
-#ifdef NDEBUG
-        /* kevent no exit by error, if non-debug version */
-        goto retry;
-#else
-        /* diagnostic exit from scheduler if kevent error occur */
         return NULL;
-#endif
     }
 
     now.tv_sec = now.tv_nsec = 0;
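The new errno != EINTR guard stops a signal-interrupted kevent() from being logged as an error: EINTR just means the wait was cut short by a signal, and the NDEBUG goto-retry path is no longer needed because the hook now returns NULL either way. The usual shape of that pattern, as a hedged standalone sketch (wait_one is an illustrative name, not library API):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <errno.h>
    #include <stdio.h>

    /* Wait for one event; a kevent() interrupted by a signal (EINTR) is not
     * a failure, so only genuine errors are reported and the caller may
     * simply try again. */
    static int
    wait_one(int kq, struct kevent *ev, const struct timespec *timeout)
    {
        int n = kevent(kq, NULL, 0, ev, 1, timeout);

        if (n == -1 && errno != EINTR)
            perror("kevent");
        return n == 1 ? 1 : 0;
    }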
@@ v1.5 line 362 / v1.6 line 318 @@ retry:
         /* Put read/write task to ready queue */
         switch (res[i].filter) {
         case EVFILT_READ:
-            TAILQ_FOREACH(task, &r->root_read, task_node) {
+            TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
                 if (TASK_FD(task) != ((intptr_t) res[i].udata))
                     continue;
                 /* remove read handle */
-                io = ROOT_DATA(task->task_root);
-                FD_CLR(TASK_FD(task), &io->rfd);
 
 #ifdef HAVE_LIBPTHREAD
                 pthread_mutex_lock(&r->root_mtx[taskREAD]);
 #endif
@@ v1.5 line 410 / v1.6 line 363 @@ retry:
             }
             break;
         case EVFILT_WRITE:
-            TAILQ_FOREACH(task, &r->root_write, task_node) {
+            TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
                 if (TASK_FD(task) != ((intptr_t) res[i].udata))
                     continue;
                 /* remove write handle */
-                io = ROOT_DATA(task->task_root);
-                FD_CLR(TASK_FD(task), &io->wfd);
 
 #ifdef HAVE_LIBPTHREAD
                 pthread_mutex_lock(&r->root_mtx[taskWRITE]);
 #endif
@@ v1.5 line 470 / v1.6 line 420 @@ retry:
     /* timer update & put in ready queue */
     clock_gettime(CLOCK_MONOTONIC, &now);
 
-    TAILQ_FOREACH(task, &r->root_timer, task_node)
+    TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
         if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
 #ifdef HAVE_LIBPTHREAD
             pthread_mutex_lock(&r->root_mtx[taskTIMER]);
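TAILQ_FOREACH_SAFE replaces TAILQ_FOREACH in the read, write and timer scans above because each loop body removes the current task from its queue; the _SAFE variant stashes the next element in tmp before the body runs, so unlinking (or freeing) the current one cannot derail the traversal. A small self-contained illustration of the difference, using BSD <sys/queue.h> with illustrative types rather than libaitsched ones:

    #include <sys/queue.h>
    #include <err.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int value;
        TAILQ_ENTRY(node) link;
    };
    TAILQ_HEAD(nodelist, node);

    /* Remove every even element while iterating: safe only with the _SAFE
     * form, because plain TAILQ_FOREACH would advance through the node that
     * was just unlinked and freed. */
    static void
    drop_even(struct nodelist *head)
    {
        struct node *n, *tmp;

        TAILQ_FOREACH_SAFE(n, head, link, tmp) {
            if (n->value % 2 == 0) {
                TAILQ_REMOVE(head, n, link);
                free(n);
            }
        }
    }

    int
    main(void)
    {
        struct nodelist head = TAILQ_HEAD_INITIALIZER(head);
        struct node *n;
        int i;

        for (i = 1; i <= 5; i++) {
            if ((n = malloc(sizeof(*n))) == NULL)
                err(1, "malloc");
            n->value = i;
            TAILQ_INSERT_TAIL(&head, n, link);
        }
        drop_even(&head);
        TAILQ_FOREACH(n, &head, link)
            printf("%d\n", n->value);   /* prints 1 3 5 */
        return 0;
    }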
@@ v1.5 line 516 / v1.6 line 466 @@ retry:
     r->root_eventlo_miss = 0;
 
     /* OK, lets get ready task !!! */
-    if (!(task = TAILQ_FIRST(&r->root_ready)))
-        goto retry;
+    task = TAILQ_FIRST(&r->root_ready);
+    if (!(task))
+        return NULL;
+
 #ifdef HAVE_LIBPTHREAD
     pthread_mutex_lock(&r->root_mtx[taskREADY]);
 #endif
@@ v1.5 line 548 / v1.6 line 500 @@ sched_hook_exception(void *root, void *arg)
 {
     sched_root_task_t *r = root;
 
-    if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
+    if (!r)
         return NULL;
 
     /* custom exception handling ... */
@@ v1.5 line 579 / v1.6 line 531 @@ sched_hook_condition(void *root, void *arg)
 {
     sched_root_task_t *r = root;
 
-    if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
+    if (!r)
         return NULL;
 
     return (void*) (r->root_cond - *(intptr_t*) arg);
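The condition hook still returns the difference between root_cond and the caller-supplied value squeezed into a void *; the subtlety is only that an integer result is round-tripped through a pointer via intptr_t. A tiny hedged illustration of that encoding trick (encode_diff is an illustrative helper, not library API):

    #include <stdint.h>
    #include <stdio.h>

    /* Encode an integer comparison result in a void *, the way the condition
     * hook does, and decode it again on the caller's side. */
    static void *
    encode_diff(intptr_t cond, intptr_t wanted)
    {
        return (void *) (cond - wanted);
    }

    int
    main(void)
    {
        void *res = encode_diff(42, 40);

        /* a non-NULL result means the values differ; cast back to read the gap */
        printf("difference = %ld\n", (long) (intptr_t) res);   /* prints 2 */
        return 0;
    }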