| version 1.5.2.2, 2012/05/03 15:05:09 | version 1.6.4.2, 2012/05/30 08:34:44 |
|---|---|
| Line 107 sched_hook_cancel(void *task, void *arg __unused) | Line 107 sched_hook_cancel(void *task, void *arg __unused) |
| sched_task_t *t = task; | sched_task_t *t = task; |
| struct kevent chg[1]; | struct kevent chg[1]; |
| struct timespec timeout = { 0, 0 }; | struct timespec timeout = { 0, 0 }; |
| | uintptr_t ident; |
| if (!t || !TASK_ROOT(t)) | if (!t || !TASK_ROOT(t)) |
| return (void*) -1; | return (void*) -1; |
| Line 128 sched_hook_cancel(void *task, void *arg __unused) | Line 129 sched_hook_cancel(void *task, void *arg __unused) |
| #endif | #endif |
| kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout); | kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout); |
| break; | break; |
| | case taskALARM: |
| | if (TASK_DATA(t)) |
| | ident = (uintptr_t) TASK_DATA(t); |
| | else |
| | ident = (uintptr_t) TASK_FUNC(t); |
| | #ifdef __NetBSD__ |
| | EV_SET(&chg[0], ident, EVFILT_TIMER, EV_DELETE, 0, 0, (intptr_t) ident); |
| | #else |
| | EV_SET(&chg[0], ident, EVFILT_TIMER, EV_DELETE, 0, 0, (void*) ident); |
| | #endif |
| | kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout); |
| | break; |
| default: | default: |
| break; | break; |
| } | } |
| Line 202 sched_hook_write(void *task, void *arg __unused) | Line 215 sched_hook_write(void *task, void *arg __unused) |
| } | } |
| /* | /* |
| | * sched_hook_alarm() - Default ALARM hook |
| | * |
| | * @task = current task |
| | * @arg = unused |
| | * return: <0 errors and 0 ok |
| | */ |
| | void * |
| | sched_hook_alarm(void *task, void *arg __unused) |
| | { |
| | sched_task_t *t = task; |
| | struct kevent chg[1]; |
| | struct timespec timeout = { 0, 0 }; |
| | uintptr_t ident; |
| | if (!t || !TASK_ROOT(t)) |
| | return (void*) -1; |
| | if (TASK_DATA(t)) |
| | ident = (uintptr_t) TASK_DATA(t); |
| | else |
| | ident = (uintptr_t) TASK_FUNC(t); |
| | #ifdef __NetBSD__ |
| | EV_SET(&chg[0], ident, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, |
| | t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, |
| | (intptr_t) ident); |
| | #else |
| | EV_SET(&chg[0], ident, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, |
| | t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, |
| | (void*) ident); |
| | #endif |
| | if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) { |
| | if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| | TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| | else |
| | LOGERR; |
| | return (void*) -1; |
| | } |
| | return NULL; |
| | } |
| | /* |
| * sched_hook_fetch() - Default FETCH hook | * sched_hook_fetch() - Default FETCH hook |
| * | * |
| * @root = root task | * @root = root task |
| Line 212 void * | Line 268 void * |
| sched_hook_fetch(void *root, void *arg __unused) | sched_hook_fetch(void *root, void *arg __unused) |
| { | { |
| sched_root_task_t *r = root; | sched_root_task_t *r = root; |
| sched_task_t *task; | sched_task_t *task, *tmp; |
| struct timespec now, m, mtmp; | struct timespec now, m, mtmp; |
| struct timespec *timeout; | struct timespec *timeout; |
| struct kevent evt[1], res[KQ_EVENTS]; | struct kevent evt[1], res[KQ_EVENTS]; |
| register int i; | register int i; |
| int en; | int en; |
| | uintptr_t ident; |
| if (!r) | if (!r) |
| return NULL; | return NULL; |
| /* get new task by queue priority */ | /* get new task by queue priority */ |
| retry: | |
| while ((task = TAILQ_FIRST(&r->root_event))) { | while ((task = TAILQ_FIRST(&r->root_event))) { |
| #ifdef HAVE_LIBPTHREAD | #ifdef HAVE_LIBPTHREAD |
| pthread_mutex_lock(&r->root_mtx[taskEVENT]); | pthread_mutex_lock(&r->root_mtx[taskEVENT]); |
| Line 306 retry: | Line 362 retry: |
| if (r->root_hooks.hook_exec.exception) { | if (r->root_hooks.hook_exec.exception) { |
| if (r->root_hooks.hook_exec.exception(r, NULL)) | if (r->root_hooks.hook_exec.exception(r, NULL)) |
| return NULL; | return NULL; |
| } else | } else if (errno != EINTR) |
| LOGERR; | LOGERR; |
| #ifdef NDEBUG | |
| /* kevent no exit by error, if non-debug version */ | |
| goto retry; | |
| #else | |
| /* diagnostic exit from scheduler if kevent error occur */ | |
| return NULL; | return NULL; |
| #endif | |
| } | } |
| now.tv_sec = now.tv_nsec = 0; | now.tv_sec = now.tv_nsec = 0; |
| Line 325 retry: | Line 375 retry: |
| /* Put read/write task to ready queue */ | /* Put read/write task to ready queue */ |
| switch (res[i].filter) { | switch (res[i].filter) { |
| case EVFILT_READ: | case EVFILT_READ: |
| TAILQ_FOREACH(task, &r->root_read, task_node) { | TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) { |
| if (TASK_FD(task) != ((intptr_t) res[i].udata)) | if (TASK_FD(task) != ((intptr_t) res[i].udata)) |
| continue; | continue; |
| /* remove read handle */ | /* remove read handle */ |
| Line 370 retry: | Line 420 retry: |
| } | } |
| break; | break; |
| case EVFILT_WRITE: | case EVFILT_WRITE: |
| TAILQ_FOREACH(task, &r->root_write, task_node) { | TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) { |
| if (TASK_FD(task) != ((intptr_t) res[i].udata)) | if (TASK_FD(task) != ((intptr_t) res[i].udata)) |
| continue; | continue; |
| /* remove write handle */ | /* remove write handle */ |
| Line 414 retry: | Line 464 retry: |
| break; | break; |
| } | } |
| break; | break; |
| | case EVFILT_TIMER: |
| | TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) { |
| | if (TASK_DATA(task)) |
| | ident = (uintptr_t) TASK_DATA(task); |
| | else |
| | ident = (uintptr_t) TASK_FUNC(task); |
| | if (ident != ((uintptr_t) res[i].udata)) |
| | continue; |
| | /* remove alarm handle */ |
| | #ifdef HAVE_LIBPTHREAD |
| | pthread_mutex_lock(&r->root_mtx[taskALARM]); |
| | #endif |
| | TAILQ_REMOVE(&r->root_alarm, task, task_node); |
| | #ifdef HAVE_LIBPTHREAD |
| | pthread_mutex_unlock(&r->root_mtx[taskALARM]); |
| | #endif |
| | task->task_type = taskREADY; |
| | #ifdef HAVE_LIBPTHREAD |
| | pthread_mutex_lock(&r->root_mtx[taskREADY]); |
| | #endif |
| | TAILQ_INSERT_TAIL(&r->root_ready, task, task_node); |
| | #ifdef HAVE_LIBPTHREAD |
| | pthread_mutex_unlock(&r->root_mtx[taskREADY]); |
| | #endif |
| | break; |
| | } |
| | break; |
| } | } |
| if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) { | if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) { |
| if (r->root_hooks.hook_exec.exception) { | if (r->root_hooks.hook_exec.exception) { |
| Line 427 retry: | Line 504 retry: |
| /* timer update & put in ready queue */ | /* timer update & put in ready queue */ |
| clock_gettime(CLOCK_MONOTONIC, &now); | clock_gettime(CLOCK_MONOTONIC, &now); |
| TAILQ_FOREACH(task, &r->root_timer, task_node) | TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp) |
| if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) { | if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) { |
| #ifdef HAVE_LIBPTHREAD | #ifdef HAVE_LIBPTHREAD |
| pthread_mutex_lock(&r->root_mtx[taskTIMER]); | pthread_mutex_lock(&r->root_mtx[taskTIMER]); |