--- version 1.3.4.1, 2012/01/08 03:28:26
+++ version 1.5.2.4, 2012/05/10 15:30:18

@@ old line 12 / new line 12 @@ terms:

 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

-Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
 by Michael Pounov <misho@elwix.org>. All rights reserved.

 Redistribution and use in source and binary forms, with or without

@@ old line 49 / new line 49 @@ SUCH DAMAGE.

 /*
  * sched_hook_init() - Default INIT hook
  *
  * @root = root task
- * @data = optional data if !=NULL
+ * @arg = unused
  * return: <0 errors and 0 ok
  */
 void *
-sched_hook_init(void *root, void *data)
+sched_hook_init(void *root, void *arg __unused)
 {
 	sched_root_task_t *r = root;

-	if (!r || r->root_data.iov_base || r->root_data.iov_len)
+	if (!r)
 		return (void*) -1;

-	r->root_data.iov_base = malloc(sizeof(struct sched_IO));
-	if (!r->root_data.iov_base) {
-		LOGERR;
-		return (void*) -1;
-	} else {
-		r->root_data.iov_len = sizeof(struct sched_IO);
-		memset(r->root_data.iov_base, 0, r->root_data.iov_len);
-	}
-
 	r->root_kq = kqueue();
 	if (r->root_kq == -1) {
 		LOGERR;

@@ old line 81 / new line 73 @@ sched_hook_init(void *root, void *data)

 /*
  * sched_hook_fini() - Default FINI hook
  *
  * @root = root task
  * @arg = unused
  * return: <0 errors and 0 ok

@@ old line 98 / new line 91 @@ sched_hook_fini(void *root, void *arg __unused)

 		r->root_kq = 0;
 	}

-	if (r->root_data.iov_base && r->root_data.iov_len) {
-		free(r->root_data.iov_base);
-		r->root_data.iov_base = NULL;
-		r->root_data.iov_len = 0;
-	}
-
 	return NULL;
 }
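
struct sched_IO itself is never shown in this diff, but the FD_SET()/FD_CLR() calls that disappear below (in the CANCEL, READ and WRITE hooks) suggest it was little more than a pair of fd_set bitmaps hung off root_data. A rough sketch of its presumed shape (assumption; the real definition lived in the library's private headers):

#include <sys/select.h>

/* Assumed shape of the removed bookkeeping structure: one bitmap of
 * descriptors with a pending READ task, one for pending WRITE tasks.
 * The 1.5 branch drops it and relies on the kqueue filters alone. */
struct sched_IO {
	fd_set	rfd;	/* descriptors registered via sched_hook_read() */
	fd_set	wfd;	/* descriptors registered via sched_hook_write() */
};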

 /*
  * sched_hook_cancel() - Default CANCEL hook
  *
  * @task = current task
  * @arg = unused
  * return: <0 errors and 0 ok

@@ old line 116 / new line 104 @@ sched_hook_fini(void *root, void *arg __unused)

 void *
 sched_hook_cancel(void *task, void *arg __unused)
 {
-	struct sched_IO *io;
 	sched_task_t *t = task;
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };

-	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
+	if (!t || !TASK_ROOT(t))
 		return (void*) -1;
-	else
-		io = ROOT_DATA(t->task_root);

 	switch (TASK_TYPE(t)) {
 		case taskREAD:

@@ old line 134 / new line 119 @@ sched_hook_cancel(void *task, void *arg __unused)

 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
 #endif
 			kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
-			FD_CLR(TASK_FD(t), &io->rfd);
 			break;
 		case taskWRITE:
 #ifdef __NetBSD__

@@ old line 143 / new line 127 @@ sched_hook_cancel(void *task, void *arg __unused)

 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
 #endif
 			kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
-			FD_CLR(TASK_FD(t), &io->wfd);
 			break;
 		default:
 			break;

@@ old line 154 / new line 137 @@ sched_hook_cancel(void *task, void *arg __unused)

 /*
  * sched_hook_read() - Default READ hook
  *
  * @task = current task
  * @arg = unused
  * return: <0 errors and 0 ok

@@ old line 161 / new line 145 @@ sched_hook_cancel(void *task, void *arg __unused)

 void *
 sched_hook_read(void *task, void *arg __unused)
 {
-	struct sched_IO *io;
 	sched_task_t *t = task;
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };

-	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
+	if (!t || !TASK_ROOT(t))
 		return (void*) -1;
-	else
-		io = ROOT_DATA(t->task_root);

-	if (FD_ISSET(TASK_FD(t), &io->rfd))
-		return NULL;
-	else
-		FD_SET(TASK_FD(t), &io->rfd);
-
 #ifdef __NetBSD__
 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
 #else

@@ old line 194 / new line 170 @@ sched_hook_read(void *task, void *arg __unused)
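
Both the READ and WRITE hooks follow the standard kevent(2) pattern: fill a change record with EV_SET(), push it into the kernel queue with kevent(), and let the fetch hook collect the triggered events later. A minimal standalone sketch of that pattern, independent of the scheduler's types (illustration only, not library code):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdint.h>
#include <stdio.h>

/* Register fd for read readiness and block until it fires.  Follows the
 * non-NetBSD branch above, which stores the fd in udata as a pointer. */
static int
wait_readable(int kq, int fd)
{
	struct kevent chg, ev;

	EV_SET(&chg, fd, EVFILT_READ, EV_ADD, 0, 0, (void *)(intptr_t) fd);
	if (kevent(kq, &chg, 1, &ev, 1, NULL) == -1) {	/* NULL timeout: wait forever */
		perror("kevent");
		return -1;
	}
	return (int) ev.ident;		/* descriptor that became readable */
}

The #ifdef __NetBSD__ split in the hooks exists only because NetBSD declares the udata member as intptr_t, while the other BSDs use void *.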

 /*
  * sched_hook_write() - Default WRITE hook
  *
  * @task = current task
  * @arg = unused
  * return: <0 errors and 0 ok

@@ old line 201 / new line 178 @@ sched_hook_read(void *task, void *arg __unused)

 void *
 sched_hook_write(void *task, void *arg __unused)
 {
-	struct sched_IO *io;
 	sched_task_t *t = task;
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };

-	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
+	if (!t || !TASK_ROOT(t))
 		return (void*) -1;
-	else
-		io = ROOT_DATA(t->task_root);

-	if (FD_ISSET(TASK_FD(t), &io->wfd))
-		return NULL;
-	else
-		FD_SET(TASK_FD(t), &io->wfd);
-
 #ifdef __NetBSD__
 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
 #else

@@ old line 234 / new line 203 @@ sched_hook_write(void *task, void *arg __unused)

 /*
  * sched_hook_fetch() - Default FETCH hook
  *
  * @root = root task
  * @arg = unused
  * return: NULL error or !=NULL fetched task

@@ old line 241 / new line 211 @@ sched_hook_write(void *task, void *arg __unused)

 void *
 sched_hook_fetch(void *root, void *arg __unused)
 {
-	struct sched_IO *io;
 	sched_root_task_t *r = root;
-	sched_task_t *task;
+	sched_task_t *task, *tmp;
-	struct timeval now, m, mtmp;
+	struct timespec now, m, mtmp;
-	struct timespec nw, *timeout;
+	struct timespec *timeout;
 	struct kevent evt[1], res[KQ_EVENTS];
 	register int i;
 	int en;

-	if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
+	if (!r)
 		return NULL;

 	/* get new task by queue priority */
-retry:
 	while ((task = TAILQ_FIRST(&r->root_event))) {
 #ifdef HAVE_LIBPTHREAD
 		pthread_mutex_lock(&r->root_mtx[taskEVENT]);

@@ old line 293 / new line 261 @@ retry:

 	}

 #ifdef TIMER_WITHOUT_SORT
-	clock_gettime(CLOCK_MONOTONIC, &nw);
-	now.tv_sec = nw.tv_sec;
-	now.tv_usec = nw.tv_nsec / 1000;
+	clock_gettime(CLOCK_MONOTONIC, &now);

-	timerclear(&r->root_wait);
+	sched_timespecclear(&r->root_wait);
 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
-		if (!timerisset(&r->root_wait))
-			r->root_wait = TASK_TV(task);
-		else if (timercmp(&TASK_TV(task), &r->root_wait, -) < 0)
-			r->root_wait = TASK_TV(task);
+		if (!sched_timespecisset(&r->root_wait))
+			r->root_wait = TASK_TS(task);
+		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
+			r->root_wait = TASK_TS(task);
 	}

 	if (TAILQ_FIRST(&r->root_timer)) {
 		m = r->root_wait;
-		timersub(&m, &now, &mtmp);
+		sched_timespecsub(&m, &now, &mtmp);
 		r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
-		r->root_wait.tv_sec = r->root_wait.tv_usec = -1;
+		sched_timespecinf(&r->root_wait);
 	}
 #else
 	if (!TAILQ_FIRST(&r->root_eventlo) && (task = TAILQ_FIRST(&r->root_timer))) {
-		clock_gettime(CLOCK_MONOTONIC, &nw);
-		now.tv_sec = nw.tv_sec;
-		now.tv_usec = nw.tv_nsec / 1000;
+		clock_gettime(CLOCK_MONOTONIC, &now);

-		m = TASK_TV(task);
-		timersub(&m, &now, &mtmp);
+		m = TASK_TS(task);
+		sched_timespecsub(&m, &now, &mtmp);
 		r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
-		r->root_wait.tv_sec = r->root_wait.tv_usec = -1;
+		sched_timespecinf(&r->root_wait);
 	}
 #endif
 	/* if present member of eventLo, set NOWAIT */
 	if (TAILQ_FIRST(&r->root_eventlo))
-		timerclear(&r->root_wait);
+		sched_timespecclear(&r->root_wait);

-	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_usec != -1) {
-		nw.tv_sec = r->root_wait.tv_sec;
-		nw.tv_nsec = r->root_wait.tv_usec * 1000;
-		timeout = &nw;
-	} else /* wait INFTIM */
+	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
+		timeout = &r->root_wait;
+	else if (sched_timespecisinf(&r->root_poll))
 		timeout = NULL;
+	else
+		timeout = &r->root_poll;
 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
 		if (r->root_hooks.hook_exec.exception) {
 			if (r->root_hooks.hook_exec.exception(r, NULL))
 				return NULL;
-		} else
+		} else if (errno != EINTR)
 			LOGERR;
-#ifdef NDEBUG
-		/* kevent no exit by error, if non-debug version */
-		goto retry;
-#else
-		/* diagnostic exit from scheduler if kevent error occur */
 		return NULL;
-#endif
 	}

-	nw.tv_sec = nw.tv_nsec = 0;
+	now.tv_sec = now.tv_nsec = 0;
 	/* Go and catch the cat into pipes ... */
 	for (i = 0; i < en; i++) {
 		memcpy(evt, &res[i], sizeof evt);

@@ old line 360 / new line 318 @@ retry:

 		/* Put read/write task to ready queue */
 		switch (res[i].filter) {
 			case EVFILT_READ:
-				TAILQ_FOREACH(task, &r->root_read, task_node) {
+				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
 						continue;
 					/* remove read handle */
-					io = ROOT_DATA(task->task_root);
-					FD_CLR(TASK_FD(task), &io->rfd);

 #ifdef HAVE_LIBPTHREAD
 					pthread_mutex_lock(&r->root_mtx[taskREAD]);
 #endif

@@ old line 408 / new line 363 @@ retry:

 				}
 				break;
 			case EVFILT_WRITE:
-				TAILQ_FOREACH(task, &r->root_write, task_node) {
+				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
 						continue;
 					/* remove write handle */
-					io = ROOT_DATA(task->task_root);
-					FD_CLR(TASK_FD(task), &io->wfd);

 #ifdef HAVE_LIBPTHREAD
 					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
 #endif

@@ old line 456 / new line 408 @@ retry:

 				}
 				break;
 		}
-		if (kevent(r->root_kq, evt, 1, NULL, 0, &nw) == -1) {
+		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 			if (r->root_hooks.hook_exec.exception) {
 				if (r->root_hooks.hook_exec.exception(r, NULL))
 					return NULL;

@@ old line 466 / new line 418 @@ retry:

 	}

 	/* timer update & put in ready queue */
-	clock_gettime(CLOCK_MONOTONIC, &nw);
-	now.tv_sec = nw.tv_sec;
-	now.tv_usec = nw.tv_nsec / 1000;
+	clock_gettime(CLOCK_MONOTONIC, &now);

-	TAILQ_FOREACH(task, &r->root_timer, task_node)
-		if (timercmp(&now, &TASK_TV(task), -) >= 0) {
+	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
+		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
 #ifdef HAVE_LIBPTHREAD
 			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
 #endif

@@ old line 516 / new line 466 @@ retry:

 	r->root_eventlo_miss = 0;

 	/* OK, lets get ready task !!! */
-	if (!(task = TAILQ_FIRST(&r->root_ready)))
-		goto retry;
+	task = TAILQ_FIRST(&r->root_ready);
+	if (!(task))
+		return NULL;

 #ifdef HAVE_LIBPTHREAD
 	pthread_mutex_lock(&r->root_mtx[taskREADY]);
 #endif

@@ old line 538 / new line 490 @@ retry:
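
The fetch hook now keeps everything in struct timespec and leans on the sched_timespec*() helpers instead of converting to struct timeval for the timer*() macros. The helpers are defined in the library's headers, not in this diff; a plausible reconstruction, modeled on the classic <sys/time.h> timeval macros, might look like this (assumed, for reference only):

#include <time.h>

/* Assumed definitions of the timespec helpers used above; the real ones
 * ship with the library headers. */
#define sched_timespecclear(ts)		((ts)->tv_sec = (ts)->tv_nsec = 0)
#define sched_timespecisset(ts)		((ts)->tv_sec || (ts)->tv_nsec)
#define sched_timespecinf(ts)		((ts)->tv_sec = (ts)->tv_nsec = -1)
#define sched_timespecisinf(ts)		((ts)->tv_sec == -1 && (ts)->tv_nsec == -1)
/* 3rd argument is an operator, e.g. sched_timespeccmp(a, b, -) < 0 */
#define sched_timespeccmp(a, b, op)	(((a)->tv_sec == (b)->tv_sec) ? \
					    ((a)->tv_nsec op (b)->tv_nsec) : \
					    ((a)->tv_sec op (b)->tv_sec))
/* res = a - b, normalizing a negative nanosecond field */
#define sched_timespecsub(a, b, res)	do { \
		(res)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
		(res)->tv_nsec = (a)->tv_nsec - (b)->tv_nsec; \
		if ((res)->tv_nsec < 0) { \
			(res)->tv_sec--; \
			(res)->tv_nsec += 1000000000L; \
		} \
	} while (0)

The -1/-1 encoding for "infinite" is what the kevent() call above tests for before deciding to pass a NULL timeout.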

 /*
  * sched_hook_exception() - Default EXCEPTION hook
  *
  * @root = root task
  * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
  * return: <0 errors and 0 ok

@@ old line 547 / new line 500 @@ sched_hook_exception(void *root, void *arg)

 {
 	sched_root_task_t *r = root;

-	if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
+	if (!r)
 		return NULL;

 	/* custom exception handling ... */

@@ old line 564 / new line 517 @@ sched_hook_exception(void *root, void *arg)

 	/* default case! */
 	LOGERR;
 	return NULL;
+}
+
+/*
+ * sched_hook_condition() - Default CONDITION hook
+ *
+ * @root = root task
+ * @arg = killState from schedRun()
+ * return: NULL kill scheduler loop or !=NULL ok
+ */
+void *
+sched_hook_condition(void *root, void *arg)
+{
+	sched_root_task_t *r = root;
+
+	if (!r)
+		return NULL;
+
+	return (void*) (r->root_cond - *(intptr_t*) arg);
 }
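
The newly added CONDITION hook returns NULL once root_cond matches the killState value handed in by schedRun(), which is how the scheduler loop is told to stop. schedRun() itself is not part of this diff; a purely hypothetical sketch of how such a loop could drive these hooks:

/* Hypothetical run-loop sketch built on the hooks above; treat the names
 * and control flow here as an illustration only, not the library's code. */
static void
run_until_killed(sched_root_task_t *root, intptr_t killState)
{
	sched_task_t *task;

	/* sched_hook_condition() returns NULL once root_cond == killState */
	while (sched_hook_condition(root, &killState) != NULL) {
		/* sched_hook_fetch() sleeps in kevent() and hands back the
		 * next ready task, or NULL on error */
		task = sched_hook_fetch(root, NULL);
		if (task == NULL)
			break;
		/* ... dispatch the task's registered callback here ... */
	}
}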