|
version 1.2.2.4, 2011/10/04 23:03:32
|
version 1.33, 2017/09/07 14:03:47
|
|
Line 12 terms:
|
Line 12 terms:
|
| All of the documentation and software included in the ELWIX and AITNET |
All of the documentation and software included in the ELWIX and AITNET |
| Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org> |
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org> |
| |
|
| Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 | Copyright 2004 - 2017 |
| by Michael Pounov <misho@elwix.org>. All rights reserved. |
by Michael Pounov <misho@elwix.org>. All rights reserved. |
| |
|
| Redistribution and use in source and binary forms, with or without |
Redistribution and use in source and binary forms, with or without |
|
Line 47 SUCH DAMAGE.
|
Line 47 SUCH DAMAGE.
|
| #include "hooks.h" |
#include "hooks.h" |
| |
|
| |
|
| |
static inline void |
| |
transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q) |
| |
{ |
| |
remove_task_from(t, q); |
| |
|
| |
t->task_type = taskREADY; |
| |
insert_task_to(t, &(TASK_ROOT(t))->root_ready); |
| |
} |
| |
|
| |
#ifdef HAVE_LIBPTHREAD
/*
 * _sched_threadWrapper() - thread entry point for taskTHREAD tasks
 *
 * Runs the scheduled call in the new thread, stores its return value into
 * the root task and recycles the task before terminating the thread.
 *
 * @t = task to execute
 * return: does not return; exits the thread with schedCall()'s result
 */
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	ret = schedCall(t);
	r->root_ret = ret;

	/* TASK_VAL holds the thread id while the task is live; clear on recycle */
	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
#endif
| |
|
| |
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
#if SUP_ENABLE == KQ_SUPPORT
/*
 * _sched_rtcWrapper() - dispatch a fired RTC (POSIX timer) task, kqueue build
 *
 * @t = carrier task; TASK_DATA() points at the real RTC task and
 *      TASK_DATLEN() holds the timer id to dispose of
 * return: result of the scheduled call, or NULL on bad arguments
 */
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		/* one-shot timer: destroy it before dispatching the task */
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
#else
/*
 * _sched_rtcSigWrapper() - SIGEV_SIGNAL handler that dispatches a fired RTC task
 *
 * @sig = delivered signal (unused)
 * @si  = siginfo; sival_ptr carries the RTC task
 * @uc  = ucontext (unused)
 */
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		/* one-shot timer: TASK_FLAG holds the timer id */
		timer_delete((timer_t) TASK_FLAG(task));

		TASK_RET(task) = (intptr_t) schedCall(task);

		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
#endif
#endif
| |
|
| /* |
/* |
| * sched_hook_init() - Default INIT hook |
* sched_hook_init() - Default INIT hook |
| |
* |
| * @root = root task |
* @root = root task |
| * @data = optional data if !=NULL | * @arg = unused |
| * return: <0 errors and 0 ok |
* return: <0 errors and 0 ok |
| */ |
*/ |
| void * |
void * |
| sched_hook_init(void *root, void *data) | sched_hook_init(void *root, void *arg __unused) |
| { |
{ |
| sched_root_task_t *r = root; |
sched_root_task_t *r = root; |
| |
|
| if (!r || r->root_data.iov_base || r->root_data.iov_len) | if (!r) |
| return (void*) -1; |
return (void*) -1; |
| |
|
| r->root_data.iov_base = malloc(sizeof(struct sched_IO)); | #if SUP_ENABLE == KQ_SUPPORT |
| if (!r->root_data.iov_base) { | r->root_kq = kqueue(); |
| | if (r->root_kq == -1) { |
| LOGERR; |
LOGERR; |
| return (void*) -1; |
return (void*) -1; |
| } else { |
|
| r->root_data.iov_len = sizeof(struct sched_IO); |
|
| memset(r->root_data.iov_base, 0, r->root_data.iov_len); |
|
| } |
} |
| #elif SUP_ENABLE == EP_SUPPORT |
| r->root_kq = kqueue(); | r->root_kq = epoll_create(KQ_EVENTS); |
| if (r->root_kq == -1) { |
if (r->root_kq == -1) { |
| LOGERR; |
LOGERR; |
| return (void*) -1; |
return (void*) -1; |
| } |
} |
| |
#else |
| |
r->root_kq ^= r->root_kq; |
| |
FD_ZERO(&r->root_fds[0]); |
| |
FD_ZERO(&r->root_fds[1]); |
| |
#endif |
| |
|
| return NULL; |
return NULL; |
| } |
} |
| |
|
| /* |
/* |
| * sched_hook_fini() - Default FINI hook |
* sched_hook_fini() - Default FINI hook |
| |
* |
| * @root = root task |
* @root = root task |
| * @arg = unused |
* @arg = unused |
| * return: <0 errors and 0 ok |
* return: <0 errors and 0 ok |
|
Line 93 sched_hook_fini(void *root, void *arg __unused)
|
Line 178 sched_hook_fini(void *root, void *arg __unused)
|
| if (!r) |
if (!r) |
| return (void*) -1; |
return (void*) -1; |
| |
|
| |
#if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT |
| if (r->root_kq > 2) { |
if (r->root_kq > 2) { |
| close(r->root_kq); |
close(r->root_kq); |
| r->root_kq = 0; |
r->root_kq = 0; |
| } |
} |
| |
#else |
| |
FD_ZERO(&r->root_fds[1]); |
| |
FD_ZERO(&r->root_fds[0]); |
| |
r->root_kq ^= r->root_kq; |
| |
#endif |
| |
|
| if (r->root_data.iov_base && r->root_data.iov_len) { |
|
| free(r->root_data.iov_base); |
|
| r->root_data.iov_base = NULL; |
|
| r->root_data.iov_len = 0; |
|
| } |
|
| |
|
| return NULL; |
return NULL; |
| } |
} |
| |
|
| /* |
/* |
| * sched_hook_cancel() - Default CANCEL hook |
* sched_hook_cancel() - Default CANCEL hook |
| |
* |
| * @task = current task |
* @task = current task |
| * @arg = unused |
* @arg = unused |
| * return: <0 errors and 0 ok |
* return: <0 errors and 0 ok |
|
Line 116 sched_hook_fini(void *root, void *arg __unused)
|
Line 202 sched_hook_fini(void *root, void *arg __unused)
|
| void * |
void * |
| sched_hook_cancel(void *task, void *arg __unused) |
sched_hook_cancel(void *task, void *arg __unused) |
| { |
{ |
| struct sched_IO *io; | sched_task_t *t = task, *tmp, *tt; |
| sched_task_t *t = task; | sched_root_task_t *r = NULL; |
| | int flg; |
| | #if SUP_ENABLE == KQ_SUPPORT |
| struct kevent chg[1]; |
struct kevent chg[1]; |
| struct timespec timeout = { 0, 0 }; |
struct timespec timeout = { 0, 0 }; |
| |
#elif SUP_ENABLE == EP_SUPPORT |
| |
struct epoll_event ee = { .events = 0, .data.fd = 0 }; |
| |
#else |
| |
register int i; |
| |
#endif |
| |
#ifdef AIO_SUPPORT |
| |
struct aiocb *acb; |
| |
#ifdef EVFILT_LIO |
| |
register int i = 0; |
| |
struct aiocb **acbs; |
| |
#endif /* EVFILT_LIO */ |
| |
#endif /* AIO_SUPPORT */ |
| |
|
| if (!t || !t->task_root || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root)) | if (!t || !TASK_ROOT(t)) |
| return (void*) -1; |
return (void*) -1; |
| else |
else |
| io = ROOT_DATA(t->task_root); | r = TASK_ROOT(t); |
| |
|
| switch (t->task_type) { | switch (TASK_TYPE(t)) { |
| case taskREAD: |
case taskREAD: |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp) |
| |
if (TASK_FD(tt) != TASK_FD(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| #ifdef __NetBSD__ |
#ifdef __NetBSD__ |
| EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t)); | EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, |
| | 0, 0, (intptr_t) TASK_FD(t)); |
| #else |
#else |
| EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t)); | EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, |
| | 0, 0, (void*) TASK_FD(t)); |
| #endif |
#endif |
| kevent(t->task_root->root_kq, chg, 1, NULL, 0, &timeout); | #elif SUP_ENABLE == EP_SUPPORT |
| FD_CLR(TASK_FD(t), &io->rfd); | ee.data.fd = TASK_FD(t); |
| | ee.events ^= ee.events; |
| | if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) |
| | ee.events = EPOLLOUT; |
| | |
| | if (flg < 2) |
| | FD_CLR(TASK_FD(t), &r->root_fds[0]); |
| | else |
| | ee.events |= EPOLLIN | EPOLLPRI; |
| | #else |
| | if (flg < 2) { |
| | FD_CLR(TASK_FD(t), &r->root_fds[0]); |
| | |
| | /* optimize select */ |
| | for (i = r->root_kq - 1; i > 2; i--) |
| | if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1])) |
| | break; |
| | if (i > 2) |
| | r->root_kq = i + 1; |
| | } |
| | #endif |
| break; |
break; |
| case taskWRITE: |
case taskWRITE: |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp) |
| |
if (TASK_FD(tt) != TASK_FD(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| #ifdef __NetBSD__ |
#ifdef __NetBSD__ |
| EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t)); | EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, |
| | 0, 0, (intptr_t) TASK_FD(t)); |
| #else |
#else |
| EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t)); | EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, |
| | 0, 0, (void*) TASK_FD(t)); |
| #endif |
#endif |
| kevent(t->task_root->root_kq, chg, 1, NULL, 0, &timeout); | #elif SUP_ENABLE == EP_SUPPORT |
| FD_CLR(TASK_FD(t), &io->wfd); | ee.data.fd = TASK_FD(t); |
| | ee.events ^= ee.events; |
| | if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) |
| | ee.events = EPOLLIN | EPOLLPRI; |
| | |
| | if (flg < 2) |
| | FD_CLR(TASK_FD(t), &r->root_fds[1]); |
| | else |
| | ee.events |= EPOLLOUT; |
| | #else |
| | if (flg < 2) { |
| | FD_CLR(TASK_FD(t), &r->root_fds[1]); |
| | |
| | /* optimize select */ |
| | for (i = r->root_kq - 1; i > 2; i--) |
| | if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1])) |
| | break; |
| | if (i > 2) |
| | r->root_kq = i + 1; |
| | } |
| | #endif |
| break; |
break; |
| default: | case taskALARM: |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | /* check for multi subscribers */ |
| | flg = 0; |
| | TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp) |
| | if (TASK_DATA(tt) != TASK_DATA(t)) |
| | continue; |
| | else |
| | flg++; |
| | #ifdef __NetBSD__ |
| | EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, |
| | 0, 0, (intptr_t) TASK_DATA(t)); |
| | #else |
| | EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, |
| | 0, 0, (void*) TASK_DATA(t)); |
| | #endif |
| | #endif |
| break; |
break; |
| |
case taskNODE: |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp) |
| |
if (TASK_FD(tt) != TASK_FD(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
#ifdef __NetBSD__ |
| |
EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (intptr_t) TASK_FD(t)); |
| |
#else |
| |
EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (void*) TASK_FD(t)); |
| |
#endif |
| |
#endif |
| |
break; |
| |
case taskPROC: |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp) |
| |
if (TASK_VAL(tt) != TASK_VAL(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
#ifdef __NetBSD__ |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (intptr_t) TASK_VAL(t)); |
| |
#else |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (void*) TASK_VAL(t)); |
| |
#endif |
| |
#endif |
| |
break; |
| |
case taskSIGNAL: |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp) |
| |
if (TASK_VAL(tt) != TASK_VAL(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
#ifdef __NetBSD__ |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (intptr_t) TASK_VAL(t)); |
| |
#else |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (void*) TASK_VAL(t)); |
| |
#endif |
| |
/* restore signal */ |
| |
if (flg < 2) |
| |
signal(TASK_VAL(t), SIG_DFL); |
| |
#endif |
| |
break; |
| |
#ifdef AIO_SUPPORT |
| |
case taskAIO: |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp) |
| |
if (TASK_VAL(tt) != TASK_VAL(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
#ifdef __NetBSD__ |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (intptr_t) TASK_VAL(t)); |
| |
#else |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (void*) TASK_VAL(t)); |
| |
#endif |
| |
acb = (struct aiocb*) TASK_VAL(t); |
| |
if (acb) { |
| |
if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED) |
| |
aio_return(acb); |
| |
free(acb); |
| |
TASK_VAL(t) = 0; |
| |
} |
| |
#endif |
| |
break; |
| |
#ifdef EVFILT_LIO |
| |
case taskLIO: |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp) |
| |
if (TASK_VAL(tt) != TASK_VAL(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
#ifdef __NetBSD__ |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (intptr_t) TASK_VAL(t)); |
| |
#else |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (void*) TASK_VAL(t)); |
| |
#endif |
| |
acbs = (struct aiocb**) TASK_VAL(t); |
| |
if (acbs) { |
| |
for (i = 0; i < TASK_DATLEN(t); i++) { |
| |
if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED) |
| |
aio_return(acbs[i]); |
| |
free(acbs[i]); |
| |
} |
| |
free(acbs); |
| |
TASK_VAL(t) = 0; |
| |
} |
| |
#endif |
| |
break; |
| |
#endif /* EVFILT_LIO */ |
| |
#endif /* AIO_SUPPORT */ |
| |
#ifdef EVFILT_USER |
| |
case taskUSER: |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp) |
| |
if (TASK_VAL(tt) != TASK_VAL(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
#ifdef __NetBSD__ |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (intptr_t) TASK_VAL(t)); |
| |
#else |
| |
EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, |
| |
0, 0, (void*) TASK_VAL(t)); |
| |
#endif |
| |
#endif |
| |
break; |
| |
#endif /* EVFILT_USER */ |
| |
case taskTHREAD: |
| |
#ifdef HAVE_LIBPTHREAD |
| |
if (TASK_VAL(t)) { |
| |
pthread_cancel((pthread_t) TASK_VAL(t)); |
| |
pthread_join((pthread_t) TASK_VAL(t), NULL); |
| |
if (TASK_VAL(t)) { |
| |
transit_task2unuse(t, &(TASK_ROOT(t))->root_thread); |
| |
TASK_VAL(t) = 0; |
| |
} |
| |
} |
| |
#endif |
| |
return NULL; |
| |
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \ |
| |
defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE) |
| |
case taskRTC: |
| |
timer_delete((timer_t) TASK_FLAG(t)); |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
schedCancel((sched_task_t*) TASK_RET(t)); |
| |
#else |
| |
/* check for multi subscribers */ |
| |
flg = 0; |
| |
TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp) |
| |
if (TASK_DATA(tt) != TASK_DATA(t)) |
| |
continue; |
| |
else |
| |
flg++; |
| |
|
| |
/* restore signal */ |
| |
if (flg < 2) |
| |
signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL); |
| |
#endif |
| |
return NULL; |
| |
#endif /* HAVE_TIMER_CREATE */ |
| |
default: |
| |
return NULL; |
| } |
} |
| |
|
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
kevent(r->root_kq, chg, 1, NULL, 0, &timeout); |
| |
#elif SUP_ENABLE == EP_SUPPORT |
| |
epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee); |
| |
#endif |
| return NULL; |
return NULL; |
| } |
} |
| |
|
| |
#ifdef HAVE_LIBPTHREAD
/*
 * sched_hook_thread() - Default THREAD hook
 *
 * @task = current task
 * @arg = pthread attributes
 * return: <0 errors and 0 ok
 */
void *
sched_hook_thread(void *task, void *arg)
{
	sched_task_t *t = task;
	pthread_t tid;
	sigset_t s, o;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* block all signals while the new thread starts so it inherits a
	 * fully-blocked mask; the wrapper runs the task body */
	sigfillset(&s);
	pthread_sigmask(SIG_BLOCK, &s, &o);
	errno = pthread_create(&tid, (pthread_attr_t*) arg, 
			(void *(*)(void*)) _sched_threadWrapper, t);
	pthread_sigmask(SIG_SETMASK, &o, NULL);

	if (errno) {
		LOGERR;
		return (void*) -1;
	} else
		TASK_VAL(t) = (u_long) tid;

	if (!TASK_ISLOCKED(t))
		TASK_LOCK(t);

	return NULL;
}
#endif
| |
|
| |
/* |
| * sched_hook_read() - Default READ hook |
* sched_hook_read() - Default READ hook |
| |
* |
| * @task = current task |
* @task = current task |
| * @arg = unused |
* @arg = unused |
| * return: <0 errors and 0 ok |
* return: <0 errors and 0 ok |
|
Line 161 sched_hook_cancel(void *task, void *arg __unused)
|
Line 549 sched_hook_cancel(void *task, void *arg __unused)
|
| void * |
void * |
| sched_hook_read(void *task, void *arg __unused) |
sched_hook_read(void *task, void *arg __unused) |
| { |
{ |
| struct sched_IO *io; |
|
| sched_task_t *t = task; |
sched_task_t *t = task; |
| |
sched_root_task_t *r = NULL; |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| struct kevent chg[1]; |
struct kevent chg[1]; |
| struct timespec timeout = { 0, 0 }; |
struct timespec timeout = { 0, 0 }; |
| |
#elif SUP_ENABLE == EP_SUPPORT |
| |
struct epoll_event ee; |
| |
int flg = 0; |
| |
#endif |
| |
|
| if (!t || !t->task_root || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root)) | if (!t || !TASK_ROOT(t)) |
| return (void*) -1; |
return (void*) -1; |
| else |
else |
| io = ROOT_DATA(t->task_root); | r = TASK_ROOT(t); |
| |
|
| if (FD_ISSET(TASK_FD(t), &io->rfd)) | #if SUP_ENABLE == KQ_SUPPORT |
| return NULL; | |
| else | |
| FD_SET(TASK_FD(t), &io->rfd); | |
| |
| #ifdef __NetBSD__ |
#ifdef __NetBSD__ |
| EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (intptr_t) TASK_FD(t)); | EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t)); |
| #else |
#else |
| EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (void*) TASK_FD(t)); | EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t)); |
| #endif |
#endif |
| if (kevent(t->task_root->root_kq, chg, 1, NULL, 0, &timeout) == -1) { | if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) { |
| if (t->task_root->root_hooks.hook_exec.exception) | if (r->root_hooks.hook_exec.exception) |
| t->task_root->root_hooks.hook_exec.exception(t->task_root, NULL); | r->root_hooks.hook_exec.exception(r, NULL); |
| else |
else |
| LOGERR; |
LOGERR; |
| return (void*) -1; |
return (void*) -1; |
| } |
} |
| |
#elif SUP_ENABLE == EP_SUPPORT |
| |
ee.data.fd = TASK_FD(t); |
| |
ee.events = EPOLLIN | EPOLLPRI; |
| |
if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) |
| |
flg |= 1; |
| |
if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) { |
| |
flg |= 2; |
| |
ee.events |= EPOLLOUT; |
| |
} |
| |
|
| |
if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) { |
| |
if (r->root_hooks.hook_exec.exception) |
| |
r->root_hooks.hook_exec.exception(r, NULL); |
| |
else |
| |
LOGERR; |
| |
return (void*) -1; |
| |
} else |
| |
FD_SET(TASK_FD(t), &r->root_fds[0]); |
| |
#else |
| |
FD_SET(TASK_FD(t), &r->root_fds[0]); |
| |
if (TASK_FD(t) >= r->root_kq) |
| |
r->root_kq = TASK_FD(t) + 1; |
| |
#endif |
| |
|
| return NULL; |
return NULL; |
| } |
} |
| |
|
| /* |
/* |
| * sched_hook_write() - Default WRITE hook |
* sched_hook_write() - Default WRITE hook |
| |
* |
| * @task = current task |
* @task = current task |
| * @arg = unused |
* @arg = unused |
| * return: <0 errors and 0 ok |
* return: <0 errors and 0 ok |
|
Line 201 sched_hook_read(void *task, void *arg __unused)
|
Line 614 sched_hook_read(void *task, void *arg __unused)
|
| void * |
void * |
| sched_hook_write(void *task, void *arg __unused) |
sched_hook_write(void *task, void *arg __unused) |
| { |
{ |
| struct sched_IO *io; |
|
| sched_task_t *t = task; |
sched_task_t *t = task; |
| |
sched_root_task_t *r = NULL; |
| |
#if SUP_ENABLE == KQ_SUPPORT |
| struct kevent chg[1]; |
struct kevent chg[1]; |
| struct timespec timeout = { 0, 0 }; |
struct timespec timeout = { 0, 0 }; |
| |
#elif SUP_ENABLE == EP_SUPPORT |
| |
struct epoll_event ee; |
| |
int flg = 0; |
| |
#endif |
| |
|
| if (!t || !t->task_root || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root)) | if (!t || !TASK_ROOT(t)) |
| return (void*) -1; |
return (void*) -1; |
| else |
else |
| io = ROOT_DATA(t->task_root); | r = TASK_ROOT(t); |
| |
|
| if (FD_ISSET(TASK_FD(t), &io->wfd)) | #if SUP_ENABLE == KQ_SUPPORT |
| return NULL; | |
| else | |
| FD_SET(TASK_FD(t), &io->wfd); | |
| |
| #ifdef __NetBSD__ |
#ifdef __NetBSD__ |
| EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (intptr_t) TASK_FD(t)); | EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t)); |
| #else |
#else |
| EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (void*) TASK_FD(t)); | EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t)); |
| #endif |
#endif |
| if (kevent(t->task_root->root_kq, chg, 1, NULL, 0, &timeout) == -1) { | if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) { |
| if (t->task_root->root_hooks.hook_exec.exception) | if (r->root_hooks.hook_exec.exception) |
| t->task_root->root_hooks.hook_exec.exception(t->task_root, NULL); | r->root_hooks.hook_exec.exception(r, NULL); |
| else |
else |
| LOGERR; |
LOGERR; |
| return (void*) -1; |
return (void*) -1; |
| } |
} |
| |
#elif SUP_ENABLE == EP_SUPPORT |
| |
ee.data.fd = TASK_FD(t); |
| |
ee.events = EPOLLOUT; |
| |
|
| |
if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) { |
| |
flg |= 1; |
| |
ee.events |= EPOLLIN | EPOLLPRI; |
| |
} |
| |
if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) |
| |
flg |= 2; |
| |
|
| |
if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) { |
| |
if (r->root_hooks.hook_exec.exception) |
| |
r->root_hooks.hook_exec.exception(r, NULL); |
| |
else |
| |
LOGERR; |
| |
return (void*) -1; |
| |
} else |
| |
FD_SET(TASK_FD(t), &r->root_fds[1]); |
| |
#else |
| |
FD_SET(TASK_FD(t), &r->root_fds[1]); |
| |
if (TASK_FD(t) >= r->root_kq) |
| |
r->root_kq = TASK_FD(t) + 1; |
| |
#endif |
| |
|
| return NULL; |
return NULL; |
| } |
} |
| |
|
| /* |
/* |
| * sched_hook_fetch() - Default FETCH hook | * sched_hook_alarm() - Default ALARM hook |
| * @root = root task | * |
| | * @task = current task |
| * @arg = unused |
* @arg = unused |
| * return: NULL error or !=NULL fetched task | * return: <0 errors and 0 ok |
| */ |
*/ |
| void * |
void * |
| sched_hook_fetch(void *root, void *arg __unused) | sched_hook_alarm(void *task, void *arg __unused) |
| { |
{ |
| struct sched_IO *io; | #if SUP_ENABLE == KQ_SUPPORT |
| sched_root_task_t *r = root; | sched_task_t *t = task; |
| sched_task_t *task; | struct kevent chg[1]; |
| struct timeval now, m, mtmp; | struct timespec timeout = { 0, 0 }; |
| struct timespec nw, *timeout; | |
| struct kevent evt[1], res[KQ_EVENTS]; | |
| register int i; | |
| int en; | |
| |
|
| if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r)) | if (!t || !TASK_ROOT(t)) |
| return NULL; | return (void*) -1; |
| |
|
| /* get new task by queue priority */ | #ifdef __NetBSD__ |
| retry: | EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, |
| while ((task = TAILQ_FIRST(&r->root_event))) { | t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, |
| TAILQ_REMOVE(&r->root_event, task, task_node); | (intptr_t) TASK_DATA(t)); |
| task->task_type = taskUNUSE; | #else |
| TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node); | EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, |
| return task; | t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, |
| | (void*) TASK_DATA(t)); |
| | #endif |
| | if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) { |
| | if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| | TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| | else |
| | LOGERR; |
| | return (void*) -1; |
| } |
} |
| while ((task = TAILQ_FIRST(&r->root_ready))) { |
|
| TAILQ_REMOVE(&r->root_ready, task, task_node); |
|
| task->task_type = taskUNUSE; |
|
| TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node); |
|
| return task; |
|
| } |
|
| |
|
| #ifdef TIMER_WITHOUT_SORT | #endif |
| clock_gettime(CLOCK_MONOTONIC, &nw); | return NULL; |
| now.tv_sec = nw.tv_sec; | } |
| now.tv_usec = nw.tv_nsec / 1000; | |
| |
|
| timerclear(&r->root_wait); | /* |
| TAILQ_FOREACH(task, &r->root_timer, task_node) { | * sched_hook_node() - Default NODE hook |
| if (!timerisset(&r->root_wait)) | * |
| r->root_wait = TASK_TV(task); | * @task = current task |
| else if (timercmp(&TASK_TV(task), &r->root_wait, -) < 0) | * @arg = unused |
| r->root_wait = TASK_TV(task); | * return: <0 errors and 0 ok |
| } | */ |
| | void * |
| | sched_hook_node(void *task, void *arg __unused) |
| | { |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | sched_task_t *t = task; |
| | struct kevent chg[1]; |
| | struct timespec timeout = { 0, 0 }; |
| |
|
| if (TAILQ_FIRST(&r->root_timer)) { | if (!t || !TASK_ROOT(t)) |
| m = r->root_wait; | return (void*) -1; |
| timersub(&m, &now, &mtmp); | |
| r->root_wait = mtmp; | #ifdef __NetBSD__ |
| } else { | EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, |
| /* set wait INFTIM */ | NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | |
| r->root_wait.tv_sec = r->root_wait.tv_usec = -1; | NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t)); |
| } | |
| #else |
#else |
| if (!TAILQ_FIRST(&r->root_eventlo) && (task = TAILQ_FIRST(&r->root_timer))) { | EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, |
| clock_gettime(CLOCK_MONOTONIC, &nw); | NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | |
| now.tv_sec = nw.tv_sec; | NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t)); |
| now.tv_usec = nw.tv_nsec / 1000; | #endif |
| | if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) { |
| | if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| | TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| | else |
| | LOGERR; |
| | return (void*) -1; |
| | } |
| |
|
| m = TASK_TV(task); | #endif |
| timersub(&m, &now, &mtmp); | return NULL; |
| r->root_wait = mtmp; | } |
| } else { | |
| /* set wait INFTIM */ | /* |
| r->root_wait.tv_sec = r->root_wait.tv_usec = -1; | * sched_hook_proc() - Default PROC hook |
| | * |
| | * @task = current task |
| | * @arg = unused |
| | * return: <0 errors and 0 ok |
| | */ |
| | void * |
| | sched_hook_proc(void *task, void *arg __unused) |
| | { |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | sched_task_t *t = task; |
| | struct kevent chg[1]; |
| | struct timespec timeout = { 0, 0 }; |
| | |
| | if (!t || !TASK_ROOT(t)) |
| | return (void*) -1; |
| | |
| | #ifdef __NetBSD__ |
| | EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, |
| | NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t)); |
| | #else |
| | EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, |
| | NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t)); |
| | #endif |
| | if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) { |
| | if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| | TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| | else |
| | LOGERR; |
| | return (void*) -1; |
| } |
} |
| |
|
| #endif |
#endif |
| /* if present member of eventLo, set NOWAIT */ | return NULL; |
| if (TAILQ_FIRST(&r->root_eventlo)) | } |
| timerclear(&r->root_wait); | |
| |
|
| if (r->root_wait.tv_sec != -1 && r->root_wait.tv_usec != -1) { | /* |
| nw.tv_sec = r->root_wait.tv_sec; | * sched_hook_signal() - Default SIGNAL hook |
| nw.tv_nsec = r->root_wait.tv_usec * 1000; | * |
| timeout = &nw; | * @task = current task |
| } else /* wait INFTIM */ | * @arg = unused |
| timeout = NULL; | * return: <0 errors and 0 ok |
| if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) { | */ |
| if (r->root_hooks.hook_exec.exception) { | void * |
| if (r->root_hooks.hook_exec.exception(r, NULL)) | sched_hook_signal(void *task, void *arg __unused) |
| return NULL; | { |
| } else | #if SUP_ENABLE == KQ_SUPPORT |
| | sched_task_t *t = task; |
| | struct kevent chg[1]; |
| | struct timespec timeout = { 0, 0 }; |
| | |
| | if (!t || !TASK_ROOT(t)) |
| | return (void*) -1; |
| | |
| | /* ignore signal */ |
| | signal(TASK_VAL(t), SIG_IGN); |
| | |
| | #ifdef __NetBSD__ |
| | EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t)); |
| | #else |
| | EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t)); |
| | #endif |
| | if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) { |
| | if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| | TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| | else |
| LOGERR; |
LOGERR; |
| #ifdef NDEBUG | return (void*) -1; |
| /* kevent no exit by error, if non-debug version */ | } |
| goto retry; | #endif |
| | return NULL; |
| | } |
| | |
/*
 * sched_hook_user() - Default USER hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
#ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* TASK_DATLEN carries the NOTE_* fflags for the user event */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif
	return NULL;
}
#endif
| | |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | static inline void |
| | fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r) |
| | { |
| | struct kevent evt[1]; |
| | register int i; |
| | sched_task_t *task, *tmp; |
| | struct timespec now = { 0, 0 }; |
| | #ifdef AIO_SUPPORT |
| | int len, fd; |
| | struct aiocb *acb; |
| | #ifdef EVFILT_LIO |
| | int l; |
| | off_t off; |
| | struct aiocb **acbs; |
| | struct iovec *iv; |
| | #endif /* EVFILT_LIO */ |
| | #endif /* AIO_SUPPORT */ |
| | |
| for (i = 0; i < en; i++) { |
for (i = 0; i < en; i++) { |
| memcpy(evt, &res[i], sizeof evt); |
memcpy(evt, &res[i], sizeof evt); |
| evt->flags = EV_DELETE; |
evt->flags = EV_DELETE; |
| /* Put read/write task to ready queue */ |
/* Put read/write task to ready queue */ |
| switch (res[i].filter) { |
switch (res[i].filter) { |
| case EVFILT_READ: |
case EVFILT_READ: |
| TAILQ_FOREACH(task, &r->root_read, task_node) { | TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) { |
| if (TASK_FD(task) != ((intptr_t) res[i].udata)) | if (TASK_FD(task) == ((intptr_t) res[i].udata)) { |
| continue; | TASK_RET(task) = res[i].data; |
| /* remove read handle */ | TASK_FLAG(task) = (u_long) res[i].fflags; |
| io = ROOT_DATA(task->task_root); | |
| FD_CLR(TASK_FD(task), &io->rfd); | |
| |
|
| TAILQ_REMOVE(&r->root_read, task, task_node); | /* remove read handle */ |
| if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) { | remove_task_from(task, &r->root_read); |
| if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) { | |
| task->task_type = taskUNUSE; | if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) { |
| TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node); | if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) { |
| | task->task_type = taskUNUSE; |
| | insert_task_to(task, &r->root_unuse); |
| | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| | } |
| } else { |
} else { |
| task->task_type = taskREADY; |
task->task_type = taskREADY; |
| TAILQ_INSERT_TAIL(&r->root_ready, task, task_node); | insert_task_to(task, &r->root_ready); |
| } |
} |
| } else { | break; |
| task->task_type = taskREADY; | |
| TAILQ_INSERT_TAIL(&r->root_ready, task, task_node); | |
| } |
} |
| break; |
|
| } |
} |
| break; |
break; |
| case EVFILT_WRITE: |
case EVFILT_WRITE: |
| TAILQ_FOREACH(task, &r->root_write, task_node) { | TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) { |
| if (TASK_FD(task) != ((intptr_t) res[i].udata)) | if (TASK_FD(task) == ((intptr_t) res[i].udata)) { |
| continue; | TASK_RET(task) = res[i].data; |
| /* remove write handle */ | TASK_FLAG(task) = (u_long) res[i].fflags; |
| io = ROOT_DATA(task->task_root); | |
| FD_CLR(TASK_FD(task), &io->wfd); | |
| |
|
| TAILQ_REMOVE(&r->root_write, task, task_node); | /* remove write handle */ |
| if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) { | remove_task_from(task, &r->root_write); |
| if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) { | |
| task->task_type = taskUNUSE; | if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) { |
| TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node); | if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) { |
| | task->task_type = taskUNUSE; |
| | insert_task_to(task, &r->root_unuse); |
| | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| | } |
| } else { |
} else { |
| task->task_type = taskREADY; |
task->task_type = taskREADY; |
| TAILQ_INSERT_TAIL(&r->root_ready, task, task_node); | insert_task_to(task, &r->root_ready); |
| } |
} |
| |
break; |
| |
} |
| |
} |
| |
break; |
| |
case EVFILT_TIMER: |
| |
TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) { |
| |
if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) { |
| |
TASK_RET(task) = res[i].data; |
| |
TASK_FLAG(task) = (u_long) res[i].fflags; |
| |
|
| |
/* remove alarm handle */ |
| |
transit_task2ready(task, &r->root_alarm); |
| |
break; |
| |
} |
| |
} |
| |
break; |
| |
case EVFILT_VNODE: |
| |
TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) { |
| |
if (TASK_FD(task) == ((intptr_t) res[i].udata)) { |
| |
TASK_RET(task) = res[i].data; |
| |
TASK_FLAG(task) = (u_long) res[i].fflags; |
| |
|
| |
/* remove node handle */ |
| |
transit_task2ready(task, &r->root_node); |
| |
break; |
| |
} |
| |
} |
| |
break; |
| |
case EVFILT_PROC: |
| |
TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) { |
| |
if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) { |
| |
TASK_RET(task) = res[i].data; |
| |
TASK_FLAG(task) = (u_long) res[i].fflags; |
| |
|
| |
/* remove proc handle */ |
| |
transit_task2ready(task, &r->root_proc); |
| |
break; |
| |
} |
| |
} |
| |
break; |
| |
case EVFILT_SIGNAL: |
| |
TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) { |
| |
if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) { |
| |
TASK_RET(task) = res[i].data; |
| |
TASK_FLAG(task) = (u_long) res[i].fflags; |
| |
|
| |
/* remove signal handle */ |
| |
transit_task2ready(task, &r->root_signal); |
| |
break; |
| |
} |
| |
} |
| |
break; |
| |
#ifdef AIO_SUPPORT |
| |
case EVFILT_AIO: |
| |
TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) { |
| |
acb = (struct aiocb*) TASK_VAL(task); |
| |
if (acb == ((struct aiocb*) res[i].udata)) { |
| |
TASK_RET(task) = res[i].data; |
| |
TASK_FLAG(task) = (u_long) res[i].fflags; |
| |
|
| |
/* remove user handle */ |
| |
transit_task2ready(task, &r->root_aio); |
| |
|
| |
fd = acb->aio_fildes; |
| |
if ((len = aio_return(acb)) != -1) { |
| |
if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1) |
| |
LOGERR; |
| |
} else |
| |
LOGERR; |
| |
free(acb); |
| |
TASK_DATLEN(task) = (u_long) len; |
| |
TASK_FD(task) = fd; |
| |
break; |
| |
} |
| |
} |
| |
break; |
| |
#ifdef EVFILT_LIO |
| |
case EVFILT_LIO: |
| |
TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) { |
| |
acbs = (struct aiocb**) TASK_VAL(task); |
| |
if (acbs == ((struct aiocb**) res[i].udata)) { |
| |
TASK_RET(task) = res[i].data; |
| |
TASK_FLAG(task) = (u_long) res[i].fflags; |
| |
|
| |
/* remove user handle */ |
| |
transit_task2ready(task, &r->root_lio); |
| |
|
| |
iv = (struct iovec*) TASK_DATA(task); |
| |
fd = acbs[0]->aio_fildes; |
| |
off = acbs[0]->aio_offset; |
| |
for (len = 0; i < TASK_DATLEN(task); len += l, i++) { |
| |
if ((iv[i].iov_len = aio_return(acbs[i])) == -1) |
| |
l = 0; |
| |
else |
| |
l = iv[i].iov_len; |
| |
free(acbs[i]); |
| |
} |
| |
free(acbs); |
| |
TASK_DATLEN(task) = (u_long) len; |
| |
TASK_FD(task) = fd; |
| |
|
| |
if (lseek(fd, off + len, SEEK_CUR) == -1) |
| |
LOGERR; |
| |
break; |
| |
} |
| |
} |
| |
break; |
| |
#endif /* EVFILT_LIO */ |
| |
#endif /* AIO_SUPPORT */ |
| |
#ifdef EVFILT_USER |
| |
case EVFILT_USER: |
| |
TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) { |
| |
if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) { |
| |
TASK_RET(task) = res[i].data; |
| |
TASK_FLAG(task) = (u_long) res[i].fflags; |
| |
|
| |
/* remove user handle */ |
| |
transit_task2ready(task, &r->root_user); |
| |
break; |
| |
} |
| |
} |
| |
break; |
| |
#endif /* EVFILT_USER */ |
| |
} |
| |
|
| |
if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) { |
| |
if (r->root_hooks.hook_exec.exception) |
| |
r->root_hooks.hook_exec.exception(r, NULL); |
| |
else |
| |
LOGERR; |
| |
} |
| |
} |
| |
} |
| |
#endif |
| |
|
| |
#if SUP_ENABLE == EP_SUPPORT |
| |
static inline void |
| |
fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r) |
| |
{ |
| |
register int i, flg; |
| |
int ops = EPOLL_CTL_DEL; |
| |
sched_task_t *t, *tmp, *task; |
| |
struct epoll_event evt[1]; |
| |
|
| |
for (i = 0; i < en; i++) { |
| |
memcpy(evt, &res[i], sizeof evt); |
| |
|
| |
if (evt->events & (EPOLLIN | EPOLLPRI)) { |
| |
flg = 0; |
| |
task = NULL; |
| |
TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) { |
| |
if (TASK_FD(t) == evt->data.fd) { |
| |
if (!flg) |
| |
task = t; |
| |
flg++; |
| |
} |
| |
} |
| |
|
| |
if (flg && task) { |
| |
TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task)); |
| |
/* remove read handle */ |
| |
remove_task_from(task, &r->root_read); |
| |
|
| |
if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) { |
| |
if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t) |
| |
(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) { |
| |
task->task_type = taskUNUSE; |
| |
insert_task_to(task, &r->root_unuse); |
| } else { |
} else { |
| task->task_type = taskREADY; |
task->task_type = taskREADY; |
| TAILQ_INSERT_TAIL(&r->root_ready, task, task_node); | insert_task_to(task, &r->root_ready); |
| } |
} |
| break; | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| } |
} |
| break; | |
| | evt->events ^= evt->events; |
| | if (FD_ISSET(evt->data.fd, &r->root_fds[1])) { |
| | ops = EPOLL_CTL_MOD; |
| | evt->events |= EPOLLOUT; |
| | } |
| | if (flg > 1) { |
| | ops = EPOLL_CTL_MOD; |
| | evt->events |= EPOLLIN | EPOLLPRI; |
| | } else |
| | FD_CLR(evt->data.fd, &r->root_fds[0]); |
| | } |
| | } else if (evt->events & EPOLLOUT) { |
| | flg = 0; |
| | task = NULL; |
| | TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) { |
| | if (TASK_FD(t) == evt->data.fd) { |
| | if (!flg) |
| | task = t; |
| | flg++; |
| | } |
| | } |
| | |
| | if (flg && task) { |
| | TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task)); |
| | /* remove write handle */ |
| | remove_task_from(task, &r->root_write); |
| | |
| | if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) { |
| | if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t) |
| | (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) { |
| | task->task_type = taskUNUSE; |
| | insert_task_to(task, &r->root_unuse); |
| | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| | } |
| | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| | } |
| | |
| | evt->events ^= evt->events; |
| | if (FD_ISSET(evt->data.fd, &r->root_fds[0])) { |
| | ops = EPOLL_CTL_MOD; |
| | evt->events |= EPOLLIN | EPOLLPRI; |
| | } |
| | if (flg > 1) { |
| | ops = EPOLL_CTL_MOD; |
| | evt->events |= EPOLLOUT; |
| | } else |
| | FD_CLR(evt->data.fd, &r->root_fds[1]); |
| | } |
| } |
} |
| if (kevent(r->root_kq, evt, 1, NULL, 0, &nw) == -1) { | |
| | if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) { |
| if (r->root_hooks.hook_exec.exception) { |
if (r->root_hooks.hook_exec.exception) { |
| if (r->root_hooks.hook_exec.exception(r, NULL)) | r->root_hooks.hook_exec.exception(r, NULL); |
| return NULL; | |
| } else |
} else |
| LOGERR; |
LOGERR; |
| } |
} |
| } |
} |
| |
} |
| |
#endif |
| |
|
| /* timer update & put in ready queue */ | #if SUP_ENABLE == NO_SUPPORT |
| clock_gettime(CLOCK_MONOTONIC, &nw); | static inline void |
| now.tv_sec = nw.tv_sec; | fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r) |
| now.tv_usec = nw.tv_nsec / 1000; | { |
| | register int i, flg; |
| | sched_task_t *t, *tmp, *task = NULL; |
| |
|
| TAILQ_FOREACH(task, &r->root_timer, task_node) | /* skip select check if return value from select is zero */ |
| if (timercmp(&now, &TASK_TV(task), -) >= 0) { | if (!en) |
| TAILQ_REMOVE(&r->root_timer, task, task_node); | return; |
| task->task_type = taskREADY; | |
| TAILQ_INSERT_TAIL(&r->root_ready, task, task_node); | for (i = 0; i < r->root_kq; i++) { |
| | if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) { |
| | flg = 0; |
| | TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) { |
| | if (TASK_FD(t) == i) { |
| | if (!flg) |
| | task = t; |
| | flg++; |
| | } |
| | } |
| | |
| | if (flg && task) { |
| | TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task)); |
| | |
| | /* remove read handle */ |
| | remove_task_from(task, &r->root_read); |
| | |
| | if (r->root_hooks.hook_exec.exception) { |
| | if (r->root_hooks.hook_exec.exception(r, NULL)) { |
| | task->task_type = taskUNUSE; |
| | insert_task_to(task, &r->root_unuse); |
| | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| | } |
| | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| | } |
| | |
| | /* remove resouce */ |
| | if (flg == 1) |
| | FD_CLR(i, &r->root_fds[0]); |
| | } |
| | } else if (FD_ISSET(i, &wfd)) { |
| | flg = 0; |
| | TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) { |
| | if (TASK_FD(t) == i) { |
| | if (!flg) |
| | task = t; |
| | flg++; |
| | } |
| | } |
| | |
| | if (flg && task) { |
| | TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task)); |
| | |
| | /* remove write handle */ |
| | remove_task_from(task, &r->root_write); |
| | |
| | if (r->root_hooks.hook_exec.exception) { |
| | if (r->root_hooks.hook_exec.exception(r, NULL)) { |
| | task->task_type = taskUNUSE; |
| | insert_task_to(task, &r->root_unuse); |
| | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| | } |
| | } else { |
| | task->task_type = taskREADY; |
| | insert_task_to(task, &r->root_ready); |
| | } |
| | |
| | /* remove resouce */ |
| | if (flg == 1) |
| | FD_CLR(i, &r->root_fds[1]); |
| | } |
| } |
} |
| |
} |
| |
|
| /* put eventlo priority task to ready queue, if there is no ready task or | /* optimize select */ |
| reach max missed fetch-rotate */ | for (i = r->root_kq - 1; i > 2; i--) |
| if ((task = TAILQ_FIRST(&r->root_eventlo))) { | if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1])) |
| if (!TAILQ_FIRST(&r->root_ready) || r->root_eventlo_miss > MAX_EVENTLO_MISS) { | break; |
| r->root_eventlo_miss = 0; | if (i > 2) |
| | r->root_kq = i + 1; |
| | } |
| | #endif |
| |
|
| TAILQ_REMOVE(&r->root_eventlo, task, task_node); | /* |
| task->task_type = taskREADY; | * sched_hook_fetch() - Default FETCH hook |
| TAILQ_INSERT_TAIL(&r->root_ready, task, task_node); | * |
| | * @root = root task |
| | * @arg = unused |
| | * return: NULL error or !=NULL fetched task |
| | */ |
| | void * |
| | sched_hook_fetch(void *root, void *arg __unused) |
| | { |
| | sched_root_task_t *r = root; |
| | sched_task_t *task, *tmp; |
| | struct timespec now, m, mtmp; |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | struct kevent res[KQ_EVENTS]; |
| | struct timespec *timeout; |
| | #elif SUP_ENABLE == EP_SUPPORT |
| | struct epoll_event res[KQ_EVENTS]; |
| | u_long timeout = 0; |
| | #else |
| | struct timeval *timeout, tv; |
| | fd_set rfd, wfd, xfd; |
| | #endif |
| | int en; |
| | |
| | if (!r) |
| | return NULL; |
| | |
| | /* get new task by queue priority */ |
| | while ((task = TAILQ_FIRST(&r->root_event))) { |
| | transit_task2unuse(task, &r->root_event); |
| | return task; |
| | } |
| | while ((task = TAILQ_FIRST(&r->root_ready))) { |
| | transit_task2unuse(task, &r->root_ready); |
| | return task; |
| | } |
| | |
| | /* if present member of task, set NOWAIT */ |
| | if (!TAILQ_FIRST(&r->root_task)) { |
| | /* timer tasks */ |
| | #ifdef TIMER_WITHOUT_SORT |
| | clock_gettime(CLOCK_MONOTONIC, &now); |
| | |
| | sched_timespecclear(&r->root_wait); |
| | TAILQ_FOREACH(task, &r->root_timer, task_node) { |
| | if (!sched_timespecisset(&r->root_wait)) |
| | r->root_wait = TASK_TS(task); |
| | else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0) |
| | r->root_wait = TASK_TS(task); |
| | } |
| | |
| | if (TAILQ_FIRST(&r->root_timer)) { |
| | m = r->root_wait; |
| | sched_timespecsub(&m, &now, &mtmp); |
| | r->root_wait = mtmp; |
| | } else { |
| | /* set wait INFTIM */ |
| | sched_timespecinf(&r->root_wait); |
| | } |
| | #else /* ! TIMER_WITHOUT_SORT */ |
| | if ((task = TAILQ_FIRST(&r->root_timer))) { |
| | clock_gettime(CLOCK_MONOTONIC, &now); |
| | |
| | m = TASK_TS(task); |
| | sched_timespecsub(&m, &now, &mtmp); |
| | r->root_wait = mtmp; |
| | } else { |
| | /* set wait INFTIM */ |
| | sched_timespecinf(&r->root_wait); |
| | } |
| | #endif /* TIMER_WITHOUT_SORT */ |
| | } else /* no waiting for event, because we have ready task */ |
| | sched_timespecclear(&r->root_wait); |
| | |
| | if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) { |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | timeout = &r->root_wait; |
| | #elif SUP_ENABLE == EP_SUPPORT |
| | timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000; |
| | #else |
| | sched_timespec2val(&r->root_wait, &tv); |
| | timeout = &tv; |
| | #endif /* KQ_SUPPORT */ |
| | } else if (sched_timespecisinf(&r->root_poll)) |
| | #if SUP_ENABLE == EP_SUPPORT |
| | timeout = -1; |
| | #else |
| | timeout = NULL; |
| | #endif |
| | else { |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | timeout = &r->root_poll; |
| | #elif SUP_ENABLE == EP_SUPPORT |
| | timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000; |
| | #else |
| | sched_timespec2val(&r->root_poll, &tv); |
| | timeout = &tv; |
| | #endif /* KQ_SUPPORT */ |
| | } |
| | |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) { |
| | #elif SUP_ENABLE == EP_SUPPORT |
| | if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) { |
| | #else |
| | rfd = xfd = r->root_fds[0]; |
| | wfd = r->root_fds[1]; |
| | if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) { |
| | #endif /* KQ_SUPPORT */ |
| | if (r->root_hooks.hook_exec.exception) { |
| | if (r->root_hooks.hook_exec.exception(r, NULL)) |
| | return NULL; |
| | } else if (errno != EINTR) |
| | LOGERR; |
| | goto skip_event; |
| | } |
| | |
| | /* Go and catch the cat into pipes ... */ |
| | #if SUP_ENABLE == KQ_SUPPORT |
| | /* kevent dispatcher */ |
| | fetch_hook_kevent_proceed(en, res, r); |
| | #elif SUP_ENABLE == EP_SUPPORT |
| | /* epoll dispatcher */ |
| | fetch_hook_epoll_proceed(en, res, r); |
| | #else |
| | /* select dispatcher */ |
| | fetch_hook_select_proceed(en, rfd, wfd, xfd, r); |
| | #endif /* KQ_SUPPORT */ |
| | |
| | skip_event: |
| | /* timer update & put in ready queue */ |
| | clock_gettime(CLOCK_MONOTONIC, &now); |
| | |
| | TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp) |
| | if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) |
| | transit_task2ready(task, &r->root_timer); |
| | |
| | /* put regular task priority task to ready queue, |
| | if there is no ready task or reach max missing hit for regular task */ |
| | if ((task = TAILQ_FIRST(&r->root_task))) { |
| | if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) { |
| | r->root_miss ^= r->root_miss; |
| | |
| | transit_task2ready(task, &r->root_task); |
| } else |
} else |
| r->root_eventlo_miss++; | r->root_miss++; |
| } else |
} else |
| r->root_eventlo_miss = 0; | r->root_miss ^= r->root_miss; |
| |
|
| /* OK, lets get ready task !!! */ |
/* OK, lets get ready task !!! */ |
| if (!(task = TAILQ_FIRST(&r->root_ready))) | task = TAILQ_FIRST(&r->root_ready); |
| goto retry; | if (task) |
| TAILQ_REMOVE(&r->root_ready, task, task_node); | transit_task2unuse(task, &r->root_ready); |
| task->task_type = taskUNUSE; | |
| TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node); | |
| return task; |
return task; |
| } |
} |
| |
|
| /* |
/* |
| * sched_hook_exception() - Default EXCEPTION hook |
* sched_hook_exception() - Default EXCEPTION hook |
| |
* |
| * @root = root task |
* @root = root task |
| * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno |
* @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno |
| * return: <0 errors and 0 ok |
* return: <0 errors and 0 ok |
|
Line 439 sched_hook_exception(void *root, void *arg)
|
Line 1431 sched_hook_exception(void *root, void *arg)
|
| { |
{ |
| sched_root_task_t *r = root; |
sched_root_task_t *r = root; |
| |
|
| if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r)) | if (!r) |
| return NULL; |
return NULL; |
| |
|
| /* custom exception handling ... */ |
/* custom exception handling ... */ |
|
Line 455 sched_hook_exception(void *root, void *arg)
|
Line 1447 sched_hook_exception(void *root, void *arg)
|
| |
|
| /* default case! */ |
/* default case! */ |
| LOGERR; |
LOGERR; |
| |
return NULL; |
| |
} |
| |
|
| |
/* |
| |
* sched_hook_condition() - Default CONDITION hook |
| |
* |
| |
* @root = root task |
| |
* @arg = killState from schedRun() |
| |
* return: NULL kill scheduler loop or !=NULL ok |
| |
*/ |
| |
void * |
| |
sched_hook_condition(void *root, void *arg) |
| |
{ |
| |
sched_root_task_t *r = root; |
| |
|
| |
if (!r) |
| |
return NULL; |
| |
|
| |
return (void*) (*r->root_cond - *(intptr_t*) arg); |
| |
} |
| |
|
| |
/* |
| |
* sched_hook_rtc() - Default RTC hook |
| |
* |
| |
* @task = current task |
| |
* @arg = unused |
| |
* return: <0 errors and 0 ok |
| |
*/ |
| |
void * |
| |
sched_hook_rtc(void *task, void *arg __unused) |
| |
{ |
| |
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \ |
| |
defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE) |
| |
sched_task_t *sigt = NULL, *t = task; |
| |
struct itimerspec its; |
| |
struct sigevent evt; |
| |
timer_t tmr; |
| |
#if SUP_ENABLE != KQ_SUPPORT |
| |
struct sigaction sa; |
| |
#endif |
| |
|
| |
if (!t || !TASK_ROOT(t)) |
| |
return (void*) -1; |
| |
|
| |
memset(&evt, 0, sizeof evt); |
| |
evt.sigev_notify = SIGEV_SIGNAL; |
| |
evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN; |
| |
evt.sigev_value.sival_ptr = t; |
| |
|
| |
if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) { |
| |
if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| |
TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| |
else |
| |
LOGERR; |
| |
return (void*) -1; |
| |
} else |
| |
TASK_FLAG(t) = (u_long) tmr; |
| |
|
| |
#if SUP_ENABLE == KQ_SUPPORT |
| |
if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, |
| |
t, (size_t) tmr))) { |
| |
if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| |
TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| |
else |
| |
LOGERR; |
| |
timer_delete(tmr); |
| |
return (void*) -1; |
| |
} else |
| |
TASK_RET(t) = (uintptr_t) sigt; |
| |
#else |
| |
memset(&sa, 0, sizeof sa); |
| |
sigemptyset(&sa.sa_mask); |
| |
sa.sa_sigaction = _sched_rtcSigWrapper; |
| |
sa.sa_flags = SA_SIGINFO | SA_RESTART; |
| |
|
| |
if (sigaction(evt.sigev_signo, &sa, NULL) == -1) { |
| |
if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| |
TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| |
else |
| |
LOGERR; |
| |
timer_delete(tmr); |
| |
return (void*) -1; |
| |
} |
| |
#endif |
| |
|
| |
memset(&its, 0, sizeof its); |
| |
its.it_value.tv_sec = t->task_val.ts.tv_sec; |
| |
its.it_value.tv_nsec = t->task_val.ts.tv_nsec; |
| |
|
| |
if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) { |
| |
if (TASK_ROOT(t)->root_hooks.hook_exec.exception) |
| |
TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL); |
| |
else |
| |
LOGERR; |
| |
schedCancel(sigt); |
| |
timer_delete(tmr); |
| |
return (void*) -1; |
| |
} |
| |
#endif /* HAVE_TIMER_CREATE */ |
| return NULL; |
return NULL; |
| } |
} |