version 1.27.2.3, 2014/05/21 22:09:01 -> version 1.27.2.9, 2014/06/05 22:22:46

/* ... SUCH DAMAGE. */
#include "hooks.h"

static inline void
transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
    remove_task_from(t, q);

    t->task_type = taskREADY;
    insert_task_to(t, &(TASK_ROOT(t))->root_ready);
}
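/*
 * For reference, a minimal sketch of the companion helper used throughout
 * this file; the real transit_task2unuse() is defined elsewhere in
 * libaitsched, so this #if 0 block is illustrative only and assumes the
 * same queue primitives as transit_task2ready() above.
 */
#if 0
static inline void
transit_task2unuse(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
    remove_task_from(t, q);         /* detach task from its current queue */

    t->task_type = taskUNUSE;       /* mark the slot as recyclable */
    insert_task_to(t, &(TASK_ROOT(t))->root_unuse);
}
#endif  /* 0 */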
#ifdef HAVE_LIBPTHREAD
static void *
_sched_threadWrapper(sched_task_t *t)
{
    void *ret = NULL;
    sched_root_task_t *r;

    if (!t || !TASK_ROOT(t))
        pthread_exit(ret);
    else
        r = (sched_root_task_t*) TASK_ROOT(t);

    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
    /*
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
    */

    /* notify parent, thread is ready for execution */
    pthread_testcancel();

    ret = schedCall(t);
    r->root_ret = ret;

    if (TASK_VAL(t)) {
        transit_task2unuse(t, &r->root_thread);
        TASK_VAL(t) = 0;
    }

    pthread_exit(ret);
}
#endif
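/*
 * Usage sketch (hypothetical; the schedThread() signature below is an
 * assumption): every threaded task funnels through _sched_threadWrapper(),
 * so a pthread_cancel() issued by sched_hook_cancel() is delivered no later
 * than the pthread_testcancel() above, i.e. before schedCall() runs.
 */
#if 0
    sched_task_t *task = schedThread(root, my_func, my_arg, 0);

    if (task)
        schedCancel(task);  /* taskTHREAD case: cancel + join the wrapper */
#endif  /* 0 */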
#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
#if SUP_ENABLE == KQ_SUPPORT
static void *
_sched_rtcWrapper(sched_task_t *t)
{
    sched_task_t *task;
    void *ret;

    if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
        return NULL;
    else {
        task = (sched_task_t*) TASK_DATA(t);
        timer_delete((timer_t) TASK_DATLEN(t));
    }

    ret = schedCall(task);

    transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
    return ret;
}
#else
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
    sched_task_t *task;

    if (si && si->si_value.sival_ptr) {
        task = (sched_task_t*) si->si_value.sival_ptr;
        timer_delete((timer_t) TASK_FLAG(task));

        TASK_RET(task) = (intptr_t) schedCall(task);

        transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
    }
}
#endif
#endif
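/*
 * Note on the two RTC paths above: with kqueue the timer is drained by a
 * signal task that calls _sched_rtcWrapper() and finds the target task in
 * TASK_DATA(); without kqueue the real-time signal is delivered straight to
 * _sched_rtcSigWrapper() via sigaction(), carrying the task pointer in
 * si->si_value.sival_ptr (see sched_hook_rtc() below).
 */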
/*
 * sched_hook_init() - Default INIT hook
 *
 * ...
 */
/* ... (in sched_hook_init(void *root, void *arg __unused)) */
    if (!r)
        return (void*) -1;

#if SUP_ENABLE == KQ_SUPPORT
    r->root_kq = kqueue();
    if (r->root_kq == -1) {
        LOGERR;
        return (void*) -1;
    }
#elif SUP_ENABLE == EP_SUPPORT
    r->root_kq = epoll_create(KQ_EVENTS);
    if (r->root_kq == -1) {
        LOGERR;
        return (void*) -1;
    }
#else
    r->root_kq ^= r->root_kq;
    FD_ZERO(&r->root_fds[0]);
/* ... (in sched_hook_fini(void *root, void *arg __unused)) */
    if (!r)
        return (void*) -1;

#if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
    if (r->root_kq > 2) {
        close(r->root_kq);
        r->root_kq = 0;
/* ... */
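/*
 * Backend selection sketch (illustrative; the numeric values are
 * assumptions, only the macro names are taken from this file): SUP_ENABLE
 * is fixed at build time and picks one of the three hook families used
 * throughout: kqueue(2), epoll(7) or plain select(2).
 */
#if 0
#define KQ_SUPPORT  1   /* BSD kqueue(2) backend */
#define EP_SUPPORT  2   /* Linux epoll(7) backend */
#define NO_SUPPORT  3   /* portable select(2) fallback */
#define SUP_ENABLE  KQ_SUPPORT
#endif  /* 0 */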
void *
sched_hook_cancel(void *task, void *arg __unused)
{
    sched_task_t *t = task, *tmp, *tt;
    sched_root_task_t *r = NULL;
    int flg;
#if SUP_ENABLE == KQ_SUPPORT
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
    struct epoll_event ee = { .events = 0, .data.fd = 0 };
#else
    register int i;
#endif
#ifdef AIO_SUPPORT
/* ... (in sched_hook_cancel(void *task, void *arg __unused)) */

    if (!t || !TASK_ROOT(t))
        return (void*) -1;
    else
        r = TASK_ROOT(t);

    switch (TASK_TYPE(t)) {
    case taskREAD:
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
            if (TASK_FD(tt) != TASK_FD(t))
                continue;
            else
                flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
        EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_FD(t));
#else
        EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
        ee.data.fd = TASK_FD(t);
        if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
            ee.events = EPOLLOUT;

        if (flg < 2)
            FD_CLR(TASK_FD(t), &r->root_fds[0]);
        else
            ee.events |= (EPOLLIN | EPOLLPRI | EPOLLRDHUP);
#else
        if (flg < 2) {
            FD_CLR(TASK_FD(t), &r->root_fds[0]);

            /* optimize select */
            for (i = r->root_kq - 1; i > 2; i--)
                if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
                    break;
            if (i > 2)
                r->root_kq = i + 1;
        }
#endif
        break;
    case taskWRITE:
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
            if (TASK_FD(tt) != TASK_FD(t))
                continue;
            else
                flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
        EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_FD(t));
#else
        EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
        ee.data.fd = TASK_FD(t);
        if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
            ee.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP;

        if (flg < 2)
            FD_CLR(TASK_FD(t), &r->root_fds[1]);
        else
            ee.events |= EPOLLOUT;
#else
        if (flg < 2) {
            FD_CLR(TASK_FD(t), &r->root_fds[1]);

            /* optimize select */
            for (i = r->root_kq - 1; i > 2; i--)
                if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
                    break;
            if (i > 2)
                r->root_kq = i + 1;
        }
#endif
        break;
    case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
            if (TASK_DATA(tt) != TASK_DATA(t))
                continue;
            else
                flg++;
#ifdef __NetBSD__
        EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_DATA(t));
#else
        EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_DATA(t));
#endif
#endif
        break;
    case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
            if (TASK_FD(tt) != TASK_FD(t))
                continue;
            else
                flg++;
#ifdef __NetBSD__
        EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_FD(t));
#else
        EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_FD(t));
#endif
#endif
        break;
    case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
            if (TASK_VAL(tt) != TASK_VAL(t))
                continue;
            else
                flg++;
#ifdef __NetBSD__
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_VAL(t));
#else
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_VAL(t));
#endif
#endif
        break;
    case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
            if (TASK_VAL(tt) != TASK_VAL(t))
                continue;
            else
                flg++;
#ifdef __NetBSD__
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_VAL(t));
#else
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_VAL(t));
#endif
        /* restore signal */
        if (flg < 2)
            signal(TASK_VAL(t), SIG_DFL);
#endif
        break;
#ifdef AIO_SUPPORT
    case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
            if (TASK_VAL(tt) != TASK_VAL(t))
                continue;
            else
                flg++;
#ifdef __NetBSD__
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_VAL(t));
#else
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_VAL(t));
#endif
        acb = (struct aiocb*) TASK_VAL(t);
        if (acb) {
/* ... (in sched_hook_cancel(void *task, void *arg __unused)) */
        break;
#ifdef EVFILT_LIO
    case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
            if (TASK_VAL(tt) != TASK_VAL(t))
                continue;
            else
                flg++;
#ifdef __NetBSD__
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_VAL(t));
#else
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_VAL(t));
#endif
        acbs = (struct aiocb**) TASK_VAL(t);
        if (acbs) {
/* ... */
#endif  /* AIO_SUPPORT */
#ifdef EVFILT_USER
    case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
            if (TASK_VAL(tt) != TASK_VAL(t))
                continue;
            else
                flg++;
#ifdef __NetBSD__
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
                0, 0, (intptr_t) TASK_VAL(t));
#else
        EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
                0, 0, (void*) TASK_VAL(t));
#endif
#endif
        break;
#endif  /* EVFILT_USER */
    case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
        if (TASK_VAL(t)) {
            pthread_cancel((pthread_t) TASK_VAL(t));
            pthread_join((pthread_t) TASK_VAL(t), NULL);
            if (TASK_VAL(t)) {
                transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
                TASK_VAL(t) = 0;
            }
        }
#endif
        return NULL;
#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
    case taskRTC:
        timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
        schedCancel((sched_task_t*) TASK_RET(t));
#else
        /* check for multi subscribers */
        flg = 0;
        TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
            if (TASK_DATA(tt) != TASK_DATA(t))
                continue;
            else
                flg++;

        /* restore signal */
        if (flg < 2)
            signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
        return NULL;
#endif  /* HAVE_TIMER_CREATE */
    default:
        return NULL;
    }

#if SUP_ENABLE == KQ_SUPPORT
    kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
    epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
#endif
    return NULL;
}
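/*
 * Worked example of the multi-subscriber guard above: with two read tasks
 * watching the same fd, cancelling one leaves flg == 2, so EV_SET() is
 * issued with action 0 and the kernel event survives; only the last
 * subscriber (flg == 1) really deletes the event, clears the fd_set bit or
 * restores the default signal handler.
 */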
/* ... (in sched_hook_thread(void *task, void *arg)) */

    sigfillset(&s);
    pthread_sigmask(SIG_BLOCK, &s, &o);
    errno = pthread_create(&tid, (pthread_attr_t*) arg,
            (void *(*)(void*)) _sched_threadWrapper, t);
    pthread_sigmask(SIG_SETMASK, &o, NULL);

    if (errno) {
        LOGERR;
        return (void*) -1;
    } else
        TASK_VAL(t) = (u_long) tid;
/* ... */
    if (!TASK_ISLOCKED(t))
        TASK_LOCK(t);

    return NULL;
}
#endif
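/*
 * Design note: the whole signal mask is blocked around pthread_create() so
 * the wrapper thread starts with all signals masked and never steals the
 * scheduler's signal-driven events; the parent restores its own mask right
 * after, whether or not the create succeeded.
 */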
/* ... */
void *
sched_hook_read(void *task, void *arg __unused)
{
    sched_task_t *t = task;
    sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
    struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI | EPOLLRDHUP, .data.fd = 0 };
    int flg = 0;
#endif

    if (!t || !TASK_ROOT(t))
        return (void*) -1;
    else
        r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
    EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
    EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
    if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
        if (r->root_hooks.hook_exec.exception)
            r->root_hooks.hook_exec.exception(r, NULL);
        else
            LOGERR;
        return (void*) -1;
    }
#elif SUP_ENABLE == EP_SUPPORT
    if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
        flg |= 1;
    if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
        flg |= 2;
        ee.events |= EPOLLOUT;
    }

    ee.data.fd = TASK_FD(t);
    if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
        if (r->root_hooks.hook_exec.exception)
            r->root_hooks.hook_exec.exception(r, NULL);
        else
            LOGERR;
        return (void*) -1;
    } else
        FD_SET(TASK_FD(t), &r->root_fds[0]);
#else
    FD_SET(TASK_FD(t), &r->root_fds[0]);
    if (TASK_FD(t) >= r->root_kq)
/* ... */
void *
sched_hook_write(void *task, void *arg __unused)
{
    sched_task_t *t = task;
    sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
    struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
    int flg = 0;
#endif

    if (!t || !TASK_ROOT(t))
        return (void*) -1;
    else
        r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
    EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
    EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
    if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
        if (r->root_hooks.hook_exec.exception)
            r->root_hooks.hook_exec.exception(r, NULL);
        else
            LOGERR;
        return (void*) -1;
    }
#elif SUP_ENABLE == EP_SUPPORT
    if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
        flg |= 1;
        ee.events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
    }
    if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
        flg |= 2;

    ee.data.fd = TASK_FD(t);
    if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
        if (r->root_hooks.hook_exec.exception)
            r->root_hooks.hook_exec.exception(r, NULL);
        else
            LOGERR;
        return (void*) -1;
    } else
        FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
    FD_SET(TASK_FD(t), &r->root_fds[1]);
    if (TASK_FD(t) >= r->root_kq)
/* ... */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
    sched_task_t *t = task;
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };
/* ... (in sched_hook_alarm(void *task, void *arg __unused)) */
void *
sched_hook_node(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
    sched_task_t *t = task;
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };
/* ... (in sched_hook_node(void *task, void *arg __unused)) */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
    sched_task_t *t = task;
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };
/* ... (in sched_hook_proc(void *task, void *arg __unused)) */
void *
sched_hook_signal(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
    sched_task_t *t = task;
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };
/* ... (in sched_hook_signal(void *task, void *arg __unused)) */
            LOGERR;
        return (void*) -1;
    }
#else
#if 0
    sched_task_t *t = task;
    struct sigaction sa;

    memset(&sa, 0, sizeof sa);
    sigemptyset(&sa.sa_mask);
    sa.sa_handler = _sched_sigHandler;
    sa.sa_flags = SA_RESETHAND | SA_RESTART;

    if (sigaction(TASK_VAL(t), &sa, NULL) == -1) {
        if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
            TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
        else
            LOGERR;
        return (void*) -1;
    }
#endif  /* 0 */
#endif
    return NULL;
}
/* ... */
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
    sched_task_t *t = task;
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };
/* ... (in sched_hook_user(void *task, void *arg __unused)) */
}
#endif

#if SUP_ENABLE == KQ_SUPPORT
static inline void
fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
{
    struct kevent evt[1];
    register int i, flg;
    sched_task_t *task, *tmp;
    struct timespec now = { 0, 0 };
#ifdef AIO_SUPPORT
    int len, fd;
    struct aiocb *acb;
/* ... */
#endif  /* EVFILT_LIO */
#endif  /* AIO_SUPPORT */

    for (i = 0; i < en; i++) {
        memcpy(evt, &res[i], sizeof evt);
        evt->flags = EV_DELETE;
        /* Put read/write task to ready queue */
        switch (res[i].filter) {
        case EVFILT_READ:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
                if (TASK_FD(task) != ((intptr_t) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove read handle */
                remove_task_from(task, &r->root_read);

                if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
                    if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
                        task->task_type = taskUNUSE;
                        insert_task_to(task, &r->root_unuse);
                    } else {
                        task->task_type = taskREADY;
                        insert_task_to(task, &r->root_ready);
                    }
                } else {
                    task->task_type = taskREADY;
                    insert_task_to(task, &r->root_ready);
                }
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
        case EVFILT_WRITE:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
                if (TASK_FD(task) != ((intptr_t) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove write handle */
                remove_task_from(task, &r->root_write);

                if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
                    if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
                        task->task_type = taskUNUSE;
                        insert_task_to(task, &r->root_unuse);
                    } else {
                        task->task_type = taskREADY;
                        insert_task_to(task, &r->root_ready);
                    }
                } else {
                    task->task_type = taskREADY;
                    insert_task_to(task, &r->root_ready);
                }
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
        case EVFILT_TIMER:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
                if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove alarm handle */
                transit_task2ready(task, &r->root_alarm);
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
        case EVFILT_VNODE:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
                if (TASK_FD(task) != ((intptr_t) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove node handle */
                transit_task2ready(task, &r->root_node);
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
        case EVFILT_PROC:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
                if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove proc handle */
                transit_task2ready(task, &r->root_proc);
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
        case EVFILT_SIGNAL:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
                if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove signal handle */
                transit_task2ready(task, &r->root_signal);
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
#ifdef AIO_SUPPORT
        case EVFILT_AIO:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
                acb = (struct aiocb*) TASK_VAL(task);
                if (acb != ((struct aiocb*) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove user handle */
                transit_task2ready(task, &r->root_aio);
                fd = acb->aio_fildes;
                if ((len = aio_return(acb)) != -1) {
                    if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
/* ... */
                TASK_DATLEN(task) = (u_long) len;
                TASK_FD(task) = fd;
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
#ifdef EVFILT_LIO
        case EVFILT_LIO:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
                acbs = (struct aiocb**) TASK_VAL(task);
                if (acbs != ((struct aiocb**) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove user handle */
                transit_task2ready(task, &r->root_lio);
                iv = (struct iovec*) TASK_DATA(task);
                fd = acbs[0]->aio_fildes;
                off = acbs[0]->aio_offset;
/* ... */
                if (lseek(fd, off + len, SEEK_CUR) == -1)
                    LOGERR;
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
#endif  /* EVFILT_LIO */
#endif  /* AIO_SUPPORT */
#ifdef EVFILT_USER
        case EVFILT_USER:
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
                if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
                    continue;
                else {
                    flg++;
                    TASK_RET(task) = res[i].data;
                    TASK_FLAG(task) = (u_long) res[i].fflags;
                }
                /* remove user handle */
                transit_task2ready(task, &r->root_user);
            }
            /* if match at least 2, don't remove resource of event */
            if (flg > 1)
                evt->flags ^= evt->flags;
            break;
#endif  /* EVFILT_USER */
        }

        if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
            if (r->root_hooks.hook_exec.exception)
                r->root_hooks.hook_exec.exception(r, NULL);
            else
                LOGERR;
        }
    }
}
#endif
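/*
 * Call-site sketch (mirrors the use in sched_hook_fetch() below): the
 * dispatcher consumes up to KQ_EVENTS results per poll and re-posts each
 * consumed event with EV_DELETE unless another task still subscribes to it.
 */
#if 0
    struct kevent res[KQ_EVENTS];
    int en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout);

    if (en != -1)
        fetch_hook_kevent_proceed(en, res, r);
#endif  /* 0 */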

#if SUP_ENABLE == EP_SUPPORT
static inline void
fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
{
    register int i, flg;
    int ops = EPOLL_CTL_DEL;
    sched_task_t *task, *tmp;
    struct epoll_event evt[1];

    for (i = 0; i < en; i++) {
        memcpy(evt, &res[i], sizeof evt);

        if (evt->events & (EPOLLIN | EPOLLPRI | EPOLLET)) {
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
                if (TASK_FD(task) != evt->data.fd)
                    continue;
                else {
                    flg++;
                    FD_CLR(TASK_FD(task), &r->root_fds[0]);
                    TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));

                    evt->events &= ~(EPOLLIN | EPOLLPRI | EPOLLET | EPOLLRDHUP);
                    if (FD_ISSET(TASK_FD(task), &r->root_fds[1])) {
                        ops = EPOLL_CTL_MOD;
                        evt->events |= EPOLLOUT;
                    }
                }
                /* remove read handle */
                remove_task_from(task, &r->root_read);

                if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
                    if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
                            (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
                        task->task_type = taskUNUSE;
                        insert_task_to(task, &r->root_unuse);
                    } else {
                        task->task_type = taskREADY;
                        insert_task_to(task, &r->root_ready);
                    }
                } else {
                    task->task_type = taskREADY;
                    insert_task_to(task, &r->root_ready);
                }
            }
            if (flg > 1)
                ops = EPOLL_CTL_MOD;
        }

        if (evt->events & EPOLLOUT) {
            flg = 0;
            TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
                if (TASK_FD(task) != evt->data.fd)
                    continue;
                else {
                    flg++;
                    FD_CLR(TASK_FD(task), &r->root_fds[1]);
                    TASK_FLAG(task) = ioctl(TASK_FD(task),
                            FIONWRITE, &TASK_RET(task));

                    evt->events &= ~EPOLLOUT;
                    if (FD_ISSET(TASK_FD(task), &r->root_fds[0])) {
                        ops = EPOLL_CTL_MOD;
                        evt->events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
                    }
                }
                /* remove write handle */
                remove_task_from(task, &r->root_write);

                if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
                    if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
                            (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
                        task->task_type = taskUNUSE;
                        insert_task_to(task, &r->root_unuse);
                    } else {
                        task->task_type = taskREADY;
                        insert_task_to(task, &r->root_ready);
                    }
                } else {
                    task->task_type = taskREADY;
                    insert_task_to(task, &r->root_ready);
                }
            }
            if (flg > 1)
                ops = EPOLL_CTL_MOD;
        }

        if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
            if (r->root_hooks.hook_exec.exception) {
                r->root_hooks.hook_exec.exception(r, NULL);
            } else
                LOGERR;
        }
    }
}
#endif
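/*
 * Design note: unlike kqueue, epoll keeps a single registration per fd, so
 * after dispatch the interest set is rebuilt and either modified
 * (EPOLL_CTL_MOD, when the other direction or another subscriber remains)
 * or dropped entirely (EPOLL_CTL_DEL).
 */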

#if SUP_ENABLE == NO_SUPPORT
static inline void
fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
{
    register int i, flg;
    sched_task_t *task, *tmp;

    /* skip select check if return value from select is zero */
    if (!en)
        return;

    for (i = 0; i < r->root_kq; i++) {
        if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
            flg = 0;
/* ... (in fetch_hook_select_proceed()) */
                            FIONREAD, &TASK_RET(task));
                }
                /* remove read handle */
                remove_task_from(task, &r->root_read);

                if (r->root_hooks.hook_exec.exception) {
                    if (r->root_hooks.hook_exec.exception(r, NULL)) {
                        task->task_type = taskUNUSE;
                        insert_task_to(task, &r->root_unuse);
                    } else {
                        task->task_type = taskREADY;
                        insert_task_to(task, &r->root_ready);
                    }
                } else {
                    task->task_type = taskREADY;
                    insert_task_to(task, &r->root_ready);
                }
            }
            /* if match equal to 1, remove resource */
/* ... */
                            FIONWRITE, &TASK_RET(task));
                }
                /* remove write handle */
                remove_task_from(task, &r->root_write);

                if (r->root_hooks.hook_exec.exception) {
                    if (r->root_hooks.hook_exec.exception(r, NULL)) {
                        task->task_type = taskUNUSE;
                        insert_task_to(task, &r->root_unuse);
                    } else {
                        task->task_type = taskREADY;
                        insert_task_to(task, &r->root_ready);
                    }
                } else {
                    task->task_type = taskREADY;
                    insert_task_to(task, &r->root_ready);
                }
            }
            /* if match equal to 1, remove resource */
/* ... */
            break;
    if (i > 2)
        r->root_kq = i + 1;
}
#endif
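/*
 * Design note: the select(2) path keeps interest in r->root_fds[] and
 * maintains r->root_kq as highest-fd-plus-one; the tail above shrinks it
 * after dispatch ("optimize select") so subsequent select() calls scan as
 * few descriptors as possible.
 */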

/*
 * sched_hook_fetch() - Default FETCH hook
 *
 * @root = root task
 * @arg = unused
 * return: NULL error or !=NULL fetched task
 */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
    sched_root_task_t *r = root;
    sched_task_t *task, *tmp;
    struct timespec now, m, mtmp;
#if SUP_ENABLE == KQ_SUPPORT
    struct kevent res[KQ_EVENTS];
    struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
    struct epoll_event res[KQ_EVENTS];
    u_long timeout = 0;
#else
    struct timeval *timeout, tv;
    fd_set rfd, wfd, xfd;
#endif
    int en;

    if (!r)
        return NULL;

    /* get new task by queue priority */
    while ((task = TAILQ_FIRST(&r->root_event))) {
        transit_task2unuse(task, &r->root_event);
        return task;
    }
    while ((task = TAILQ_FIRST(&r->root_ready))) {
        transit_task2unuse(task, &r->root_ready);
        return task;
    }

#ifdef TIMER_WITHOUT_SORT
    clock_gettime(CLOCK_MONOTONIC, &now);

    sched_timespecclear(&r->root_wait);
    TAILQ_FOREACH(task, &r->root_timer, task_node) {
        if (!sched_timespecisset(&r->root_wait))
            r->root_wait = TASK_TS(task);
        else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
            r->root_wait = TASK_TS(task);
    }

    if (TAILQ_FIRST(&r->root_timer)) {
        m = r->root_wait;
        sched_timespecsub(&m, &now, &mtmp);
        r->root_wait = mtmp;
    } else {
        /* set wait INFTIM */
        sched_timespecinf(&r->root_wait);
    }
#else   /* ! TIMER_WITHOUT_SORT */
    if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
        clock_gettime(CLOCK_MONOTONIC, &now);

        m = TASK_TS(task);
        sched_timespecsub(&m, &now, &mtmp);
        r->root_wait = mtmp;
    } else {
        /* set wait INFTIM */
        sched_timespecinf(&r->root_wait);
    }
#endif  /* TIMER_WITHOUT_SORT */
    /* if present member of task, set NOWAIT */
    if (TAILQ_FIRST(&r->root_task))
        sched_timespecclear(&r->root_wait);

    if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
#if SUP_ENABLE == KQ_SUPPORT
        timeout = &r->root_wait;
#elif SUP_ENABLE == EP_SUPPORT
        timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
#else
        sched_timespec2val(&r->root_wait, &tv);
        timeout = &tv;
#endif  /* KQ_SUPPORT */
    } else if (sched_timespecisinf(&r->root_poll))
#if SUP_ENABLE == EP_SUPPORT
        timeout = -1;
#else
        timeout = NULL;
#endif
    else {
#if SUP_ENABLE == KQ_SUPPORT
        timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
        timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
        sched_timespec2val(&r->root_poll, &tv);
        timeout = &tv;
#endif  /* KQ_SUPPORT */
    }

#if SUP_ENABLE == KQ_SUPPORT
    if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
    if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
    rfd = xfd = r->root_fds[0];
    wfd = r->root_fds[1];
    if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif  /* KQ_SUPPORT */
        if (r->root_hooks.hook_exec.exception) {
            if (r->root_hooks.hook_exec.exception(r, NULL))
                return NULL;
        } else if (errno != EINTR)
            LOGERR;
        goto skip_event;
    }

    /* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
    /* kevent dispatcher */
    fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
    /* epoll dispatcher */
    fetch_hook_epoll_proceed(en, res, r);
#else
    /* select dispatcher */
    fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif  /* KQ_SUPPORT */

skip_event:
    /* timer update & put in ready queue */
    clock_gettime(CLOCK_MONOTONIC, &now);

    TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
        if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
            transit_task2ready(task, &r->root_timer);

    /* put regular task priority task to ready queue,
        if there is no ready task or reach max missing hit for regular task */
/* ... (in sched_hook_fetch(void *root, void *arg __unused)) */
        if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
            r->root_miss ^= r->root_miss;

            transit_task2ready(task, &r->root_task);
        } else
            r->root_miss++;
    } else
/* ... */

    /* OK, let's get ready task !!! */
    task = TAILQ_FIRST(&r->root_ready);
    if (task)
        transit_task2unuse(task, &r->root_ready);
    return task;
}
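/*
 * Usage sketch (hypothetical driver loop; schedCall() is the dispatch
 * routine already used above): the FETCH hook blocks in
 * kevent()/epoll_wait()/select() and hands back one ready task per call.
 */
#if 0
    sched_task_t *task;

    while ((task = sched_hook_fetch(root, NULL)))
        schedCall(task);
#endif  /* 0 */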

/* ... (in sched_hook_condition(void *root, void *arg)) */
    if (!r)
        return NULL;

    return (void*) (*r->root_cond - *(intptr_t*) arg);
}
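/*
 * Hook contract: callers (e.g. a schedRun()-style loop; the caller name is
 * an assumption) pass the expected value in arg and stop when the hook
 * returns zero, i.e. when *root_cond equals *(intptr_t*) arg.
 */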

/* ... (in sched_hook_rtc(void *task, void *arg __unused)) */
    struct itimerspec its;
    struct sigevent evt;
    timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
    struct sigaction sa;
#endif

    if (!t || !TASK_ROOT(t))
        return (void*) -1;
/* ... */
    memset(&evt, 0, sizeof evt);
    evt.sigev_notify = SIGEV_SIGNAL;
    evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
    evt.sigev_value.sival_ptr = t;

    if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
        if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
/* ... */
    } else
        TASK_FLAG(t) = (u_long) tmr;

#if SUP_ENABLE == KQ_SUPPORT
    if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
            t, (size_t) tmr))) {
        if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
/* ... */
        return (void*) -1;
    } else
        TASK_RET(t) = (uintptr_t) sigt;
#else
    memset(&sa, 0, sizeof sa);
    sigemptyset(&sa.sa_mask);
    sa.sa_sigaction = _sched_rtcSigWrapper;
    sa.sa_flags = SA_SIGINFO | SA_RESTART;

    if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
        if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
            TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
        else
            LOGERR;
        timer_delete(tmr);
        return (void*) -1;
    }
#endif

    memset(&its, 0, sizeof its);
    its.it_value.tv_sec = t->task_val.ts.tv_sec;