--- libaitsched/src/hooks.c	2021/06/08 21:45:07	1.35
+++ libaitsched/src/hooks.c	2023/08/17 14:14:24	1.42
@@ -3,7 +3,7 @@
 * by Michael Pounov 
 *
 * $Author: misho $
-* $Id: hooks.c,v 1.35 2021/06/08 21:45:07 misho Exp $
+* $Id: hooks.c,v 1.42 2023/08/17 14:14:24 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -12,7 +12,7 @@ terms:
 
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria 
-Copyright 2004 - 2021
+Copyright 2004 - 2023
 by Michael Pounov .  All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -47,15 +47,6 @@ SUCH DAMAGE.
 
 #include "hooks.h"
 
-static inline void
-transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
-{
-	remove_task_from(t, q);
-
-	t->task_type = taskREADY;
-	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
-}
-
 #ifdef HAVE_LIBPTHREAD
 static void *
 _sched_threadWrapper(sched_task_t *t)
@@ -156,9 +147,11 @@ sched_hook_init(void *root, void *arg __unused)
 	}
 #else
 	r->root_kq ^= r->root_kq;
+#endif
+
 	FD_ZERO(&r->root_fds[0]);
 	FD_ZERO(&r->root_fds[1]);
-#endif
+	FD_ZERO(&r->root_fds[2]);
 
 	return NULL;
 }
@@ -184,11 +177,13 @@ sched_hook_fini(void *root, void *arg __unused)
 		r->root_kq = 0;
 	}
 #else
-	FD_ZERO(&r->root_fds[1]);
-	FD_ZERO(&r->root_fds[0]);
 	r->root_kq ^= r->root_kq;
 #endif
 
+	FD_ZERO(&r->root_fds[2]);
+	FD_ZERO(&r->root_fds[1]);
+	FD_ZERO(&r->root_fds[0]);
+
 	return NULL;
 }
 
@@ -204,12 +199,12 @@ sched_hook_cancel(void *task, void *arg __unused)
 {
 	sched_task_t *t = task, *tmp, *tt;
 	sched_root_task_t *r = NULL;
-	int flg;
+	int flg = 0;
 #if SUP_ENABLE == KQ_SUPPORT
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };
 #elif SUP_ENABLE == EP_SUPPORT
-	struct epoll_event ee = { .events = 0, .data.fd = 0 };
+	struct epoll_event ee = { .events = 0, .data.u64 = 0l };
 #else
 	register int i;
 #endif
@@ -229,11 +224,8 @@ sched_hook_cancel(void *task, void *arg __unused)
 	switch (TASK_TYPE(t)) {
 		case taskREAD:
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
-				if (TASK_FD(tt) != TASK_FD(t))
-					continue;
-				else
+				if (TASK_FD(tt) == TASK_FD(t))
 					flg++;
 #if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
@@ -247,32 +239,36 @@ sched_hook_cancel(void *task, void *arg __unused)
 			ee.data.fd = TASK_FD(t);
 			ee.events ^= ee.events;
 			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
-				ee.events = EPOLLOUT;
+				ee.events |= EPOLLOUT;
 
-			if (flg < 2)
+			if (flg < 2) {
 				FD_CLR(TASK_FD(t), &r->root_fds[0]);
-			else
-				ee.events |= EPOLLIN | EPOLLPRI;
+				FD_CLR(TASK_FD(t), &r->root_fds[2]);
+			} else {
+				if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
+					ee.events |= EPOLLIN;
+				if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
+					ee.events |= EPOLLPRI;
+			}
 #else
 			if (flg < 2) {
 				FD_CLR(TASK_FD(t), &r->root_fds[0]);
+				FD_CLR(TASK_FD(t), &r->root_fds[2]);
 
 				/* optimize select */
-				for (i = r->root_kq - 1; i > 2; i--)
-					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
+				for (i = r->root_kq - 1; i >= 0; i--)
+					if (FD_ISSET(i, &r->root_fds[0]) || 
+							FD_ISSET(i, &r->root_fds[1]) || 
+							FD_ISSET(i, &r->root_fds[2]))
 						break;
-				if (i > 2)
-					r->root_kq = i + 1;
+				r->root_kq = i + 1;
 			}
 #endif
 			break;
 		case taskWRITE:
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
-				if (TASK_FD(tt) != TASK_FD(t))
-					continue;
-				else
+				if (TASK_FD(tt) == TASK_FD(t))
 					flg++;
 #if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
@@ -286,7 +282,9 @@ sched_hook_cancel(void *task, void *arg __unused)
 			ee.data.fd = TASK_FD(t);
 			ee.events ^= ee.events;
 			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
-				ee.events = EPOLLIN | EPOLLPRI;
+				ee.events |= EPOLLIN;
+			if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
+				ee.events |= EPOLLPRI;
 
 			if (flg < 2)
 				FD_CLR(TASK_FD(t), &r->root_fds[1]);
@@ -297,22 +295,20 @@ sched_hook_cancel(void *task, void *arg __unused)
 				FD_CLR(TASK_FD(t), &r->root_fds[1]);
 
 				/* optimize select */
-				for (i = r->root_kq - 1; i > 2; i--)
-					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
+				for (i = r->root_kq - 1; i >= 0; i--)
+					if (FD_ISSET(i, &r->root_fds[0]) || 
+							FD_ISSET(i, &r->root_fds[1]) || 
+							FD_ISSET(i, &r->root_fds[2]))
 						break;
-				if (i > 2)
-					r->root_kq = i + 1;
+				r->root_kq = i + 1;
 			}
 #endif
 			break;
 		case taskALARM:
 #if SUP_ENABLE == KQ_SUPPORT
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
-				if (TASK_DATA(tt) != TASK_DATA(t))
-					continue;
-				else
+				if (TASK_DATA(tt) == TASK_DATA(t))
 					flg++;
 #ifdef __NetBSD__
 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
@@ -326,11 +322,8 @@ sched_hook_cancel(void *task, void *arg __unused)
 		case taskNODE:
 #if SUP_ENABLE == KQ_SUPPORT
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
-				if (TASK_FD(tt) != TASK_FD(t))
-					continue;
-				else
+				if (TASK_FD(tt) == TASK_FD(t))
 					flg++;
 #ifdef __NetBSD__
 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
@@ -344,11 +337,8 @@ sched_hook_cancel(void *task, void *arg __unused)
 		case taskPROC:
 #if SUP_ENABLE == KQ_SUPPORT
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
-				if (TASK_VAL(tt) != TASK_VAL(t))
-					continue;
-				else
+				if (TASK_VAL(tt) == TASK_VAL(t))
 					flg++;
 #ifdef __NetBSD__
 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
@@ -360,14 +350,11 @@ sched_hook_cancel(void *task, void *arg __unused)
 #endif
 			break;
 		case taskSIGNAL:
-#if SUP_ENABLE == KQ_SUPPORT
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
-				if (TASK_VAL(tt) != TASK_VAL(t))
-					continue;
-				else
+				if (TASK_VAL(tt) == TASK_VAL(t))
 					flg++;
+#if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
 			    0, 0, (intptr_t) TASK_VAL(t));
@@ -375,20 +362,19 @@ sched_hook_cancel(void *task, void *arg __unused)
 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
 			    0, 0, (void*) TASK_VAL(t));
 #endif
-			/* restore signal */
-			if (flg < 2)
-				signal(TASK_VAL(t), SIG_DFL);
 #endif
+			if (flg < 2) {
+				pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
+				sigdelset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
+				pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
+			}
 			break;
 #ifdef AIO_SUPPORT
 		case taskAIO:
 #if SUP_ENABLE == KQ_SUPPORT
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
-				if (TASK_VAL(tt) != TASK_VAL(t))
-					continue;
-				else
+				if (TASK_VAL(tt) == TASK_VAL(t))
 					flg++;
 #ifdef __NetBSD__
 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
@@ -401,7 +387,7 @@ sched_hook_cancel(void *task, void *arg __unused)
 			if (acb) {
 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
 					aio_return(acb);
-				free(acb);
+				e_free(acb);
 				TASK_VAL(t) = 0;
 			}
 #endif
@@ -410,11 +396,8 @@ sched_hook_cancel(void *task, void *arg __unused)
 		case taskLIO:
 #if SUP_ENABLE == KQ_SUPPORT
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
-				if (TASK_VAL(tt) != TASK_VAL(t))
-					continue;
-				else
+				if (TASK_VAL(tt) == TASK_VAL(t))
 					flg++;
 #ifdef __NetBSD__
 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
@@ -428,9 +411,9 @@ sched_hook_cancel(void *task, void *arg __unused)
 				for (i = 0; i < TASK_DATLEN(t); i++) {
 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
 						aio_return(acbs[i]);
-					free(acbs[i]);
+					e_free(acbs[i]);
 				}
-				free(acbs);
+				e_free(acbs);
 				TASK_VAL(t) = 0;
 			}
 #endif
@@ -441,11 +424,8 @@ sched_hook_cancel(void *task, void *arg __unused)
 		case taskUSER:
 #if SUP_ENABLE == KQ_SUPPORT
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
-				if (TASK_VAL(tt) != TASK_VAL(t))
-					continue;
-				else
+				if (TASK_VAL(tt) == TASK_VAL(t))
 					flg++;
 #ifdef __NetBSD__
 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
@@ -477,11 +457,8 @@ sched_hook_cancel(void *task, void *arg __unused)
 			schedCancel((sched_task_t*) TASK_RET(t));
 #else
 			/* check for multi subscribers */
-			flg = 0;
 			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
-				if (TASK_DATA(tt) != TASK_DATA(t))
-					continue;
-				else
+				if (TASK_DATA(tt) == TASK_DATA(t))
 					flg++;
 
 			/* restore signal */
@@ -497,7 +474,9 @@ sched_hook_cancel(void *task, void *arg __unused)
 #if SUP_ENABLE == KQ_SUPPORT
 	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
 #elif SUP_ENABLE == EP_SUPPORT
-	epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
+	if (TASK_TYPE(t) == taskREAD || TASK_TYPE(t) == taskWRITE) {
+		epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
+	}
 #endif
 	return NULL;
 }
@@ -547,15 +526,16 @@ sched_hook_thread(void *task, void *arg)
 * return: <0 errors and 0 ok
 */
void *
-sched_hook_read(void *task, void *arg __unused)
+sched_hook_read(void *task, void *arg)
 {
 	sched_task_t *t = task;
 	sched_root_task_t *r = NULL;
+	uintptr_t mask = (uintptr_t) arg;
#if SUP_ENABLE == KQ_SUPPORT
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
-	struct epoll_event ee;
+	struct epoll_event ee = { 0 };
 	int flg = 0;
 #endif
 
@@ -566,9 +546,11 @@ sched_hook_read(void *task, void *arg __unused)
 
 #if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
-	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
+	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask, 
+	    0, 0, (intptr_t) TASK_FD(t));
 #else
-	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
+	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask, 
+	    0, 0, (void*) TASK_FD(t));
 #endif
 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
 		if (r->root_hooks.hook_exec.exception)
@@ -578,10 +560,18 @@ sched_hook_read(void *task, void *arg __unused)
 			return (void*) -1;
 	}
 #elif SUP_ENABLE == EP_SUPPORT
+	if (!mask)
+		mask = EPOLLIN | EPOLLPRI;
 	ee.data.fd = TASK_FD(t);
-	ee.events = EPOLLIN | EPOLLPRI;
-	if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
+	ee.events = mask;
+	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
+		flg |= 4;
+		ee.events |= EPOLLPRI;
+	}
+	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
 		flg |= 1;
+		ee.events |= EPOLLIN;
+	}
 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
 		flg |= 2;
 		ee.events |= EPOLLOUT;
@@ -593,10 +583,23 @@ sched_hook_read(void *task, void *arg __unused)
 			else
 				LOGERR;
 		return (void*) -1;
-	} else
-		FD_SET(TASK_FD(t), &r->root_fds[0]);
+	} else {
+		if (mask & EPOLLIN)
+			FD_SET(TASK_FD(t), &r->root_fds[0]);
+		if (mask & EPOLLPRI)
+			FD_SET(TASK_FD(t), &r->root_fds[2]);
+	}
 #else
-	FD_SET(TASK_FD(t), &r->root_fds[0]);
+	if (!mask) {
+		FD_SET(TASK_FD(t), &r->root_fds[0]);
+		FD_SET(TASK_FD(t), &r->root_fds[2]);
+	} else {
+		if (mask & 1)
+			FD_SET(TASK_FD(t), &r->root_fds[0]);
+		if (mask & 2)
+			FD_SET(TASK_FD(t), &r->root_fds[2]);
+	}
+
 	if (TASK_FD(t) >= r->root_kq)
 		r->root_kq = TASK_FD(t) + 1;
 #endif
@@ -612,15 +615,16 @@ sched_hook_read(void *task, void *arg __unused)
 * return: <0 errors and 0 ok
 */
void *
-sched_hook_write(void *task, void *arg __unused)
+sched_hook_write(void *task, void *arg)
 {
 	sched_task_t *t = task;
 	sched_root_task_t *r = NULL;
+	uintptr_t mask = (uintptr_t) arg;
#if SUP_ENABLE == KQ_SUPPORT
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
-	struct epoll_event ee;
+	struct epoll_event ee = { 0 };
 	int flg = 0;
 #endif
 
@@ -631,9 +635,11 @@ sched_hook_write(void *task, void *arg __unused)
 
 #if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
-	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
+	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask, 
+	    0, 0, (intptr_t) TASK_FD(t));
 #else
-	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
+	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask, 
+	    0, 0, (void*) TASK_FD(t));
 #endif
 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
 		if (r->root_hooks.hook_exec.exception)
@@ -643,15 +649,23 @@ sched_hook_write(void *task, void *arg __unused)
 			return (void*) -1;
 	}
 #elif SUP_ENABLE == EP_SUPPORT
 
+	if (!mask)
+		mask = EPOLLOUT;
 	ee.data.fd = TASK_FD(t);
-	ee.events = EPOLLOUT;
+	ee.events = mask;
+	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
+		flg |= 4;
+		ee.events |= EPOLLPRI;
+	}
 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
 		flg |= 1;
-		ee.events |= EPOLLIN | EPOLLPRI;
+		ee.events |= EPOLLIN;
 	}
-	if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
+	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
 		flg |= 2;
+		ee.events |= EPOLLOUT;
+	}
 
 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
 		if (r->root_hooks.hook_exec.exception)
@@ -660,9 +674,15 @@ sched_hook_write(void *task, void *arg __unused)
 			LOGERR;
 		return (void*) -1;
 	} else
-		FD_SET(TASK_FD(t), &r->root_fds[1]);
+		if (mask & EPOLLOUT)
+			FD_SET(TASK_FD(t), &r->root_fds[1]);
 #else
-	FD_SET(TASK_FD(t), &r->root_fds[1]);
+	if (!mask)
+		FD_SET(TASK_FD(t), &r->root_fds[1]);
+	else
+		if (mask & 1)
+			FD_SET(TASK_FD(t), &r->root_fds[1]);
+
 	if (TASK_FD(t) >= r->root_kq)
 		r->root_kq = TASK_FD(t) + 1;
 #endif
@@ -796,17 +816,16 @@ sched_hook_proc(void *task, void *arg __unused)
 void *
 sched_hook_signal(void *task, void *arg __unused)
 {
-#if SUP_ENABLE == KQ_SUPPORT
 	sched_task_t *t = task;
+#if SUP_ENABLE == KQ_SUPPORT
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };
+#endif
 
 	if (!t || !TASK_ROOT(t))
 		return (void*) -1;
 
-	/* ignore signal */
-	signal(TASK_VAL(t), SIG_IGN);
-
+#if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
 #else
@@ -820,6 +839,11 @@ sched_hook_signal(void *task, void *arg __unused)
 		return (void*) -1;
 	}
 #endif
+
+	pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
+	sigaddset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
+	pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
+
 	return NULL;
 }
 
@@ -1015,7 +1039,7 @@ fetch_hook_kevent_proceed(int en, struct kevent *res,
 					LOGERR;
 			} else
 				LOGERR;
-			free(acb);
+			e_free(acb);
 			TASK_DATLEN(task) = (u_long) len;
 			TASK_FD(task) = fd;
 		}
@@ -1043,9 +1067,9 @@ fetch_hook_kevent_proceed(int en, struct kevent *res,
 					l = 0;
 				else
 					l = iv[i].iov_len;
-				free(acbs[i]);
+				e_free(acbs[i]);
 			}
-			free(acbs);
+			e_free(acbs);
 			TASK_DATLEN(task) = (u_long) len;
 			TASK_FD(task) = fd;
 
@@ -1093,26 +1117,27 @@ fetch_hook_kevent_proceed(int en, struct kevent *res,
 static inline void
 fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
 {
-	register int i, flg;
+	register int i, rflg, wflg;
 	int ops = EPOLL_CTL_DEL;
 	sched_task_t *t, *tmp, *task;
 	struct epoll_event evt[1];
 
 	for (i = 0; i < en; i++) {
 		memcpy(evt, &res[i], sizeof evt);
+		evt->events ^= evt->events;
+		rflg = wflg = 0;
 
-		if (evt->events & (EPOLLIN | EPOLLPRI)) {
-			flg = 0;
+		if (res[i].events & (EPOLLIN | EPOLLPRI)) {
 			task = NULL;
 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 				if (TASK_FD(t) == evt->data.fd) {
-					if (!flg)
+					if (!task)
 						task = t;
-					flg++;
+					rflg++;
 				}
 			}
 
-			if (flg && task) {
+			if (task) {
 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 				/* remove read handle */
 				remove_task_from(task, &r->root_read);
@@ -1131,29 +1156,32 @@ fetch_hook_epoll_proceed(int en, struct epoll_event *r
 					insert_task_to(task, &r->root_ready);
 				}
 
-				evt->events ^= evt->events;
-				if (FD_ISSET(evt->data.fd, &r->root_fds[1])) {
-					ops = EPOLL_CTL_MOD;
+				if (!(res[i].events & EPOLLOUT) && FD_ISSET(evt->data.fd, &r->root_fds[1])) {
 					evt->events |= EPOLLOUT;
+					wflg = 42;
 				}
-				if (flg > 1) {
-					ops = EPOLL_CTL_MOD;
-					evt->events |= EPOLLIN | EPOLLPRI;
-				} else
+				if (rflg > 1) {
+					if (FD_ISSET(evt->data.fd, &r->root_fds[0]))
+						evt->events |= EPOLLIN;
+					if (FD_ISSET(evt->data.fd, &r->root_fds[2]))
+						evt->events |= EPOLLPRI;
+				} else {
 					FD_CLR(evt->data.fd, &r->root_fds[0]);
+					FD_CLR(evt->data.fd, &r->root_fds[2]);
+				}
 			}
-		} else if (evt->events & EPOLLOUT) {
-			flg = 0;
+		}
+		if (res[i].events & EPOLLOUT) {
 			task = NULL;
 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 				if (TASK_FD(t) == evt->data.fd) {
-					if (!flg)
+					if (!task)
 						task = t;
-					flg++;
+					wflg++;
 				}
 			}
 
-			if (flg && task) {
+			if (task) {
 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 				/* remove write handle */
 				remove_task_from(task, &r->root_write);
@@ -1172,19 +1200,25 @@ fetch_hook_epoll_proceed(int en, struct epoll_event *r
 					insert_task_to(task, &r->root_ready);
 				}
 
-				evt->events ^= evt->events;
-				if (FD_ISSET(evt->data.fd, &r->root_fds[0])) {
-					ops = EPOLL_CTL_MOD;
-					evt->events |= EPOLLIN | EPOLLPRI;
+				if (!(res[i].events & EPOLLIN) && FD_ISSET(evt->data.fd, &r->root_fds[0])) {
+					evt->events |= EPOLLIN;
+					rflg = 42;
 				}
-				if (flg > 1) {
-					ops = EPOLL_CTL_MOD;
+				if (!(res[i].events & EPOLLPRI) && FD_ISSET(evt->data.fd, &r->root_fds[2])) {
+					evt->events |= EPOLLPRI;
+					rflg = 42;
+				}
+				if (wflg > 1)
 					evt->events |= EPOLLOUT;
-				} else
+				else
 					FD_CLR(evt->data.fd, &r->root_fds[1]);
 			}
 		}
 
+		ops = EPOLL_CTL_DEL;
+		if (rflg > 1 || wflg > 1)
+			ops = EPOLL_CTL_MOD;
+
 		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
 			if (r->root_hooks.hook_exec.exception) {
 				r->root_hooks.hook_exec.exception(r, NULL);
@@ -1199,25 +1233,32 @@
 static inline void
 fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
 {
-	register int i, flg;
-	sched_task_t *t, *tmp, *task = NULL;
+	register int i, rflg, wflg;
+	sched_task_t *t, *tmp, *task;
 
 	/* skip select check if return value from select is zero */
 	if (!en)
 		return;
 
 	for (i = 0; i < r->root_kq; i++) {
+		if (!FD_ISSET(i, &r->root_fds[0]) && 
+				!FD_ISSET(i, &r->root_fds[1]) && 
+				!FD_ISSET(i, &r->root_fds[2]))
+			continue;
+
+		rflg = wflg = 0;
+
 		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
-			flg = 0;
+			task = NULL;
 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 				if (TASK_FD(t) == i) {
-					if (!flg)
+					if (!task)
 						task = t;
-					flg++;
+					rflg++;
 				}
 			}
 
-			if (flg && task) {
+			if (task) {
 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD,
 				    &TASK_RET(task));
 				/* remove read handle */
@@ -1237,20 +1278,23 @@ fetch_hook_select_proceed(int en, fd_set rfd, fd_set w
 				}
 
 				/* remove resouce */
-				if (flg == 1)
+				if (rflg == 1) {
 					FD_CLR(i, &r->root_fds[0]);
+					FD_CLR(i, &r->root_fds[2]);
+				}
 			}
-		} else if (FD_ISSET(i, &wfd)) {
-			flg = 0;
+		}
+		if (FD_ISSET(i, &wfd)) {
+			task = NULL;
 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 				if (TASK_FD(t) == i) {
-					if (!flg)
+					if (!task)
 						task = t;
-					flg++;
+					wflg++;
 				}
 			}
 
-			if (flg && task) {
+			if (task) {
 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE,
 				    &TASK_RET(task));
 				/* remove write handle */
@@ -1270,18 +1314,19 @@ fetch_hook_select_proceed(int en, fd_set rfd, fd_set w
 				}
 
 				/* remove resouce */
-				if (flg == 1)
+				if (wflg == 1)
 					FD_CLR(i, &r->root_fds[1]);
 			}
 		}
 	}
 
 	/* optimize select */
-	for (i = r->root_kq - 1; i > 2; i--)
-		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
+	for (i = r->root_kq - 1; i >= 0; i--)
+		if (FD_ISSET(i, &r->root_fds[0]) || 
+				FD_ISSET(i, &r->root_fds[1]) || 
+				FD_ISSET(i, &r->root_fds[2]))
 			break;
-	if (i > 2)
-		r->root_kq = i + 1;
+	r->root_kq = i + 1;
 }
 
 #endif
@@ -1297,7 +1342,7 @@ sched_hook_fetch(void *root, void *arg __unused)
 {
 	sched_root_task_t *r = root;
 	sched_task_t *task, *tmp;
-	struct timespec now, m, mtmp;
+	struct timespec now, m, mtmp, *tsmin;
#if SUP_ENABLE == KQ_SUPPORT
 	struct kevent res[KQ_EVENTS];
 	struct timespec *timeout;
@@ -1340,7 +1385,11 @@ sched_hook_fetch(void *root, void *arg __unused)
 	if (TAILQ_FIRST(&r->root_timer)) {
 		m = r->root_wait;
 		sched_timespecsub(&m, &now, &mtmp);
-		r->root_wait = mtmp;
+		if (mtmp.tv_sec < 0 || mtmp.tv_nsec < 0)
+			/* don't wait for events. we have ready timer */
+			sched_timespecclear(&r->root_wait);
+		else
+			r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
 		sched_timespecinf(&r->root_wait);
@@ -1351,7 +1400,11 @@ sched_hook_fetch(void *root, void *arg __unused)
 
 		m = TASK_TS(task);
 		sched_timespecsub(&m, &now, &mtmp);
-		r->root_wait = mtmp;
+		if (mtmp.tv_sec < 0 || mtmp.tv_nsec < 0)
+			/* don't wait for events. we have ready timer */
+			sched_timespecclear(&r->root_wait);
+		else
+			r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
 		sched_timespecinf(&r->root_wait);
@@ -1360,13 +1413,14 @@ sched_hook_fetch(void *root, void *arg __unused)
 	} else /* no waiting for event, because we have ready task */
 		sched_timespecclear(&r->root_wait);
 
-	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
+	if (!sched_timespecisinf(&r->root_wait)) {
+		tsmin = sched_timespecmin(&r->root_wait, &r->root_poll);
#if SUP_ENABLE == KQ_SUPPORT
-		timeout = &r->root_wait;
+		timeout = tsmin;
#elif SUP_ENABLE == EP_SUPPORT
-		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
+		timeout = tsmin->tv_sec * 1000 + tsmin->tv_nsec / 1000000;
 #else
-		sched_timespec2val(&r->root_wait, &tv);
+		sched_timespec2val(tsmin, &tv);
 		timeout = &tv;
 #endif	/* KQ_SUPPORT */
 	} else if (sched_timespecisinf(&r->root_poll))
@@ -1391,7 +1445,8 @@ sched_hook_fetch(void *root, void *arg __unused)
#elif SUP_ENABLE == EP_SUPPORT
 	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
 #else
-	rfd = xfd = r->root_fds[0];
+	xfd = r->root_fds[2];
+	rfd = r->root_fds[0];
 	wfd = r->root_fds[1];
 	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
 #endif	/* KQ_SUPPORT */
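
Aside, for readers of the select() backend this revision touches: it keeps three descriptor sets (root_fds[0] read, root_fds[1] write, and the new root_fds[2] exception set) and, after clearing a descriptor, recomputes root_kq by scanning downward for the highest fd still registered. The standalone C sketch below mirrors that bookkeeping under assumed names — struct fd_book, book_init, book_set, and book_clr are illustrative only, not libaitsched API:

#include <sys/select.h>

/* Hypothetical mirror of the patch's state: fds[0]=read, fds[1]=write,
 * fds[2]=exception; nfds plays the role of r->root_kq (highest fd + 1). */
struct fd_book {
	fd_set fds[3];
	int nfds;
};

static void
book_init(struct fd_book *b)
{
	FD_ZERO(&b->fds[0]);
	FD_ZERO(&b->fds[1]);
	FD_ZERO(&b->fds[2]);
	b->nfds = 0;
}

static void
book_set(struct fd_book *b, int fd, int set)
{
	FD_SET(fd, &b->fds[set]);
	if (fd >= b->nfds)
		b->nfds = fd + 1;	/* grow the select() bound, as the hooks do */
}

static void
book_clr(struct fd_book *b, int fd, int set)
{
	int i;

	FD_CLR(fd, &b->fds[set]);
	/* the "optimize select" loop: scan downward across all three sets
	 * for the highest descriptor still registered; i ends at -1 when
	 * none remain, so nfds correctly collapses to 0 */
	for (i = b->nfds - 1; i >= 0; i--)
		if (FD_ISSET(i, &b->fds[0]) ||
		    FD_ISSET(i, &b->fds[1]) ||
		    FD_ISSET(i, &b->fds[2]))
			break;
	b->nfds = i + 1;
}

Note the related bound change in the patch itself: the 1.35 loop ran while i > 2 and only updated root_kq when i > 2, so descriptors 0-2 were never scanned and root_kq never shrank below 3; the 1.42 form scans down to 0, covers the exception set, and lets root_kq drop to 0 once all sets are empty.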