--- libaitsched/src/hooks.c	2015/07/02 22:43:30	1.28.2.3
+++ libaitsched/src/hooks.c	2017/09/04 08:47:43	1.32
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: hooks.c,v 1.28.2.3 2015/07/02 22:43:30 misho Exp $
+* $Id: hooks.c,v 1.32 2017/09/04 08:47:43 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -12,7 +12,7 @@ terms:
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria
-Copyright 2004 - 2015
+Copyright 2004 - 2017
 	by Michael Pounov .  All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -245,13 +245,14 @@ sched_hook_cancel(void *task, void *arg __unused)
 #endif
 #elif SUP_ENABLE == EP_SUPPORT
         ee.data.fd = TASK_FD(t);
+        ee.events ^= ee.events;
         if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
             ee.events = EPOLLOUT;
 
         if (flg < 2)
             FD_CLR(TASK_FD(t), &r->root_fds[0]);
         else
-            ee.events |= (EPOLLIN | EPOLLPRI | EPOLLRDHUP);
+            ee.events |= EPOLLIN | EPOLLPRI;
 #else
         if (flg < 2) {
             FD_CLR(TASK_FD(t), &r->root_fds[0]);
@@ -283,8 +284,9 @@ sched_hook_cancel(void *task, void *arg __unused)
 #endif
 #elif SUP_ENABLE == EP_SUPPORT
         ee.data.fd = TASK_FD(t);
+        ee.events ^= ee.events;
         if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
-            ee.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP;
+            ee.events = EPOLLIN | EPOLLPRI;
 
         if (flg < 2)
             FD_CLR(TASK_FD(t), &r->root_fds[1]);
@@ -553,7 +555,7 @@ sched_hook_read(void *task, void *arg __unused)
     struct kevent chg[1];
     struct timespec timeout = { 0, 0 };
 #elif SUP_ENABLE == EP_SUPPORT
-    struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI | EPOLLRDHUP, .data.fd = 0 };
+    struct epoll_event ee;
     int flg = 0;
 #endif
 
@@ -576,6 +578,8 @@ sched_hook_read(void *task, void *arg __unused)
         return (void*) -1;
     }
 #elif SUP_ENABLE == EP_SUPPORT
+    ee.data.fd = TASK_FD(t);
+    ee.events = EPOLLIN | EPOLLPRI;
     if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
         flg |= 1;
     if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
@@ -583,7 +587,6 @@ sched_hook_read(void *task, void *arg __unused)
         ee.events |= EPOLLOUT;
     }
 
-    ee.data.fd = TASK_FD(t);
     if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
         if (r->root_hooks.hook_exec.exception)
             r->root_hooks.hook_exec.exception(r, NULL);
@@ -617,7 +620,7 @@ sched_hook_write(void *task, void *arg __unused)
     struct kevent chg[1];
     struct timespec timeout = { 0, 0 };
 #elif SUP_ENABLE == EP_SUPPORT
-    struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
+    struct epoll_event ee;
     int flg = 0;
 #endif
 
@@ -640,14 +643,16 @@ sched_hook_write(void *task, void *arg __unused)
         return (void*) -1;
     }
 #elif SUP_ENABLE == EP_SUPPORT
+    ee.data.fd = TASK_FD(t);
+    ee.events = EPOLLOUT;
+
     if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
         flg |= 1;
-        ee.events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
+        ee.events |= EPOLLIN | EPOLLPRI;
     }
     if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
         flg |= 2;
 
-    ee.data.fd = TASK_FD(t);
     if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
         if (r->root_hooks.hook_exec.exception)
             r->root_hooks.hook_exec.exception(r, NULL);
@@ -869,7 +874,6 @@ fetch_hook_kevent_proceed(int en, struct kevent *res,
     struct aiocb *acb;
 #ifdef EVFILT_LIO
     int l;
-    register int j;
     off_t off;
     struct aiocb **acbs;
     struct iovec *iv;
@@ -883,155 +887,155 @@ fetch_hook_kevent_proceed(int en, struct kevent *res,
         switch (res[i].filter) {
             case EVFILT_READ:
                 TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
-                    if (TASK_FD(task) != ((intptr_t) res[i].udata))
-                        continue;
-                    else {
+                    if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
                         TASK_RET(task) = res[i].data;
                         TASK_FLAG(task) = (u_long) res[i].fflags;
-                    }
-                    /* remove read handle */
-                    remove_task_from(task, &r->root_read);
-                    if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
-                        if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
-                            task->task_type = taskUNUSE;
-                            insert_task_to(task, &r->root_unuse);
+                        /* remove read handle */
+                        remove_task_from(task, &r->root_read);
+
+                        if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
+                            if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
+                                task->task_type = taskUNUSE;
+                                insert_task_to(task, &r->root_unuse);
+                            } else {
+                                task->task_type = taskREADY;
+                                insert_task_to(task, &r->root_ready);
+                            }
                         } else {
                             task->task_type = taskREADY;
                             insert_task_to(task, &r->root_ready);
                         }
-                    } else {
-                        task->task_type = taskREADY;
-                        insert_task_to(task, &r->root_ready);
+                        break;
                     }
                 }
                 break;
             case EVFILT_WRITE:
                 TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
-                    if (TASK_FD(task) != ((intptr_t) res[i].udata))
-                        continue;
-                    else {
+                    if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
                         TASK_RET(task) = res[i].data;
                         TASK_FLAG(task) = (u_long) res[i].fflags;
-                    }
-                    /* remove write handle */
-                    remove_task_from(task, &r->root_write);
-                    if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
-                        if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
-                            task->task_type = taskUNUSE;
-                            insert_task_to(task, &r->root_unuse);
+                        /* remove write handle */
+                        remove_task_from(task, &r->root_write);
+
+                        if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
+                            if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
+                                task->task_type = taskUNUSE;
+                                insert_task_to(task, &r->root_unuse);
+                            } else {
+                                task->task_type = taskREADY;
+                                insert_task_to(task, &r->root_ready);
+                            }
                         } else {
                             task->task_type = taskREADY;
                             insert_task_to(task, &r->root_ready);
                         }
-                    } else {
-                        task->task_type = taskREADY;
-                        insert_task_to(task, &r->root_ready);
+                        break;
                     }
                 }
                 break;
             case EVFILT_TIMER:
                 TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
-                    if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
-                        continue;
-                    else {
+                    if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
                         TASK_RET(task) = res[i].data;
                         TASK_FLAG(task) = (u_long) res[i].fflags;
+
+                        /* remove alarm handle */
+                        transit_task2ready(task, &r->root_alarm);
+                        break;
                     }
-                    /* remove alarm handle */
-                    transit_task2ready(task, &r->root_alarm);
                 }
                 break;
             case EVFILT_VNODE:
                 TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
-                    if (TASK_FD(task) != ((intptr_t) res[i].udata))
-                        continue;
-                    else {
+                    if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
                        TASK_RET(task) = res[i].data;
                        TASK_FLAG(task) = (u_long) res[i].fflags;
+
+                        /* remove node handle */
+                        transit_task2ready(task, &r->root_node);
+                        break;
                     }
-                    /* remove node handle */
-                    transit_task2ready(task, &r->root_node);
                 }
                 break;
             case EVFILT_PROC:
                 TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
-                    if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
-                        continue;
-                    else {
+                    if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
                         TASK_RET(task) = res[i].data;
                         TASK_FLAG(task) = (u_long) res[i].fflags;
+
+                        /* remove proc handle */
+                        transit_task2ready(task, &r->root_proc);
+                        break;
                     }
-                    /* remove proc handle */
-                    transit_task2ready(task, &r->root_proc);
                 }
                 break;
             case EVFILT_SIGNAL:
                 TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
-                    if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
-                        continue;
-                    else {
+                    if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
                         TASK_RET(task) = res[i].data;
                         TASK_FLAG(task) = (u_long) res[i].fflags;
+
+                        /* remove signal handle */
+                        transit_task2ready(task, &r->root_signal);
+                        break;
                     }
-                    /* remove signal handle */
-                    transit_task2ready(task, &r->root_signal);
                 }
                 break;
 #ifdef AIO_SUPPORT
             case EVFILT_AIO:
                 TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
                     acb = (struct aiocb*) TASK_VAL(task);
-                    if (acb != ((struct aiocb*) res[i].udata))
-                        continue;
-                    else {
+                    if (acb == ((struct aiocb*) res[i].udata)) {
                         TASK_RET(task) = res[i].data;
                         TASK_FLAG(task) = (u_long) res[i].fflags;
-                    }
-                    /* remove user handle */
-                    transit_task2ready(task, &r->root_aio);
+
+                        /* remove user handle */
+                        transit_task2ready(task, &r->root_aio);
 
-                    fd = acb->aio_fildes;
-                    if ((len = aio_return(acb)) != -1) {
-                        if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
+                        fd = acb->aio_fildes;
+                        if ((len = aio_return(acb)) != -1) {
+                            if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
+                                LOGERR;
+                        } else
                             LOGERR;
-                    } else
-                        LOGERR;
-                    free(acb);
-                    TASK_DATLEN(task) = (u_long) len;
-                    TASK_FD(task) = fd;
+                        free(acb);
+                        TASK_DATLEN(task) = (u_long) len;
+                        TASK_FD(task) = fd;
+                        break;
+                    }
                 }
                 break;
 #ifdef EVFILT_LIO
             case EVFILT_LIO:
                 TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
                     acbs = (struct aiocb**) TASK_VAL(task);
-                    if (acbs != ((struct aiocb**) res[i].udata))
-                        continue;
-                    else {
+                    if (acbs == ((struct aiocb**) res[i].udata)) {
                         TASK_RET(task) = res[i].data;
                         TASK_FLAG(task) = (u_long) res[i].fflags;
-                    }
-                    /* remove user handle */
-                    transit_task2ready(task, &r->root_lio);
-                    iv = (struct iovec*) TASK_DATA(task);
-                    fd = acbs[0]->aio_fildes;
-                    off = acbs[0]->aio_offset;
-                    for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
-                        if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
-                            l = 0;
-                        else
-                            l = iv[i].iov_len;
-                        free(acbs[i]);
-                    }
-                    free(acbs);
-                    TASK_DATLEN(task) = (u_long) len;
-                    TASK_FD(task) = fd;
+                        /* remove user handle */
+                        transit_task2ready(task, &r->root_lio);
 
-                    if (lseek(fd, off + len, SEEK_CUR) == -1)
-                        LOGERR;
+                        iv = (struct iovec*) TASK_DATA(task);
+                        fd = acbs[0]->aio_fildes;
+                        off = acbs[0]->aio_offset;
+                        for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
+                            if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
+                                l = 0;
+                            else
+                                l = iv[i].iov_len;
+                            free(acbs[i]);
+                        }
+                        free(acbs);
+                        TASK_DATLEN(task) = (u_long) len;
+                        TASK_FD(task) = fd;
+
+                        if (lseek(fd, off + len, SEEK_CUR) == -1)
+                            LOGERR;
+                        break;
+                    }
                 }
                 break;
 #endif	/* EVFILT_LIO */
@@ -1039,14 +1043,14 @@ fetch_hook_kevent_proceed(int en, struct kevent *res,
 #ifdef EVFILT_USER
             case EVFILT_USER:
                 TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
-                    if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
-                        continue;
-                    else {
+                    if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
                         TASK_RET(task) = res[i].data;
                         TASK_FLAG(task) = (u_long) res[i].fflags;
+
+                        /* remove user handle */
+                        transit_task2ready(task, &r->root_user);
+                        break;
                     }
-                    /* remove user handle */
-                    transit_task2ready(task, &r->root_user);
                 }
                 break;
 #endif	/* EVFILT_USER */
@@ -1068,28 +1072,25 @@ fetch_hook_epoll_proceed(int en, struct epoll_event *r
 {
     register int i, flg;
     int ops = EPOLL_CTL_DEL;
-    sched_task_t *task, *tmp;
+    sched_task_t *t, *tmp, *task;
     struct epoll_event evt[1];
 
     for (i = 0; i < en; i++) {
         memcpy(evt, &res[i], sizeof evt);
 
-        if (evt->events & (EPOLLIN | EPOLLPRI | EPOLLET)) {
+        if (evt->events & (EPOLLIN | EPOLLPRI)) {
            flg = 0;
-            TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
-                if (TASK_FD(task) != evt->data.fd)
-                    continue;
-                else {
+            task = NULL;
+            TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
+                if (TASK_FD(t) == evt->data.fd) {
+                    if (!flg)
+                        task = t;
                     flg++;
-                    FD_CLR(TASK_FD(task), &r->root_fds[0]);
-                    TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
-
-                    evt->events &= ~(EPOLLIN | EPOLLPRI | EPOLLET | EPOLLRDHUP);
-                    if (FD_ISSET(TASK_FD(task), &r->root_fds[1])) {
-                        ops = EPOLL_CTL_MOD;
-                        evt->events |= EPOLLOUT;
-                    }
                 }
+            }
+
+            if (flg && task) {
+                TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 
                 /* remove read handle */
                 remove_task_from(task, &r->root_read);
@@ -1106,28 +1107,31 @@ fetch_hook_epoll_proceed(int en, struct epoll_event *r
                     task->task_type = taskREADY;
                     insert_task_to(task, &r->root_ready);
                 }
-            }
-            if (flg > 1)
-                ops = EPOLL_CTL_MOD;
-        }
-        if (evt->events & EPOLLOUT) {
+
+                evt->events ^= evt->events;
+                if (FD_ISSET(evt->data.fd, &r->root_fds[1])) {
+                    ops = EPOLL_CTL_MOD;
+                    evt->events |= EPOLLOUT;
+                }
+                if (flg > 1) {
+                    ops = EPOLL_CTL_MOD;
+                    evt->events |= EPOLLIN | EPOLLPRI;
+                } else
+                    FD_CLR(evt->data.fd, &r->root_fds[0]);
+            }
+        } else if (evt->events & EPOLLOUT) {
             flg = 0;
-            TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
-                if (TASK_FD(task) != evt->data.fd)
-                    continue;
-                else {
+            task = NULL;
+            TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
+                if (TASK_FD(t) == evt->data.fd) {
+                    if (!flg)
+                        task = t;
                     flg++;
-                    FD_CLR(TASK_FD(task), &r->root_fds[1]);
-                    TASK_FLAG(task) = ioctl(TASK_FD(task),
-                            FIONWRITE, &TASK_RET(task));
-
-                    evt->events &= ~EPOLLOUT;
-                    if (FD_ISSET(TASK_FD(task), &r->root_fds[0])) {
-                        ops = EPOLL_CTL_MOD;
-                        evt->events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
-                    }
                 }
+            }
+
+            if (flg && task) {
+                TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 
                 /* remove write handle */
                 remove_task_from(task, &r->root_write);
@@ -1144,9 +1148,18 @@ fetch_hook_epoll_proceed(int en, struct epoll_event *r
                     task->task_type = taskREADY;
                     insert_task_to(task, &r->root_ready);
                 }
+
+                evt->events ^= evt->events;
+                if (FD_ISSET(evt->data.fd, &r->root_fds[0])) {
+                    ops = EPOLL_CTL_MOD;
+                    evt->events |= EPOLLIN | EPOLLPRI;
+                }
+                if (flg > 1) {
+                    ops = EPOLL_CTL_MOD;
+                    evt->events |= EPOLLOUT;
+                } else
+                    FD_CLR(evt->data.fd, &r->root_fds[1]);
             }
-            if (flg > 1)
-                ops = EPOLL_CTL_MOD;
         }
 
         if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
@@ -1164,7 +1177,7 @@ static inline void
 fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
 {
     register int i, flg;
-    sched_task_t *task, *tmp;
+    sched_task_t *t, *tmp, *task = NULL;
 
     /* skip select check if return value from select is zero */
     if (!en)
@@ -1173,14 +1186,17 @@ fetch_hook_select_proceed(int en, fd_set rfd, fd_set w
     for (i = 0; i < r->root_kq; i++) {
         if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
             flg = 0;
-            TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
-                if (TASK_FD(task) != i)
-                    continue;
-                else {
+            TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
+                if (TASK_FD(t) == i) {
+                    if (!flg)
+                        task = t;
                     flg++;
-                    TASK_FLAG(task) = ioctl(TASK_FD(task),
-                            FIONREAD, &TASK_RET(task));
                 }
+            }
+
+            if (flg && task) {
+                TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
+
                 /* remove read handle */
                 remove_task_from(task, &r->root_read);
@@ -1196,22 +1212,24 @@ fetch_hook_select_proceed(int en, fd_set rfd, fd_set w
                     task->task_type = taskREADY;
                     insert_task_to(task, &r->root_ready);
                 }
-            }
-            /* remove resouce */
-            if (flg)
-                FD_CLR(i, &r->root_fds[0]);
-        }
-        if (FD_ISSET(i, &wfd)) {
+
+                /* remove resouce */
+                if (flg == 1)
+                    FD_CLR(i, &r->root_fds[0]);
+            }
+        } else if (FD_ISSET(i, &wfd)) {
             flg = 0;
-            TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
-                if (TASK_FD(task) != i)
-                    continue;
-                else {
+            TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
+                if (TASK_FD(t) == i) {
+                    if (!flg)
+                        task = t;
                     flg++;
-                    TASK_FLAG(task) = ioctl(TASK_FD(task),
-                            FIONWRITE, &TASK_RET(task));
                 }
+            }
+
+            if (flg && task) {
+                TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
+
                 /* remove write handle */
                 remove_task_from(task, &r->root_write);
@@ -1227,10 +1245,11 @@ fetch_hook_select_proceed(int en, fd_set rfd, fd_set w
                     task->task_type = taskREADY;
                     insert_task_to(task, &r->root_ready);
                 }
+
+                /* remove resouce */
+                if (flg == 1)
+                    FD_CLR(i, &r->root_fds[1]);
             }
-            /* remove resouce */
-            if (flg)
-                FD_CLR(i, &r->root_fds[1]);
         }
     }
 }
@@ -1454,11 +1473,11 @@ sched_hook_condition(void *root, void *arg)
  * @arg = unused
  * return: <0 errors and 0 ok
  */
-#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
-    defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
 void *
 sched_hook_rtc(void *task, void *arg __unused)
 {
+#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
+    defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
     sched_task_t *sigt = NULL, *t = task;
     struct itimerspec its;
     struct sigevent evt;
@@ -1524,7 +1543,6 @@ sched_hook_rtc(void *task, void *arg __unused)
         timer_delete(tmr);
        return (void*) -1;
     }
-
+#endif	/* HAVE_TIMER_CREATE */
     return NULL;
 }
-#endif	/* HAVE_TIMER_CREATE */
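
Reviewer note on the epoll-side hunks: every hook in this revision starts from a zeroed event mask (ee.events ^= ee.events; or a freshly filled struct epoll_event), rebuilds the mask from the scheduler's shadow fd sets (root_fds[0] for readers, root_fds[1] for writers), drops EPOLLRDHUP/EPOLLET from the interest set, and only then picks the epoll_ctl operation. The sketch below restates that re-arm pattern outside the library; rearm_fd() and its two fd_set parameters are illustrative stand-ins of my own, not part of the libaitsched API.

#include <string.h>
#include <sys/epoll.h>
#include <sys/select.h>

/*
 * Re-register one descriptor after a read or write task went away.
 * rfds/wfds are shadow fd_sets tracking which descriptors still have
 * pending read/write tasks (the role root_fds[0]/root_fds[1] play in
 * the patch).  Returns the result of epoll_ctl().
 */
static int
rearm_fd(int epfd, int fd, fd_set *rfds, fd_set *wfds)
{
    struct epoll_event ee;
    int op;

    memset(&ee, 0, sizeof ee);      /* start from a clean event mask */
    ee.data.fd = fd;

    if (FD_ISSET(fd, rfds))
        ee.events |= EPOLLIN | EPOLLPRI;
    if (FD_ISSET(fd, wfds))
        ee.events |= EPOLLOUT;

    /* still watched -> modify; nothing left to watch -> delete */
    op = ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
    return (epoll_ctl(epfd, op, fd, &ee));
}

Rebuilding the mask from scratch is what keeps a stale EPOLLOUT (or a previously requested EPOLLRDHUP) from being re-registered for a descriptor whose matching task has already been cancelled.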
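
The fetch-path rewrites (kqueue, epoll and select variants alike) converge on the same bookkeeping: dispatch the first task whose descriptor matches the event, count how many tasks share that descriptor, and clear the descriptor from the shadow fd set only when the dispatched task was its last watcher. A minimal sketch of that pattern follows, under the assumption of a simplified task type; demo_task, demo_queue and claim_ready_task() are hypothetical names for illustration, not the library's sched_task_t API.

#include <sys/queue.h>
#include <sys/select.h>

/* Illustrative task type; the real sched_task_t lives inside libaitsched. */
struct demo_task {
    int fd;
    TAILQ_ENTRY(demo_task) node;
};
TAILQ_HEAD(demo_queue, demo_task);

/*
 * Dispatch the first task waiting on `fd`, count how many tasks share
 * the descriptor, and drop `fd` from the shadow set only when the
 * dispatched task was its last watcher -- the same flg/task bookkeeping
 * the patched fetch hooks use.
 */
static struct demo_task *
claim_ready_task(struct demo_queue *q, int fd, fd_set *shadow)
{
    struct demo_task *t, *task = NULL;
    int flg = 0;

    TAILQ_FOREACH(t, q, node) {
        if (t->fd == fd) {
            if (!flg)
                task = t;       /* first match gets dispatched */
            flg++;              /* later matches keep the fd armed */
        }
    }

    if (flg == 1)
        FD_CLR(fd, shadow);     /* nobody else watches this fd */
    if (task)
        TAILQ_REMOVE(q, task, node);
    return (task);
}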