/*************************************************************************
 * (C) 2011 AITNET ltd - Sofia/Bulgaria -
 *  by Michael Pounov
 *
 * $Author: misho $
 * $Id: hooks.c,v 1.42 2023/08/17 14:14:24 misho Exp $
 *
 *************************************************************************
 The ELWIX and AITNET software is distributed under the following
 terms:

 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria

 Copyright 2004 - 2023 by Michael Pounov. All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. All advertising materials mentioning features or use of this software
    must display the following acknowledgement:
	This product includes software developed by Michael Pounov
	ELWIX - Embedded LightWeight unIX and its contributors.
 4. Neither the name of AITNET nor the names of its contributors
    may be used to endorse or promote products derived from this software
    without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.
 */
#include "global.h"
#include "hooks.h"


#ifdef HAVE_LIBPTHREAD
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/* pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); */

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	ret = schedCall(t);
	r->root_ret = ret;

	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
#endif

#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
#if SUP_ENABLE == KQ_SUPPORT
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
#else
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		timer_delete((timer_t) TASK_FLAG(task));

		TASK_RET(task) = (intptr_t) schedCall(task);

		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
#endif
#endif

/*
 * sched_hook_init() - Default INIT hook
 *
 * @root = root task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_init(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;

	if (!r)
		return (void*) -1;

#if SUP_ENABLE == KQ_SUPPORT
	r->root_kq = kqueue();
	if (r->root_kq == -1) {
		LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	r->root_kq = epoll_create(KQ_EVENTS);
	if (r->root_kq == -1) {
		LOGERR;
		return (void*) -1;
	}
#else
	r->root_kq ^= r->root_kq;
#endif

	FD_ZERO(&r->root_fds[0]);
	FD_ZERO(&r->root_fds[1]);
	FD_ZERO(&r->root_fds[2]);

	return NULL;
}

/*
 * sched_hook_fini() - Default FINI hook
 *
 * @root = root task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_fini(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;

	if (!r)
		return (void*) -1;

#if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
	if (r->root_kq > 2) {
		close(r->root_kq);
		r->root_kq = 0;
	}
#else
	r->root_kq ^= r->root_kq;
#endif

	FD_ZERO(&r->root_fds[2]);
	FD_ZERO(&r->root_fds[1]);
	FD_ZERO(&r->root_fds[0]);

	return NULL;
}
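/*
 * Usage sketch (illustrative only; schedBegin/schedRead/schedRun/schedEnd
 * are assumed from the public library headers, they are not defined in
 * this file).  The default hooks in this file are reached through the
 * hook tables hanging off the root task, roughly like so:
 *
 *	sched_root_task_t *root = schedBegin();	// root created, INIT hook runs
 *	schedRead(root, cb, NULL, fd, NULL, 0);	// fd registered via READ hook
 *	schedRun(root, &death);			// FETCH/EXCEPTION/CONDITION loop
 *	schedEnd(&root);			// teardown, FINI hook runs
 */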
/*
 * sched_hook_cancel() - Default CANCEL hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	int flg = 0;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.u64 = 0l };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events |= EPOLLOUT;

			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);
			} else {
				if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
					ee.events |= EPOLLIN;
				if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
					ee.events |= EPOLLPRI;
			}
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);

				/* optimize select */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) ||
							FD_ISSET(i, &r->root_fds[1]) ||
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events |= EPOLLIN;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
				ee.events |= EPOLLPRI;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) ||
							FD_ISSET(i, &r->root_fds[1]) ||
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			if (flg < 2) {
				pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
				sigdelset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
				pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
			}
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				e_free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					e_free(acbs[i]);
				}
				e_free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			return NULL;
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;

			/* restore signal */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	if (TASK_TYPE(t) == taskREAD || TASK_TYPE(t) == taskWRITE) {
		epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
	}
#endif

	return NULL;
}
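/*
 * Example (illustrative): cancellation normally arrives here through the
 * root's hook table via schedCancel(); schedRead() is assumed from the
 * library headers, only schedCall()/schedCancel()/schedSignal() are
 * referenced in this file:
 *
 *	sched_task_t *task = schedRead(root, cb, NULL, fd, NULL, 0);
 *	...
 *	schedCancel(task);	// also drops the kqueue/epoll/select registration,
 *				// unless another task still watches the same fd
 */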
#ifdef HAVE_LIBPTHREAD
/*
 * sched_hook_thread() - Default THREAD hook
 *
 * @task = current task
 * @arg = pthread attributes
 * return: <0 errors and 0 ok
 */
void *
sched_hook_thread(void *task, void *arg)
{
	sched_task_t *t = task;
	pthread_t tid;
	sigset_t s, o;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	sigfillset(&s);
	pthread_sigmask(SIG_BLOCK, &s, &o);
	errno = pthread_create(&tid, (pthread_attr_t*) arg,
			(void *(*)(void*)) _sched_threadWrapper, t);
	pthread_sigmask(SIG_SETMASK, &o, NULL);

	if (errno) {
		LOGERR;
		return (void*) -1;
	} else
		TASK_VAL(t) = (u_long) tid;

	if (!TASK_ISLOCKED(t))
		TASK_LOCK(t);

	return NULL;
}
#endif
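/*
 * Minimal sketch of the masking idiom above (standard POSIX semantics,
 * nothing library-specific): a new thread inherits the signal mask in
 * effect at pthread_create() time, so all signals are blocked first and
 * the caller's mask is restored afterwards:
 *
 *	sigset_t s, o;
 *	sigfillset(&s);
 *	pthread_sigmask(SIG_BLOCK, &s, &o);	// child will start fully masked
 *	pthread_create(&tid, NULL, fn, arg);
 *	pthread_sigmask(SIG_SETMASK, &o, NULL);	// parent mask restored
 */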
/*
 * sched_hook_read() - Default READ hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_read(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	uintptr_t mask = (uintptr_t) arg;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { 0 };
	int flg = 0;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask,
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask,
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLIN | EPOLLPRI;

	ee.data.fd = TASK_FD(t);
	ee.events = mask;
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else {
		if (mask & EPOLLIN)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & EPOLLPRI)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}
#else
	if (!mask) {
		FD_SET(TASK_FD(t), &r->root_fds[0]);
		FD_SET(TASK_FD(t), &r->root_fds[2]);
	} else {
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & 2)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}

	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
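/*
 * Bookkeeping note with an assumed two-watch scenario (schedRead and
 * schedWrite are taken from the public API, not from this file): epoll
 * keeps a single entry per descriptor, so read and write interest on the
 * same fd must be merged into one epoll_event.  root_fds[0]/[1]/[2]
 * mirror the IN/OUT/PRI interest; e.g. after
 *
 *	schedRead(root, rcb, NULL, fd, NULL, 0);	// ADD with EPOLLIN|EPOLLPRI
 *	schedWrite(root, wcb, NULL, fd, NULL, 0);	// MOD, now |EPOLLOUT as well
 *
 * the fd is registered once, carrying all three event bits.
 */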
/*
 * sched_hook_write() - Default WRITE hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_write(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	uintptr_t mask = (uintptr_t) arg;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { 0 };
	int flg = 0;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask,
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask,
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLOUT;

	ee.data.fd = TASK_FD(t);
	ee.events = mask;
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else if (mask & EPOLLOUT)
		FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
	if (!mask)
		FD_SET(TASK_FD(t), &r->root_fds[1]);
	else if (mask & 1)
		FD_SET(TASK_FD(t), &r->root_fds[1]);

	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}

/*
 * sched_hook_alarm() - Default ALARM hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif

	return NULL;
}
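/*
 * Worked example (arithmetic only): EVFILT_TIMER takes its period in
 * milliseconds by default, so the timespec above is folded as
 * tv_sec * 1000 + tv_nsec / 1000000; e.g. { .tv_sec = 2,
 * .tv_nsec = 500000000 } becomes 2 * 1000 + 500000000 / 1000000 = 2500 ms.
 */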
/*
 * sched_hook_node() - Default NODE hook
 *
 * @task = current task
 * @arg = if arg == 42 then wait for all events
 * return: <0 errors and 0 ok
 */
void *
sched_hook_node(void *task, void *arg)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
	u_int addflags = (u_int) (uintptr_t) arg;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0,
			(intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0,
			(void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif

	return NULL;
}

/*
 * sched_hook_proc() - Default PROC hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0,
			(intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0,
			(void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif

	return NULL;
}

/*
 * sched_hook_signal() - Default SIGNAL hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_signal(void *task, void *arg __unused)
{
	sched_task_t *t = task;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0,
			(intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0,
			(void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif

	pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
	sigaddset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
	pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);

	return NULL;
}

/*
 * sched_hook_user() - Default USER hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
#ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 0,
			(intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 0,
			(void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif

	return NULL;
}
#endif
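/*
 * Example (illustrative, plain kqueue API): a user event registered above
 * fires when some other context posts NOTE_TRIGGER against the same
 * ident:
 *
 *	struct kevent ev;
 *	EV_SET(&ev, ident, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// wakes the taskUSER subscriber
 */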
#if SUP_ENABLE == KQ_SUPPORT
static inline void
fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
{
	struct kevent evt[1];
	register int i, flg;
	sched_task_t *task, *tmp;
	struct timespec now = { 0, 0 };
#ifdef AIO_SUPPORT
	int len, fd;
	struct aiocb *acb;
#ifdef EVFILT_LIO
	int l;
	register int j;	/* private index for the aiocb array; `i` walks kevent results */
	off_t off;
	struct aiocb **acbs;
	struct iovec *iv;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	for (i = 0; i < en; i++) {
		memcpy(evt, &res[i], sizeof evt);
		evt->flags = EV_DELETE;
		/* Put read/write task to ready queue */
		flg = 0;
		switch (res[i].filter) {
			case EVFILT_READ:
				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove read handle */
							remove_task_from(task, &r->root_read);

							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
									task->task_type = taskUNUSE;
									insert_task_to(task, &r->root_unuse);
								} else {
									task->task_type = taskREADY;
									insert_task_to(task, &r->root_ready);
								}
							} else {
								task->task_type = taskREADY;
								insert_task_to(task, &r->root_ready);
							}
						}
						flg++;
					}
				}
				break;
			case EVFILT_WRITE:
				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove write handle */
							remove_task_from(task, &r->root_write);

							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
									task->task_type = taskUNUSE;
									insert_task_to(task, &r->root_unuse);
								} else {
									task->task_type = taskREADY;
									insert_task_to(task, &r->root_ready);
								}
							} else {
								task->task_type = taskREADY;
								insert_task_to(task, &r->root_ready);
							}
						}
						flg++;
					}
				}
				break;
			case EVFILT_TIMER:
				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
					if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove alarm handle */
							transit_task2ready(task, &r->root_alarm);
						}
						flg++;
					}
				}
				break;
			case EVFILT_VNODE:
				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove node handle */
							transit_task2ready(task, &r->root_node);
						}
						flg++;
					}
				}
				break;
			case EVFILT_PROC:
				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove proc handle */
							transit_task2ready(task, &r->root_proc);
						}
						flg++;
					}
				}
				break;
			case EVFILT_SIGNAL:
				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove signal handle */
							transit_task2ready(task, &r->root_signal);
						}
						flg++;
					}
				}
				break;
#ifdef AIO_SUPPORT
			case EVFILT_AIO:
				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
					acb = (struct aiocb*) TASK_VAL(task);
					if (acb == ((struct aiocb*) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove user handle */
							transit_task2ready(task, &r->root_aio);

							fd = acb->aio_fildes;
							if ((len = aio_return(acb)) != -1) {
								if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
									LOGERR;
							} else
								LOGERR;
							e_free(acb);
							TASK_DATLEN(task) = (u_long) len;
							TASK_FD(task) = fd;
						}
						flg++;
					}
				}
				break;
#ifdef EVFILT_LIO
			case EVFILT_LIO:
				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
					acbs = (struct aiocb**) TASK_VAL(task);
					if (acbs == ((struct aiocb**) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove user handle */
							transit_task2ready(task, &r->root_lio);

							iv = (struct iovec*) TASK_DATA(task);
							fd = acbs[0]->aio_fildes;
							off = acbs[0]->aio_offset;
							/* walk the aiocb array with `j`, not the kevent counter `i` */
							for (len = j = 0; j < TASK_DATLEN(task); len += l, j++) {
								if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
									l = 0;
								else
									l = iv[j].iov_len;
								e_free(acbs[j]);
							}
							e_free(acbs);
							TASK_DATLEN(task) = (u_long) len;
							TASK_FD(task) = fd;

							if (lseek(fd, off + len, SEEK_CUR) == -1)
								LOGERR;
						}
						flg++;
					}
				}
				break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
			case EVFILT_USER:
				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove user handle */
							transit_task2ready(task, &r->root_user);
						}
						flg++;
					}
				}
				break;
#endif	/* EVFILT_USER */
		}

		if (flg > 1)
			evt->flags &= ~EV_DELETE;

		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
			if (r->root_hooks.hook_exec.exception)
				r->root_hooks.hook_exec.exception(r, NULL);
			else
				LOGERR;
		}
	}
}
#endif
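/*
 * Dispatcher contract (applies to fetch_hook_kevent_proceed() above and
 * to the epoll/select variants below): only the first task subscribed to
 * the fired resource is moved to the ready queue; `flg`/`rflg`/`wflg`
 * count the remaining subscribers, and the kernel registration is dropped
 * only when no other subscriber is left.
 */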
#if SUP_ENABLE == EP_SUPPORT
static inline void
fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
{
	register int i, rflg, wflg;
	int ops = EPOLL_CTL_DEL;
	sched_task_t *t, *tmp, *task;
	struct epoll_event evt[1];

	for (i = 0; i < en; i++) {
		memcpy(evt, &res[i], sizeof evt);
		evt->events ^= evt->events;
		rflg = wflg = 0;

		if (res[i].events & (EPOLLIN | EPOLLPRI)) {
			task = NULL;
			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
				if (TASK_FD(t) == evt->data.fd) {
					if (!task)
						task = t;
					rflg++;
				}
			}

			if (task) {
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));

				/* remove read handle */
				remove_task_from(task, &r->root_read);

				/* error bits live in the kernel-reported events; evt->events was cleared above */
				if (r->root_hooks.hook_exec.exception && res[i].events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
							(res[i].events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				if (!(res[i].events & EPOLLOUT) && FD_ISSET(evt->data.fd, &r->root_fds[1])) {
					evt->events |= EPOLLOUT;
					wflg = 42;
				}
				if (rflg > 1) {
					if (FD_ISSET(evt->data.fd, &r->root_fds[0]))
						evt->events |= EPOLLIN;
					if (FD_ISSET(evt->data.fd, &r->root_fds[2]))
						evt->events |= EPOLLPRI;
				} else {
					FD_CLR(evt->data.fd, &r->root_fds[0]);
					FD_CLR(evt->data.fd, &r->root_fds[2]);
				}
			}
		}
		if (res[i].events & EPOLLOUT) {
			task = NULL;
			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
				if (TASK_FD(t) == evt->data.fd) {
					if (!task)
						task = t;
					wflg++;
				}
			}

			if (task) {
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));

				/* remove write handle */
				remove_task_from(task, &r->root_write);

				if (r->root_hooks.hook_exec.exception && res[i].events & (EPOLLERR | EPOLLHUP)) {
					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
							(res[i].events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				if (!(res[i].events & EPOLLIN) && FD_ISSET(evt->data.fd, &r->root_fds[0])) {
					evt->events |= EPOLLIN;
					rflg = 42;
				}
				if (!(res[i].events & EPOLLPRI) && FD_ISSET(evt->data.fd, &r->root_fds[2])) {
					evt->events |= EPOLLPRI;
					rflg = 42;
				}
				if (wflg > 1)
					evt->events |= EPOLLOUT;
				else
					FD_CLR(evt->data.fd, &r->root_fds[1]);
			}
		}

		ops = EPOLL_CTL_DEL;
		if (rflg > 1 || wflg > 1)
			ops = EPOLL_CTL_MOD;

		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
			if (r->root_hooks.hook_exec.exception) {
				r->root_hooks.hook_exec.exception(r, NULL);
			} else
				LOGERR;
		}
	}
}
#endif
#if SUP_ENABLE == NO_SUPPORT
static inline void
fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
{
	register int i, rflg, wflg;
	sched_task_t *t, *tmp, *task;

	/* skip select check if return value from select is zero */
	if (!en)
		return;

	for (i = 0; i < r->root_kq; i++) {
		if (!FD_ISSET(i, &r->root_fds[0]) &&
				!FD_ISSET(i, &r->root_fds[1]) &&
				!FD_ISSET(i, &r->root_fds[2]))
			continue;
		rflg = wflg = 0;

		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
			task = NULL;
			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!task)
						task = t;
					rflg++;
				}
			}

			if (task) {
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));

				/* remove read handle */
				remove_task_from(task, &r->root_read);

				if (r->root_hooks.hook_exec.exception) {
					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource */
				if (rflg == 1) {
					FD_CLR(i, &r->root_fds[0]);
					FD_CLR(i, &r->root_fds[2]);
				}
			}
		}
		if (FD_ISSET(i, &wfd)) {
			task = NULL;
			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!task)
						task = t;
					wflg++;
				}
			}

			if (task) {
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));

				/* remove write handle */
				remove_task_from(task, &r->root_write);

				if (r->root_hooks.hook_exec.exception) {
					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource */
				if (wflg == 1)
					FD_CLR(i, &r->root_fds[1]);
			}
		}
	}

	/* optimize select */
	for (i = r->root_kq - 1; i >= 0; i--)
		if (FD_ISSET(i, &r->root_fds[0]) ||
				FD_ISSET(i, &r->root_fds[1]) ||
				FD_ISSET(i, &r->root_fds[2]))
			break;
	r->root_kq = i + 1;
}
#endif
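/*
 * Note (standard select() semantics): select() overwrites its fd_set
 * arguments in place, which is why sched_hook_fetch() below hands this
 * dispatcher throw-away copies instead of root_fds itself:
 *
 *	rfd = r->root_fds[0];	// struct copy, root_fds stays intact
 *	wfd = r->root_fds[1];
 *	xfd = r->root_fds[2];
 *	en = select(r->root_kq, &rfd, &wfd, &xfd, timeout);
 */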
/*
 * sched_hook_fetch() - Default FETCH hook
 *
 * @root = root task
 * @arg = unused
 * return: NULL error or !=NULL fetched task
 */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp, *tsmin;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	u_long timeout = 0;
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* get new task by queue priority */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

	/* if a regular task is pending, set NOWAIT */
	if (!TAILQ_FIRST(&r->root_task)) {
		/* timer tasks */
#ifdef TIMER_WITHOUT_SORT
		clock_gettime(CLOCK_MONOTONIC, &now);

		sched_timespecclear(&r->root_wait);
		TAILQ_FOREACH(task, &r->root_timer, task_node) {
			if (!sched_timespecisset(&r->root_wait))
				r->root_wait = TASK_TS(task);
			else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
				r->root_wait = TASK_TS(task);
		}

		if (TAILQ_FIRST(&r->root_timer)) {
			m = r->root_wait;
			sched_timespecsub(&m, &now, &mtmp);
			if (mtmp.tv_sec < 0 || mtmp.tv_nsec < 0)
				/* don't wait for events, we have a ready timer */
				sched_timespecclear(&r->root_wait);
			else
				r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#else	/* ! TIMER_WITHOUT_SORT */
		if ((task = TAILQ_FIRST(&r->root_timer))) {
			clock_gettime(CLOCK_MONOTONIC, &now);

			m = TASK_TS(task);
			sched_timespecsub(&m, &now, &mtmp);
			if (mtmp.tv_sec < 0 || mtmp.tv_nsec < 0)
				/* don't wait for events, we have a ready timer */
				sched_timespecclear(&r->root_wait);
			else
				r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#endif	/* TIMER_WITHOUT_SORT */
	} else	/* don't wait for events, because we have a ready task */
		sched_timespecclear(&r->root_wait);

	if (!sched_timespecisinf(&r->root_wait)) {
		tsmin = sched_timespecmin(&r->root_wait, &r->root_poll);
#if SUP_ENABLE == KQ_SUPPORT
		timeout = tsmin;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = tsmin->tv_sec * 1000 + tsmin->tv_nsec / 1000000;
#else
		sched_timespec2val(tsmin, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	}

#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	xfd = r->root_fds[2];
	rfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif	/* KQ_SUPPORT */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif	/* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);
	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put a regular-priority task on the ready queue if there is no
	   ready task or the regular task has reached its maximum miss count */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			r->root_miss ^= r->root_miss;
			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;

	/* OK, let's get a ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);

	return task;
}
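/*
 * Worked example (numbers are illustrative): with the next timer task due
 * in root_wait = { 2, 500000000 } and root_poll = { 0, 250000000 },
 * sched_timespecmin() picks the poll interval; the kqueue build passes
 * that timespec to kevent() unchanged, the epoll build folds it to
 * 0 * 1000 + 250000000 / 1000000 = 250 ms, and the select build converts
 * it to a struct timeval.
 */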
/*
 * sched_hook_exception() - Default EXCEPTION hook
 *
 * @root = root task
 * @arg = custom handling: arg == EV_EOF or other value; default: arg == NULL, log errno
 * return: <0 errors and 0 ok
 */
void *
sched_hook_exception(void *root, void *arg)
{
	sched_root_task_t *r = root;

	if (!r)
		return NULL;

	/* custom exception handling ... */
	if (arg) {
		if (arg == (void*) EV_EOF)
			return NULL;
		return (void*) -1;	/* raise scheduler error!!! */
	}

	/* if error hook exists */
	if (r->root_hooks.hook_root.error)
		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));

	/* default case! */
	LOGERR;
	return NULL;
}

/*
 * sched_hook_condition() - Default CONDITION hook
 *
 * @root = root task
 * @arg = killState from schedRun()
 * return: NULL kill scheduler loop or !=NULL ok
 */
void *
sched_hook_condition(void *root, void *arg)
{
	sched_root_task_t *r = root;

	if (!r)
		return NULL;

	return (void*) (*r->root_cond - *(intptr_t*) arg);
}

/*
 * sched_hook_rtc() - Default RTC hook
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_rtc(void *task, void *arg __unused)
{
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		TASK_FLAG(t) = (u_long) tmr;

#if SUP_ENABLE == KQ_SUPPORT
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
			t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}
#endif	/* HAVE_TIMER_CREATE */

	return NULL;
}
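/*
 * Signal layout note (follows directly from the code above): each RTC
 * task claims the real-time signal SIGRTMIN + (intptr_t) TASK_DATA(t),
 * so TASK_DATA == 0 maps to SIGRTMIN and TASK_DATA == 3 to SIGRTMIN + 3;
 * sched_hook_cancel() restores SIG_DFL for that same signal once the
 * last subscriber goes away.
 */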