File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / hooks.c
Revision 1.39.4.2: download - view: text, annotated - select for diffs - revision graph
Sat Feb 25 15:46:15 2023 UTC (16 months, 1 week ago) by misho
Branches: sched7_4
Diff to: branchpoint 1.39: preferred, unified
Implement signal handling and a dispatcher for signals on epoll() and select().
Improve the wait state of the event-fetch hook: when the polling period is smaller than a
timer task's period, the event state machine now runs on the smaller of the two periods.

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.39.4.2 2023/02/25 15:46:15 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004 - 2023
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: #ifdef HAVE_LIBPTHREAD
/*
 * _sched_threadWrapper() - entry point for threads spawned by
 * sched_hook_thread(); runs the scheduled task and cleans up.
 *
 * @t = task to execute in this thread
 * return: never returns normally; exits the thread with the task's result
 */
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	/* nothing to run without a task and its root; terminate the thread */
	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	/* run the scheduled callback and publish its result on the root */
	ret = schedCall(t);
	r->root_ret = ret;

	/* TASK_VAL holds the pthread id while the task is live (set by
	 * sched_hook_thread); clear it and retire the task to the unuse queue */
	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
   80: #endif
   81: 
   82: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
   83: 	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
   84: #if SUP_ENABLE == KQ_SUPPORT
/*
 * _sched_rtcWrapper() - kqueue-build trampoline for RTC (POSIX timer) tasks:
 * deletes the one-shot timer, then runs the real task it carried.
 *
 * @t = carrier task; TASK_DATA(t) holds the real RTC task,
 *      TASK_DATLEN(t) holds the timer_t to delete
 * return: result of the dispatched task, or NULL on bad arguments
 */
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		/* the timer already fired; release the kernel timer object */
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	/* retire the RTC task from the root's rtc queue to the unuse queue */
	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
  103: #else
/*
 * _sched_rtcSigWrapper() - SA_SIGINFO-style signal handler for RTC tasks on
 * the epoll/select builds; the task pointer travels in the signal's sigval.
 *
 * @sig = delivered signal number (unused)
 * @si  = siginfo carrying the task in si_value.sival_ptr
 * @uc  = ucontext (unused)
 */
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		/* one-shot timer has fired; dispose of the kernel timer
		 * (its timer_t is stashed in TASK_FLAG by the RTC hook) */
		timer_delete((timer_t) TASK_FLAG(task));

		/* run the task from signal context and keep its result */
		TASK_RET(task) = (intptr_t) schedCall(task);

		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
  118: #endif
  119: #endif
  120: 
  121: /*
  122:  * sched_hook_init() - Default INIT hook
  123:  *
  124:  * @root = root task
  125:  * @arg = unused
  126:  * return: <0 errors and 0 ok
  127:  */
  128: void *
  129: sched_hook_init(void *root, void *arg __unused)
  130: {
  131: 	sched_root_task_t *r = root;
  132: 
  133: 	if (!r)
  134: 		return (void*) -1;
  135: 
  136: #if SUP_ENABLE == KQ_SUPPORT
  137: 	r->root_kq = kqueue();
  138: 	if (r->root_kq == -1) {
  139: 		LOGERR;
  140: 		return (void*) -1;
  141: 	}
  142: #elif SUP_ENABLE == EP_SUPPORT
  143: 	r->root_kq = epoll_create(KQ_EVENTS);
  144: 	if (r->root_kq == -1) {
  145: 		LOGERR;
  146: 		return (void*) -1;
  147: 	}
  148: #else
  149: 	r->root_kq ^= r->root_kq;
  150: #endif
  151: 
  152: 	FD_ZERO(&r->root_fds[0]);
  153: 	FD_ZERO(&r->root_fds[1]);
  154: 	FD_ZERO(&r->root_fds[2]);
  155: 
  156: 	return NULL;
  157: }
  158: 
  159: /*
  160:  * sched_hook_fini() - Default FINI hook
  161:  *
  162:  * @root = root task
  163:  * @arg = unused
  164:  * return: <0 errors and 0 ok
  165:  */
  166: void *
  167: sched_hook_fini(void *root, void *arg __unused)
  168: {
  169: 	sched_root_task_t *r = root;
  170: 
  171: 	if (!r)
  172: 		return (void*) -1;
  173: 
  174: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
  175: 	if (r->root_kq > 2) {
  176: 		close(r->root_kq);
  177: 		r->root_kq = 0;
  178: 	}
  179: #else
  180: 	r->root_kq ^= r->root_kq;
  181: #endif
  182: 
  183: 	FD_ZERO(&r->root_fds[2]);
  184: 	FD_ZERO(&r->root_fds[1]);
  185: 	FD_ZERO(&r->root_fds[0]);
  186: 
  187: 	return NULL;
  188: }
  189: 
  190: /*
  191:  * sched_hook_cancel() - Default CANCEL hook
  192:  *
  193:  * @task = current task
  194:  * @arg = unused
  195:  * return: <0 errors and 0 ok
  196:  */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	/* flg counts queued tasks sharing the same fd/ident as t; the kernel
	 * registration is torn down only when t is the last one (flg < 2) */
	int flg = 0;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* non-blocking kevent() */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.u64 = 0l };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
			/* EV_DELETE only when this is the last reader of the fd */
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* rebuild the epoll interest mask from the shadow fd sets:
			 * root_fds[0]=read, [1]=write, [2]=priority/exception */
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;		/* zero the mask */
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events |= EPOLLOUT;

			if (flg < 2) {
				/* last reader: drop read/pri interest entirely */
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);
			} else {
				if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
					ee.events |= EPOLLIN;
				if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
					ee.events |= EPOLLPRI;
			}
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);

				/* optimize select: shrink nfds watermark to the
				 * highest fd still present in any set */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || 
							FD_ISSET(i, &r->root_fds[1]) || 
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* mirror of the taskREAD branch, for write interest */
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;		/* zero the mask */
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events |= EPOLLIN;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
				ee.events |= EPOLLPRI;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select: recompute nfds watermark */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || 
							FD_ISSET(i, &r->root_fds[1]) || 
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers; TASK_DATA is the timer ident */
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers; TASK_VAL is the pid */
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
			/* check for multi subscribers; TASK_VAL is the signal number */
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* restore signal */
			if (flg < 2)
				signal(TASK_VAL(t), SIG_DFL);
#else
			/* epoll/select build: last subscriber removes the signal
			 * from the root's shared dispatch set, under its mutex */
			if (flg < 2) {
				pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
				sigdelset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
				pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
			}
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers; TASK_VAL carries the aiocb */
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort the in-flight request (reaping it if cancel
			 * succeeded) and free the control block we own */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				e_free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers; TASK_VAL carries the aiocb list */
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* cancel & free every request in the list-I/O batch
			 * (TASK_DATLEN is the element count), then the array */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					e_free(acbs[i]);
				}
				e_free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
			/* TASK_VAL holds the pthread id; cancel and reap the thread,
			 * then retire the task if the wrapper didn't already do so */
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			return NULL;
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			/* dispose of the POSIX timer stashed in TASK_FLAG */
			timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
			/* kqueue build: also cancel the helper task in TASK_RET */
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;

			/* restore signal (RTC tasks use SIGRTMIN + TASK_DATA) */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

	/* apply the single change record computed above to the kernel queue */
#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	if (TASK_TYPE(t) == taskREAD || TASK_TYPE(t) == taskWRITE) {
		/* modify remaining interest, or deregister the fd entirely */
		epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
	}
#endif
	return NULL;
}
  487: 
  488: #ifdef HAVE_LIBPTHREAD
  489: /*
  490:  * sched_hook_thread() - Default THREAD hook
  491:  *
  492:  * @task = current task
  493:  * @arg = pthread attributes
  494:  * return: <0 errors and 0 ok
  495:  */
  496: void *
  497: sched_hook_thread(void *task, void *arg)
  498: {
  499: 	sched_task_t *t = task;
  500: 	pthread_t tid;
  501: 	sigset_t s, o;
  502: 
  503: 	if (!t || !TASK_ROOT(t))
  504: 		return (void*) -1;
  505: 
  506: 	sigfillset(&s);
  507: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  508: 	errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  509: 			(void *(*)(void*)) _sched_threadWrapper, t);
  510: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  511: 
  512: 	if (errno) {
  513: 		LOGERR;
  514: 		return (void*) -1;
  515: 	} else
  516: 		TASK_VAL(t) = (u_long) tid;
  517: 
  518: 	if (!TASK_ISLOCKED(t))
  519: 		TASK_LOCK(t);
  520: 
  521: 	return NULL;
  522: }
  523: #endif
  524: 
  525: /*
  526:  * sched_hook_read() - Default READ hook
  527:  *
  528:  * @task = current task
  529:  * @arg = unused
  530:  * return: <0 errors and 0 ok
  531:  */
void *
sched_hook_read(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	uintptr_t mask = (uintptr_t) arg;	/* optional caller-supplied event mask */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* register without blocking */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { 0 };
	int flg = 0;	/* non-zero when fd is already registered with epoll */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
	/* add (or re-arm) an edge-triggered read filter; mask ORs extra EV_ flags */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask, 
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask, 
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLIN | EPOLLPRI;	/* default read interest */
	ee.data.fd = TASK_FD(t);
	ee.events = mask;
	/* merge in interest already recorded in the shadow fd sets:
	 * root_fds[0]=read, [1]=write, [2]=priority/exception */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	/* MOD when the fd is already registered, ADD otherwise */
	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else {
		/* record the new interest in the shadow sets on success */
		if (mask & EPOLLIN)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & EPOLLPRI)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}
#else
	/* select() backend: mask bit 1 = read set, bit 2 = exception set;
	 * no mask means watch both */
	if (!mask) {
		FD_SET(TASK_FD(t), &r->root_fds[0]);
		FD_SET(TASK_FD(t), &r->root_fds[2]);
	} else {
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & 2)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}

	/* keep the nfds watermark above the highest watched fd */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
  613: 
  614: /*
  615:  * sched_hook_write() - Default WRITE hook
  616:  *
  617:  * @task = current task
  618:  * @arg = unused
  619:  * return: <0 errors and 0 ok
  620:  */
void *
sched_hook_write(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	uintptr_t mask = (uintptr_t) arg;	/* optional caller-supplied event mask */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* register without blocking */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { 0 };
	int flg = 0;	/* non-zero when fd is already registered with epoll */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
	/* add (or re-arm) an edge-triggered write filter; mask ORs extra EV_ flags */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask, 
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask, 
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLOUT;	/* default write interest */
	ee.data.fd = TASK_FD(t);
	ee.events = mask;

	/* merge in interest already recorded in the shadow fd sets:
	 * root_fds[0]=read, [1]=write, [2]=priority/exception */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	/* MOD when the fd is already registered, ADD otherwise */
	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* record the new write interest in the shadow set on success */
		if (mask & EPOLLOUT)
			FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
	/* select() backend: mask bit 1 selects the write set */
	if (!mask)
		FD_SET(TASK_FD(t), &r->root_fds[1]);
	else
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[1]);

	/* keep the nfds watermark above the highest watched fd */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
  696: 
  697: /*
  698:  * sched_hook_alarm() - Default ALARM hook
  699:  *
  700:  * @task = current task
  701:  * @arg = unused
  702:  * return: <0 errors and 0 ok
  703:  */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* register without blocking */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register a kernel timer: TASK_DATA doubles as ident and udata cookie;
	 * the period is converted to milliseconds (EVFILT_TIMER's default unit) */
#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	/* no-op on epoll/select builds */
	return NULL;
}
  735: 
  736: /*
  737:  * sched_hook_node() - Default NODE hook
  738:  *
  739:  * @task = current task
  740:  * @arg = if arg == 42 then waiting for all events
  741:  * return: <0 errors and 0 ok
  742:  */
void *
sched_hook_node(void *task, void *arg)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* register without blocking */
	u_int addflags = (u_int) (uintptr_t) arg;	/* extra NOTE_* flags from caller */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the vnode behind TASK_FD for every standard change event,
	 * plus any caller-requested flags */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	/* no-op on epoll/select builds (no vnode filter available) */
	return NULL;
}
  775: 
  776: /*
  777:  * sched_hook_proc() - Default PROC hook
  778:  *
  779:  * @task = current task
  780:  * @arg = unused
  781:  * return: <0 errors and 0 ok
  782:  */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* register without blocking */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the process whose pid is in TASK_VAL for exit/fork/exec,
	 * following children via NOTE_TRACK */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	/* no-op on epoll/select builds (no process filter available) */
	return NULL;
}
  812: 
  813: /*
  814:  * sched_hook_signal() - Default SIGNAL hook
  815:  *
  816:  * @task = current task
  817:  * @arg = unused
  818:  * return: <0 errors and 0 ok
  819:  */
void *
sched_hook_signal(void *task, void *arg __unused)
{
	sched_task_t *t = task;

#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* register without blocking */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* ignore signal (EVFILT_SIGNAL still sees ignored signals, so the
	 * default disposition no longer runs) */
	signal(TASK_VAL(t), SIG_IGN);

	/* TASK_VAL carries the signal number; it doubles as the udata cookie */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#else
	/* epoll/select build: add the signal to the root's shared signal set
	 * under its mutex; presumably the signal dispatcher consumes
	 * root_sigset — confirm against the fetch/dispatch code */
	pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
	sigaddset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
	pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
#endif

	return NULL;
}
  855: 
  856: /*
  857:  * sched_hook_user() - Default USER hook
  858:  *
  859:  * @task = current task
  860:  * @arg = unused
  861:  * return: <0 errors and 0 ok
  862:  */
  863: #ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* register without blocking */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register a user-triggered event: TASK_VAL is the ident (and the
	 * udata cookie), TASK_DATLEN supplies the NOTE_* fflags */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	/* no-op on epoll/select builds (EVFILT_USER is kqueue-only) */
	return NULL;
}
  893: #endif
  894: 
  895: #if SUP_ENABLE == KQ_SUPPORT
  896: static inline void 
  897: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
  898: {
  899: 	struct kevent evt[1];
  900: 	register int i, flg;
  901: 	sched_task_t *task, *tmp;
  902: 	struct timespec now = { 0, 0 };
  903: #ifdef AIO_SUPPORT
  904: 	int len, fd;
  905: 	struct aiocb *acb;
  906: #ifdef EVFILT_LIO
  907: 	int l;
  908: 	off_t off;
  909: 	struct aiocb **acbs;
  910: 	struct iovec *iv;
  911: #endif	/* EVFILT_LIO */
  912: #endif	/* AIO_SUPPORT */
  913: 
  914: 	for (i = 0; i < en; i++) {
  915: 		memcpy(evt, &res[i], sizeof evt);
  916: 		evt->flags = EV_DELETE;
  917: 		/* Put read/write task to ready queue */
  918: 		flg = 0;
  919: 		switch (res[i].filter) {
  920: 			case EVFILT_READ:
  921: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  922: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  923: 						if (!flg) {
  924: 							TASK_RET(task) = res[i].data;
  925: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  926: 
  927: 							/* remove read handle */
  928: 							remove_task_from(task, &r->root_read);
  929: 
  930: 							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  931: 								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  932: 									task->task_type = taskUNUSE;
  933: 									insert_task_to(task, &r->root_unuse);
  934: 								} else {
  935: 									task->task_type = taskREADY;
  936: 									insert_task_to(task, &r->root_ready);
  937: 								}
  938: 							} else {
  939: 								task->task_type = taskREADY;
  940: 								insert_task_to(task, &r->root_ready);
  941: 							}
  942: 						}
  943: 						flg++;
  944: 					}
  945: 				}
  946: 				break;
  947: 			case EVFILT_WRITE:
  948: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  949: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  950: 						if (!flg) {
  951: 							TASK_RET(task) = res[i].data;
  952: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  953: 
  954: 							/* remove write handle */
  955: 							remove_task_from(task, &r->root_write);
  956: 
  957: 							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  958: 								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  959: 									task->task_type = taskUNUSE;
  960: 									insert_task_to(task, &r->root_unuse);
  961: 								} else {
  962: 									task->task_type = taskREADY;
  963: 									insert_task_to(task, &r->root_ready);
  964: 								}
  965: 							} else {
  966: 								task->task_type = taskREADY;
  967: 								insert_task_to(task, &r->root_ready);
  968: 							}
  969: 						}
  970: 						flg++;
  971: 					}
  972: 				}
  973: 				break;
  974: 			case EVFILT_TIMER:
  975: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  976: 					if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
  977: 						if (!flg) {
  978: 							TASK_RET(task) = res[i].data;
  979: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  980: 
  981: 							/* remove alarm handle */
  982: 							transit_task2ready(task, &r->root_alarm);
  983: 						}
  984: 						flg++;
  985: 					}
  986: 				}
  987: 				break;
  988: 			case EVFILT_VNODE:
  989: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  990: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  991: 						if (!flg) {
  992: 							TASK_RET(task) = res[i].data;
  993: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  994: 
  995: 							/* remove node handle */
  996: 							transit_task2ready(task, &r->root_node);
  997: 						}
  998: 						flg++;
  999: 					}
 1000: 				}
 1001: 				break;
 1002: 			case EVFILT_PROC:
 1003: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
 1004: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1005: 						if (!flg) {
 1006: 							TASK_RET(task) = res[i].data;
 1007: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1008: 
 1009: 							/* remove proc handle */
 1010: 							transit_task2ready(task, &r->root_proc);
 1011: 						}
 1012: 						flg++;
 1013: 					}
 1014: 				}
 1015: 				break;
 1016: 			case EVFILT_SIGNAL:
 1017: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
 1018: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1019: 						if (!flg) {
 1020: 							TASK_RET(task) = res[i].data;
 1021: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1022: 
 1023: 							/* remove signal handle */
 1024: 							transit_task2ready(task, &r->root_signal);
 1025: 						}
 1026: 						flg++;
 1027: 					}
 1028: 				}
 1029: 				break;
 1030: #ifdef AIO_SUPPORT
 1031: 			case EVFILT_AIO:
 1032: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
 1033: 					acb = (struct aiocb*) TASK_VAL(task);
 1034: 					if (acb == ((struct aiocb*) res[i].udata)) {
 1035: 						if (!flg) {
 1036: 							TASK_RET(task) = res[i].data;
 1037: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1038: 
 1039: 							/* remove user handle */
 1040: 							transit_task2ready(task, &r->root_aio);
 1041: 
 1042: 							fd = acb->aio_fildes;
 1043: 							if ((len = aio_return(acb)) != -1) {
 1044: 								if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
 1045: 									LOGERR;
 1046: 							} else
 1047: 								LOGERR;
 1048: 							e_free(acb);
 1049: 							TASK_DATLEN(task) = (u_long) len;
 1050: 							TASK_FD(task) = fd;
 1051: 						}
 1052: 						flg++;
 1053: 					}
 1054: 				}
 1055: 				break;
 1056: #ifdef EVFILT_LIO
 1057: 			case EVFILT_LIO:
 1058: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 1059: 					acbs = (struct aiocb**) TASK_VAL(task);
 1060: 					if (acbs == ((struct aiocb**) res[i].udata)) {
 1061: 						if (!flg) {
 1062: 							TASK_RET(task) = res[i].data;
 1063: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1064: 
 1065: 							/* remove user handle */
 1066: 							transit_task2ready(task, &r->root_lio);
 1067: 
 1068: 							iv = (struct iovec*) TASK_DATA(task);
 1069: 							fd = acbs[0]->aio_fildes;
 1070: 							off = acbs[0]->aio_offset;
 1071: 							for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
 1072: 								if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
 1073: 									l = 0;
 1074: 								else
 1075: 									l = iv[i].iov_len;
 1076: 								e_free(acbs[i]);
 1077: 							}
 1078: 							e_free(acbs);
 1079: 							TASK_DATLEN(task) = (u_long) len;
 1080: 							TASK_FD(task) = fd;
 1081: 
 1082: 							if (lseek(fd, off + len, SEEK_CUR) == -1)
 1083: 								LOGERR;
 1084: 						}
 1085: 						flg++;
 1086: 					}
 1087: 				}
 1088: 				break;
 1089: #endif	/* EVFILT_LIO */
 1090: #endif	/* AIO_SUPPORT */
 1091: #ifdef EVFILT_USER
 1092: 			case EVFILT_USER:
 1093: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1094: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1095: 						if (!flg) {
 1096: 							TASK_RET(task) = res[i].data;
 1097: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1098: 
 1099: 							/* remove user handle */
 1100: 							transit_task2ready(task, &r->root_user);
 1101: 						}
 1102: 						flg++;
 1103: 					}
 1104: 				}
 1105: 				break;
 1106: #endif	/* EVFILT_USER */
 1107: 		}
 1108: 
 1109: 		if (flg > 1)
 1110: 			evt->flags &= ~EV_DELETE;
 1111: 
 1112: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1113: 			if (r->root_hooks.hook_exec.exception)
 1114: 				r->root_hooks.hook_exec.exception(r, NULL);
 1115: 			else
 1116: 				LOGERR;
 1117: 		}
 1118: 	}
 1119: }
 1120: #endif
 1121: 
 1122: #if SUP_ENABLE == EP_SUPPORT
 1123: static inline void
 1124: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
 1125: {
 1126: 	register int i, rflg, wflg;
 1127: 	int ops = EPOLL_CTL_DEL;
 1128: 	sched_task_t *t, *tmp, *task;
 1129: 	struct epoll_event evt[1];
 1130: 
 1131: 	for (i = 0; i < en; i++) {
 1132: 		memcpy(evt, &res[i], sizeof evt);
 1133: 		evt->events ^= evt->events;
 1134: 		rflg = wflg = 0;
 1135: 
 1136: 		if (res[i].events & (EPOLLIN | EPOLLPRI)) {
 1137: 			task = NULL;
 1138: 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 1139: 				if (TASK_FD(t) == evt->data.fd) {
 1140: 					if (!task)
 1141: 						task = t;
 1142: 					rflg++;
 1143: 				}
 1144: 			}
 1145: 
 1146: 			if (task) {
 1147: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1148: 				/* remove read handle */
 1149: 				remove_task_from(task, &r->root_read);
 1150: 
 1151: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
 1152:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1153: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1154: 						task->task_type = taskUNUSE;
 1155: 						insert_task_to(task, &r->root_unuse);
 1156: 					} else {
 1157: 						task->task_type = taskREADY;
 1158: 						insert_task_to(task, &r->root_ready);
 1159: 					}
 1160: 				} else {
 1161: 					task->task_type = taskREADY;
 1162: 					insert_task_to(task, &r->root_ready);
 1163: 				}
 1164: 
 1165: 				if (!(res[i].events & EPOLLOUT) && FD_ISSET(evt->data.fd, &r->root_fds[1])) {
 1166: 					evt->events |= EPOLLOUT;
 1167: 					wflg = 42;
 1168: 				}
 1169: 				if (rflg > 1) {
 1170: 					if (FD_ISSET(evt->data.fd, &r->root_fds[0]))
 1171: 						evt->events |= EPOLLIN;
 1172: 					if (FD_ISSET(evt->data.fd, &r->root_fds[2]))
 1173: 						evt->events |= EPOLLPRI;
 1174: 				} else {
 1175: 					FD_CLR(evt->data.fd, &r->root_fds[0]);
 1176: 					FD_CLR(evt->data.fd, &r->root_fds[2]);
 1177: 				}
 1178: 			}
 1179: 		}
 1180: 		if (res[i].events & EPOLLOUT) {
 1181: 			task = NULL;
 1182: 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 1183: 				if (TASK_FD(t) == evt->data.fd) {
 1184: 					if (!task)
 1185: 						task = t;
 1186: 					wflg++;
 1187: 				}
 1188: 			}
 1189: 
 1190: 			if (task) {
 1191: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 1192: 				/* remove write handle */
 1193: 				remove_task_from(task, &r->root_write);
 1194: 
 1195: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
 1196:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1197: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1198: 						task->task_type = taskUNUSE;
 1199: 						insert_task_to(task, &r->root_unuse);
 1200: 					} else {
 1201: 						task->task_type = taskREADY;
 1202: 						insert_task_to(task, &r->root_ready);
 1203: 					}
 1204: 				} else {
 1205: 					task->task_type = taskREADY;
 1206: 					insert_task_to(task, &r->root_ready);
 1207: 				}
 1208: 
 1209: 				if (!(res[i].events & EPOLLIN) && FD_ISSET(evt->data.fd, &r->root_fds[0])) {
 1210: 					evt->events |= EPOLLIN;
 1211: 					rflg = 42;
 1212: 				}
 1213: 				if (!(res[i].events & EPOLLPRI) && FD_ISSET(evt->data.fd, &r->root_fds[2])) {
 1214: 					evt->events |= EPOLLPRI;
 1215: 					rflg = 42;
 1216: 				}
 1217: 				if (wflg > 1)
 1218: 					evt->events |= EPOLLOUT;
 1219: 				else
 1220: 					FD_CLR(evt->data.fd, &r->root_fds[1]);
 1221: 			}
 1222: 		}
 1223: 
 1224: 		ops = EPOLL_CTL_DEL;
 1225: 		if (rflg > 1 || wflg > 1)
 1226: 			ops = EPOLL_CTL_MOD;
 1227: 
 1228: 		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
 1229: 			if (r->root_hooks.hook_exec.exception) {
 1230: 				r->root_hooks.hook_exec.exception(r, NULL);
 1231: 			} else
 1232: 				LOGERR;
 1233: 		}
 1234: 	}
 1235: }
 1236: #endif
 1237: 
 1238: #if SUP_ENABLE == NO_SUPPORT
 1239: static inline void 
 1240: fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
 1241: {
 1242: 	register int i, rflg, wflg;
 1243: 	sched_task_t *t, *tmp, *task;
 1244: 
 1245: 	/* skip select check if return value from select is zero */
 1246: 	if (!en)
 1247: 		return;
 1248: 
 1249: 	for (i = 0; i < r->root_kq; i++) {
 1250: 		if (!FD_ISSET(i, &r->root_fds[0]) && 
 1251: 				!FD_ISSET(i, &r->root_fds[1]) && 
 1252: 				!FD_ISSET(i, &r->root_fds[2]))
 1253: 			continue;
 1254: 
 1255: 		rflg = wflg = 0;
 1256: 
 1257: 		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
 1258: 			task = NULL;
 1259: 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 1260: 				if (TASK_FD(t) == i) {
 1261: 					if (!task)
 1262: 						task = t;
 1263: 					rflg++;
 1264: 				}
 1265: 			}
 1266: 
 1267: 			if (task) {
 1268: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1269: 
 1270: 				/* remove read handle */
 1271: 				remove_task_from(task, &r->root_read);
 1272: 
 1273: 				if (r->root_hooks.hook_exec.exception) {
 1274:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1275: 						task->task_type = taskUNUSE;
 1276: 						insert_task_to(task, &r->root_unuse);
 1277: 					} else {
 1278: 						task->task_type = taskREADY;
 1279: 						insert_task_to(task, &r->root_ready);
 1280: 					}
 1281: 				} else {
 1282: 					task->task_type = taskREADY;
 1283: 					insert_task_to(task, &r->root_ready);
 1284: 				}
 1285: 
 1286: 				/* remove resouce */
 1287: 				if (rflg == 1) {
 1288: 					FD_CLR(i, &r->root_fds[0]);
 1289: 					FD_CLR(i, &r->root_fds[2]);
 1290: 				}
 1291: 			}
 1292: 		}
 1293: 		if (FD_ISSET(i, &wfd)) {
 1294: 			task = NULL;
 1295: 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 1296: 				if (TASK_FD(t) == i) {
 1297: 					if (!task)
 1298: 						task = t;
 1299: 					wflg++;
 1300: 				}
 1301: 			}
 1302: 
 1303: 			if (task) {
 1304: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 1305: 
 1306: 				/* remove write handle */
 1307: 				remove_task_from(task, &r->root_write);
 1308: 
 1309: 				if (r->root_hooks.hook_exec.exception) {
 1310:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1311: 						task->task_type = taskUNUSE;
 1312: 						insert_task_to(task, &r->root_unuse);
 1313: 					} else {
 1314: 						task->task_type = taskREADY;
 1315: 						insert_task_to(task, &r->root_ready);
 1316: 					}
 1317: 				} else {
 1318: 					task->task_type = taskREADY;
 1319: 					insert_task_to(task, &r->root_ready);
 1320: 				}
 1321: 
 1322: 				/* remove resouce */
 1323: 				if (wflg == 1)
 1324: 					FD_CLR(i, &r->root_fds[1]);
 1325: 			}
 1326: 		}
 1327: 	}
 1328: 
 1329: 	/* optimize select */
 1330: 	for (i = r->root_kq - 1; i >= 0; i--)
 1331: 		if (FD_ISSET(i, &r->root_fds[0]) || 
 1332: 				FD_ISSET(i, &r->root_fds[1]) || 
 1333: 				FD_ISSET(i, &r->root_fds[2]))
 1334: 			break;
 1335: 	r->root_kq = i + 1;
 1336: }
 1337: #endif
 1338: 
 1339: /*
 1340:  * sched_hook_fetch() - Default FETCH hook
 1341:  *
 1342:  * @root = root task
 1343:  * @arg = unused
 1344:  * return: NULL error or !=NULL fetched task
 1345:  */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp, *tsmin;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	/* epoll timeout is in milliseconds; -1 means wait forever.
	 * NOTE(review): declared u_long but epoll_wait() takes int — the
	 * -1 assignment below relies on wraparound; verify on LP64. */
	u_long timeout = 0;
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* get new task by queue priority: event queue first, then ready queue;
	 * each "while" returns on the first entry, so at most one task is taken */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

	/* if present member of task, set NOWAIT */
	if (!TAILQ_FIRST(&r->root_task)) {
		/* timer tasks: compute how long we may sleep in the poll call */
#ifdef TIMER_WITHOUT_SORT
		/* unsorted timer queue: scan for the earliest absolute deadline */
		clock_gettime(CLOCK_MONOTONIC, &now);

		sched_timespecclear(&r->root_wait);
		TAILQ_FOREACH(task, &r->root_timer, task_node) {
			if (!sched_timespecisset(&r->root_wait))
				r->root_wait = TASK_TS(task);
			else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
				r->root_wait = TASK_TS(task);
		}

		if (TAILQ_FIRST(&r->root_timer)) {
			/* convert absolute deadline to a relative wait interval */
			m = r->root_wait;
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#else	/* ! TIMER_WITHOUT_SORT */
		/* sorted timer queue: head is the earliest deadline */
		if ((task = TAILQ_FIRST(&r->root_timer))) {
			clock_gettime(CLOCK_MONOTONIC, &now);

			m = TASK_TS(task);
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#endif	/* TIMER_WITHOUT_SORT */
	} else	/* no waiting for event, because we have ready task */
		sched_timespecclear(&r->root_wait);

	/* pick the effective timeout: the smaller of the timer wait and the
	 * polling period, falling back to the poll period or INFTIM */
	if (!sched_timespecisinf(&r->root_wait)) {
		tsmin = sched_timespecmin(&r->root_wait, &r->root_poll);
#if SUP_ENABLE == KQ_SUPPORT
		timeout = tsmin;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = tsmin->tv_sec * 1000 + tsmin->tv_nsec / 1000000;
#else
		sched_timespec2val(tsmin, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	}

	/* block for events on the chosen backend */
#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	/* select() mutates its fd_sets, so work on copies */
	xfd = r->root_fds[2];
	rfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif	/* KQ_SUPPORT */
		/* poll failure: let the exception hook decide whether to abort;
		 * EINTR without a hook is silently tolerated */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif	/* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put regular task priority task to ready queue, 
		if there is no ready task or reach max missing hit for regular task */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			/* r->root_miss ^= r->root_miss is just "set to zero" */
			r->root_miss ^= r->root_miss;

			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);
	return task;
}
 1497: 
 1498: /*
 1499:  * sched_hook_exception() - Default EXCEPTION hook
 1500:  *
 1501:  * @root = root task
 1502:  * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
 1503:  * return: <0 errors and 0 ok
 1504:  */
 1505: void *
 1506: sched_hook_exception(void *root, void *arg)
 1507: {
 1508: 	sched_root_task_t *r = root;
 1509: 
 1510: 	if (!r)
 1511: 		return NULL;
 1512: 
 1513: 	/* custom exception handling ... */
 1514: 	if (arg) {
 1515: 		if (arg == (void*) EV_EOF)
 1516: 			return NULL;
 1517: 		return (void*) -1;	/* raise scheduler error!!! */
 1518: 	}
 1519: 
 1520: 	/* if error hook exists */
 1521: 	if (r->root_hooks.hook_root.error)
 1522: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1523: 
 1524: 	/* default case! */
 1525: 	LOGERR;
 1526: 	return NULL;
 1527: }
 1528: 
 1529: /*
 1530:  * sched_hook_condition() - Default CONDITION hook
 1531:  *
 1532:  * @root = root task
 1533:  * @arg = killState from schedRun()
 1534:  * return: NULL kill scheduler loop or !=NULL ok
 1535:  */
 1536: void *
 1537: sched_hook_condition(void *root, void *arg)
 1538: {
 1539: 	sched_root_task_t *r = root;
 1540: 
 1541: 	if (!r)
 1542: 		return NULL;
 1543: 
 1544: 	return (void*) (*r->root_cond - *(intptr_t*) arg);
 1545: }
 1546: 
 1547: /*
 1548:  * sched_hook_rtc() - Default RTC hook
 1549:  *
 1550:  * @task = current task
 1551:  * @arg = unused
 1552:  * return: <0 errors and 0 ok
 1553:  */
void *
sched_hook_rtc(void *task, void *arg __unused)
{
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* arm a POSIX per-process timer that fires a real-time signal;
	 * the signal number is offset from SIGRTMIN by TASK_DATA */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* stash the timer handle so the task can later delete it */
		TASK_FLAG(t) = (u_long) tmr;

#if SUP_ENABLE == KQ_SUPPORT
	/* kqueue build: deliver the RT signal through a scheduler signal task */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
				t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	/* non-kqueue build: install a plain sigaction handler instead */
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	/* single-shot relative expiry taken from the task's timespec value */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* NOTE(review): in the non-KQ build sigt is still NULL here —
		 * assumes schedCancel(NULL) is a safe no-op; verify */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}
#endif	/* HAVE_TIMER_CREATE */
	return NULL;
}

FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>