File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / hooks.c
Revision 1.39: download - view: text, annotated - select for diffs - revision graph
Tue Dec 20 22:40:32 2022 UTC (18 months, 1 week ago) by misho
Branches: MAIN
CVS tags: sched7_4, sched7_3, SCHED7_3, SCHED7_2, HEAD
version 7.2

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.39 2022/12/20 22:40:32 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004 - 2022
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: static inline void
   51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
   52: {
   53: 	remove_task_from(t, q);
   54: 
   55: 	t->task_type = taskREADY;
   56: 	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
   57: }
   58: 
#ifdef HAVE_LIBPTHREAD
/*
 * _sched_threadWrapper() - pthread entry point for taskTHREAD: runs the
 * scheduled task via schedCall(), publishes its return value in the
 * root, retires the task and exits the thread.
 *
 * @t = task to execute; the thread exits immediately if task or root
 *      is missing
 * return: task's return value (also delivered via pthread_exit)
 */
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	/* allow schedCancel() to cancel us while the task body runs */
	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	ret = schedCall(t);
	r->root_ret = ret;

	/* TASK_VAL holds the pthread id (set by sched_hook_thread);
	 * non-zero means the task is still on root_thread and must be
	 * moved to the unuse queue */
	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
#endif
   90: 
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
#if SUP_ENABLE == KQ_SUPPORT
/*
 * _sched_rtcWrapper() - kqueue delivery path for RTC tasks: runs the
 * real task kept in TASK_DATA() and deletes the POSIX timer whose id
 * is kept in TASK_DATLEN().
 */
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	/* one-shot: retire the RTC task after it has fired */
	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
#else
/*
 * _sched_rtcSigWrapper() - realtime-signal delivery path for RTC
 * tasks: the task pointer arrives in the signal's sigval; the timer
 * id is kept in TASK_FLAG().
 */
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		timer_delete((timer_t) TASK_FLAG(task));

		TASK_RET(task) = (intptr_t) schedCall(task);

		/* one-shot: retire the RTC task after it has fired */
		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
#endif
#endif
  129: 
  130: /*
  131:  * sched_hook_init() - Default INIT hook
  132:  *
  133:  * @root = root task
  134:  * @arg = unused
  135:  * return: <0 errors and 0 ok
  136:  */
  137: void *
  138: sched_hook_init(void *root, void *arg __unused)
  139: {
  140: 	sched_root_task_t *r = root;
  141: 
  142: 	if (!r)
  143: 		return (void*) -1;
  144: 
  145: #if SUP_ENABLE == KQ_SUPPORT
  146: 	r->root_kq = kqueue();
  147: 	if (r->root_kq == -1) {
  148: 		LOGERR;
  149: 		return (void*) -1;
  150: 	}
  151: #elif SUP_ENABLE == EP_SUPPORT
  152: 	r->root_kq = epoll_create(KQ_EVENTS);
  153: 	if (r->root_kq == -1) {
  154: 		LOGERR;
  155: 		return (void*) -1;
  156: 	}
  157: #else
  158: 	r->root_kq ^= r->root_kq;
  159: #endif
  160: 
  161: 	FD_ZERO(&r->root_fds[0]);
  162: 	FD_ZERO(&r->root_fds[1]);
  163: 	FD_ZERO(&r->root_fds[2]);
  164: 
  165: 	return NULL;
  166: }
  167: 
  168: /*
  169:  * sched_hook_fini() - Default FINI hook
  170:  *
  171:  * @root = root task
  172:  * @arg = unused
  173:  * return: <0 errors and 0 ok
  174:  */
  175: void *
  176: sched_hook_fini(void *root, void *arg __unused)
  177: {
  178: 	sched_root_task_t *r = root;
  179: 
  180: 	if (!r)
  181: 		return (void*) -1;
  182: 
  183: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
  184: 	if (r->root_kq > 2) {
  185: 		close(r->root_kq);
  186: 		r->root_kq = 0;
  187: 	}
  188: #else
  189: 	r->root_kq ^= r->root_kq;
  190: #endif
  191: 
  192: 	FD_ZERO(&r->root_fds[2]);
  193: 	FD_ZERO(&r->root_fds[1]);
  194: 	FD_ZERO(&r->root_fds[0]);
  195: 
  196: 	return NULL;
  197: }
  198: 
  199: /*
  200:  * sched_hook_cancel() - Default CANCEL hook
  201:  *
  202:  * @task = current task
  203:  * @arg = unused
  204:  * return: <0 errors and 0 ok
  205:  */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	int flg = 0;	/* subscriber count for the same event source;
			 * flg < 2 means we are the last one and the
			 * underlying kernel event may be deleted */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.u64 = 0l };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* rebuild the remaining event mask for this fd */
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events |= EPOLLOUT;

			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);
			} else {
				if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
					ee.events |= EPOLLIN;
				if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
					ee.events |= EPOLLPRI;
			}
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);

				/* optimize select */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || 
							FD_ISSET(i, &r->root_fds[1]) || 
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* rebuild the remaining event mask for this fd */
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events |= EPOLLIN;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
				ee.events |= EPOLLPRI;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || 
							FD_ISSET(i, &r->root_fds[1]) || 
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* restore signal */
			if (flg < 2)
				signal(TASK_VAL(t), SIG_DFL);
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort the in-flight aio request and free its control block */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				e_free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort every request in the list (TASK_DATLEN entries) */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					e_free(acbs[i]);
				}
				e_free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
			/* TASK_VAL holds the pthread id; cancel and reap the thread */
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			return NULL;
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;

			/* restore signal */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

	/* apply the change built above: kqueue takes the EV_SET record,
	 * epoll modifies or deletes depending on remaining interest */
#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	if (TASK_TYPE(t) == taskREAD || TASK_TYPE(t) == taskWRITE) {
		epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
	}
#endif
	return NULL;
}
  490: 
#ifdef HAVE_LIBPTHREAD
/*
 * sched_hook_thread() - Default THREAD hook
 *
 * @task = current task
 * @arg = pthread attributes
 * return: <0 errors and 0 ok
 */
void *
sched_hook_thread(void *task, void *arg)
{
	sched_task_t *t = task;
	pthread_t tid;
	sigset_t s, o;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* block all signals around pthread_create() so the new thread
	 * starts with them masked; the old mask is restored afterwards */
	sigfillset(&s);
	pthread_sigmask(SIG_BLOCK, &s, &o);
	/* pthread_create() returns its error code directly; it is stashed
	 * in errno so LOGERR can report it */
	errno = pthread_create(&tid, (pthread_attr_t*) arg, 
			(void *(*)(void*)) _sched_threadWrapper, t);
	pthread_sigmask(SIG_SETMASK, &o, NULL);

	if (errno) {
		LOGERR;
		return (void*) -1;
	} else
		TASK_VAL(t) = (u_long) tid;	/* thread id identifies the task */

	if (!TASK_ISLOCKED(t))
		TASK_LOCK(t);

	return NULL;
}
#endif
  527: 
  528: /*
  529:  * sched_hook_read() - Default READ hook
  530:  *
  531:  * @task = current task
 * @arg = optional event mask (0 = default read events)
  533:  * return: <0 errors and 0 ok
  534:  */
void *
sched_hook_read(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	uintptr_t mask = (uintptr_t) arg;	/* optional event-mask override */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { 0 };
	int flg = 0;	/* non-zero if fd is already registered with epoll */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
	/* mask is OR'ed into the kevent flags */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask, 
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask, 
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLIN | EPOLLPRI;
	ee.data.fd = TASK_FD(t);
	ee.events = mask;
	/* merge in interests already tracked for this fd:
	 * root_fds[0]=read, root_fds[1]=write, root_fds[2]=except */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else {
		if (mask & EPOLLIN)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & EPOLLPRI)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}
#else
	/* select backend: mask bit 1 = read set, bit 2 = except set */
	if (!mask) {
		FD_SET(TASK_FD(t), &r->root_fds[0]);
		FD_SET(TASK_FD(t), &r->root_fds[2]);
	} else {
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & 2)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}

	/* root_kq tracks the select() nfds upper bound */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
  616: 
  617: /*
  618:  * sched_hook_write() - Default WRITE hook
  619:  *
  620:  * @task = current task
 * @arg = optional event mask (0 = default write events)
  622:  * return: <0 errors and 0 ok
  623:  */
void *
sched_hook_write(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	uintptr_t mask = (uintptr_t) arg;	/* optional event-mask override */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { 0 };
	int flg = 0;	/* non-zero if fd is already registered with epoll */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
	/* mask is OR'ed into the kevent flags */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask, 
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask, 
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLOUT;
	ee.data.fd = TASK_FD(t);
	ee.events = mask;

	/* merge in interests already tracked for this fd:
	 * root_fds[0]=read, root_fds[1]=write, root_fds[2]=except */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		if (mask & EPOLLOUT)
			FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
	/* select backend: mask bit 1 = write set */
	if (!mask)
		FD_SET(TASK_FD(t), &r->root_fds[1]);
	else
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[1]);

	/* root_kq tracks the select() nfds upper bound */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
  699: 
  700: /*
  701:  * sched_hook_alarm() - Default ALARM hook
  702:  *
  703:  * @task = current task
  704:  * @arg = unused
  705:  * return: <0 errors and 0 ok
  706:  */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register an EVFILT_TIMER keyed by TASK_DATA(); the period is
	 * the task's timespec converted to milliseconds */
#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
  738: 
  739: /*
  740:  * sched_hook_node() - Default NODE hook
  741:  *
  742:  * @task = current task
 * @arg = additional kevent fflags OR'ed into the default vnode notes (0 = none)
  744:  * return: <0 errors and 0 ok
  745:  */
void *
sched_hook_node(void *task, void *arg)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
	u_int addflags = (u_int) (uintptr_t) arg;	/* extra fflags OR'ed in */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the vnode behind TASK_FD() for the standard set of
	 * notes plus any caller-supplied flags */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
  778: 
  779: /*
  780:  * sched_hook_proc() - Default PROC hook
  781:  *
  782:  * @task = current task
  783:  * @arg = unused
  784:  * return: <0 errors and 0 ok
  785:  */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the pid in TASK_VAL() for exit/fork/exec/track events */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
  815: 
  816: /*
  817:  * sched_hook_signal() - Default SIGNAL hook
  818:  *
  819:  * @task = current task
  820:  * @arg = unused
  821:  * return: <0 errors and 0 ok
  822:  */
void *
sched_hook_signal(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* ignore signal */
	/* default disposition is suppressed so delivery happens only
	 * through the kqueue filter; sched_hook_cancel restores SIG_DFL */
	signal(TASK_VAL(t), SIG_IGN);

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif
	return NULL;
}
  852: 
  853: /*
  854:  * sched_hook_user() - Default USER hook
  855:  *
  856:  * @task = current task
  857:  * @arg = unused
  858:  * return: <0 errors and 0 ok
  859:  */
#ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* user event identified by TASK_VAL(); fflags from TASK_DATLEN() */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
#endif
  891: 
  892: #if SUP_ENABLE == KQ_SUPPORT
  893: static inline void 
  894: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
  895: {
  896: 	struct kevent evt[1];
  897: 	register int i, flg;
  898: 	sched_task_t *task, *tmp;
  899: 	struct timespec now = { 0, 0 };
  900: #ifdef AIO_SUPPORT
  901: 	int len, fd;
  902: 	struct aiocb *acb;
  903: #ifdef EVFILT_LIO
  904: 	int l;
  905: 	off_t off;
  906: 	struct aiocb **acbs;
  907: 	struct iovec *iv;
  908: #endif	/* EVFILT_LIO */
  909: #endif	/* AIO_SUPPORT */
  910: 
  911: 	for (i = 0; i < en; i++) {
  912: 		memcpy(evt, &res[i], sizeof evt);
  913: 		evt->flags = EV_DELETE;
  914: 		/* Put read/write task to ready queue */
  915: 		flg = 0;
  916: 		switch (res[i].filter) {
  917: 			case EVFILT_READ:
  918: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  919: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  920: 						if (!flg) {
  921: 							TASK_RET(task) = res[i].data;
  922: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  923: 
  924: 							/* remove read handle */
  925: 							remove_task_from(task, &r->root_read);
  926: 
  927: 							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  928: 								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  929: 									task->task_type = taskUNUSE;
  930: 									insert_task_to(task, &r->root_unuse);
  931: 								} else {
  932: 									task->task_type = taskREADY;
  933: 									insert_task_to(task, &r->root_ready);
  934: 								}
  935: 							} else {
  936: 								task->task_type = taskREADY;
  937: 								insert_task_to(task, &r->root_ready);
  938: 							}
  939: 						}
  940: 						flg++;
  941: 					}
  942: 				}
  943: 				break;
  944: 			case EVFILT_WRITE:
  945: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  946: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  947: 						if (!flg) {
  948: 							TASK_RET(task) = res[i].data;
  949: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  950: 
  951: 							/* remove write handle */
  952: 							remove_task_from(task, &r->root_write);
  953: 
  954: 							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  955: 								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  956: 									task->task_type = taskUNUSE;
  957: 									insert_task_to(task, &r->root_unuse);
  958: 								} else {
  959: 									task->task_type = taskREADY;
  960: 									insert_task_to(task, &r->root_ready);
  961: 								}
  962: 							} else {
  963: 								task->task_type = taskREADY;
  964: 								insert_task_to(task, &r->root_ready);
  965: 							}
  966: 						}
  967: 						flg++;
  968: 					}
  969: 				}
  970: 				break;
  971: 			case EVFILT_TIMER:
  972: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  973: 					if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
  974: 						if (!flg) {
  975: 							TASK_RET(task) = res[i].data;
  976: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  977: 
  978: 							/* remove alarm handle */
  979: 							transit_task2ready(task, &r->root_alarm);
  980: 						}
  981: 						flg++;
  982: 					}
  983: 				}
  984: 				break;
  985: 			case EVFILT_VNODE:
  986: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  987: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  988: 						if (!flg) {
  989: 							TASK_RET(task) = res[i].data;
  990: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  991: 
  992: 							/* remove node handle */
  993: 							transit_task2ready(task, &r->root_node);
  994: 						}
  995: 						flg++;
  996: 					}
  997: 				}
  998: 				break;
  999: 			case EVFILT_PROC:
 1000: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
 1001: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1002: 						if (!flg) {
 1003: 							TASK_RET(task) = res[i].data;
 1004: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1005: 
 1006: 							/* remove proc handle */
 1007: 							transit_task2ready(task, &r->root_proc);
 1008: 						}
 1009: 						flg++;
 1010: 					}
 1011: 				}
 1012: 				break;
 1013: 			case EVFILT_SIGNAL:
 1014: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
 1015: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1016: 						if (!flg) {
 1017: 							TASK_RET(task) = res[i].data;
 1018: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1019: 
 1020: 							/* remove signal handle */
 1021: 							transit_task2ready(task, &r->root_signal);
 1022: 						}
 1023: 						flg++;
 1024: 					}
 1025: 				}
 1026: 				break;
 1027: #ifdef AIO_SUPPORT
 1028: 			case EVFILT_AIO:
 1029: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
 1030: 					acb = (struct aiocb*) TASK_VAL(task);
 1031: 					if (acb == ((struct aiocb*) res[i].udata)) {
 1032: 						if (!flg) {
 1033: 							TASK_RET(task) = res[i].data;
 1034: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1035: 
 1036: 							/* remove user handle */
 1037: 							transit_task2ready(task, &r->root_aio);
 1038: 
 1039: 							fd = acb->aio_fildes;
 1040: 							if ((len = aio_return(acb)) != -1) {
 1041: 								if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
 1042: 									LOGERR;
 1043: 							} else
 1044: 								LOGERR;
 1045: 							e_free(acb);
 1046: 							TASK_DATLEN(task) = (u_long) len;
 1047: 							TASK_FD(task) = fd;
 1048: 						}
 1049: 						flg++;
 1050: 					}
 1051: 				}
 1052: 				break;
 1053: #ifdef EVFILT_LIO
 1054: 			case EVFILT_LIO:
 1055: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 1056: 					acbs = (struct aiocb**) TASK_VAL(task);
 1057: 					if (acbs == ((struct aiocb**) res[i].udata)) {
 1058: 						if (!flg) {
 1059: 							TASK_RET(task) = res[i].data;
 1060: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1061: 
 1062: 							/* remove user handle */
 1063: 							transit_task2ready(task, &r->root_lio);
 1064: 
 1065: 							iv = (struct iovec*) TASK_DATA(task);
 1066: 							fd = acbs[0]->aio_fildes;
 1067: 							off = acbs[0]->aio_offset;
 1068: 							for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
 1069: 								if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
 1070: 									l = 0;
 1071: 								else
 1072: 									l = iv[i].iov_len;
 1073: 								e_free(acbs[i]);
 1074: 							}
 1075: 							e_free(acbs);
 1076: 							TASK_DATLEN(task) = (u_long) len;
 1077: 							TASK_FD(task) = fd;
 1078: 
 1079: 							if (lseek(fd, off + len, SEEK_CUR) == -1)
 1080: 								LOGERR;
 1081: 						}
 1082: 						flg++;
 1083: 					}
 1084: 				}
 1085: 				break;
 1086: #endif	/* EVFILT_LIO */
 1087: #endif	/* AIO_SUPPORT */
 1088: #ifdef EVFILT_USER
 1089: 			case EVFILT_USER:
 1090: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1091: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1092: 						if (!flg) {
 1093: 							TASK_RET(task) = res[i].data;
 1094: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1095: 
 1096: 							/* remove user handle */
 1097: 							transit_task2ready(task, &r->root_user);
 1098: 						}
 1099: 						flg++;
 1100: 					}
 1101: 				}
 1102: 				break;
 1103: #endif	/* EVFILT_USER */
 1104: 		}
 1105: 
 1106: 		if (flg > 1)
 1107: 			evt->flags &= ~EV_DELETE;
 1108: 
 1109: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1110: 			if (r->root_hooks.hook_exec.exception)
 1111: 				r->root_hooks.hook_exec.exception(r, NULL);
 1112: 			else
 1113: 				LOGERR;
 1114: 		}
 1115: 	}
 1116: }
 1117: #endif
 1118: 
 1119: #if SUP_ENABLE == EP_SUPPORT
 1120: static inline void
 1121: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
 1122: {
 1123: 	register int i, rflg, wflg;
 1124: 	int ops = EPOLL_CTL_DEL;
 1125: 	sched_task_t *t, *tmp, *task;
 1126: 	struct epoll_event evt[1];
 1127: 
 1128: 	for (i = 0; i < en; i++) {
 1129: 		memcpy(evt, &res[i], sizeof evt);
 1130: 		evt->events ^= evt->events;
 1131: 		rflg = wflg = 0;
 1132: 
 1133: 		if (res[i].events & (EPOLLIN | EPOLLPRI)) {
 1134: 			task = NULL;
 1135: 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 1136: 				if (TASK_FD(t) == evt->data.fd) {
 1137: 					if (!task)
 1138: 						task = t;
 1139: 					rflg++;
 1140: 				}
 1141: 			}
 1142: 
 1143: 			if (task) {
 1144: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1145: 				/* remove read handle */
 1146: 				remove_task_from(task, &r->root_read);
 1147: 
 1148: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
 1149:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1150: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1151: 						task->task_type = taskUNUSE;
 1152: 						insert_task_to(task, &r->root_unuse);
 1153: 					} else {
 1154: 						task->task_type = taskREADY;
 1155: 						insert_task_to(task, &r->root_ready);
 1156: 					}
 1157: 				} else {
 1158: 					task->task_type = taskREADY;
 1159: 					insert_task_to(task, &r->root_ready);
 1160: 				}
 1161: 
 1162: 				if (!(res[i].events & EPOLLOUT) && FD_ISSET(evt->data.fd, &r->root_fds[1])) {
 1163: 					evt->events |= EPOLLOUT;
 1164: 					wflg = 42;
 1165: 				}
 1166: 				if (rflg > 1) {
 1167: 					if (FD_ISSET(evt->data.fd, &r->root_fds[0]))
 1168: 						evt->events |= EPOLLIN;
 1169: 					if (FD_ISSET(evt->data.fd, &r->root_fds[2]))
 1170: 						evt->events |= EPOLLPRI;
 1171: 				} else {
 1172: 					FD_CLR(evt->data.fd, &r->root_fds[0]);
 1173: 					FD_CLR(evt->data.fd, &r->root_fds[2]);
 1174: 				}
 1175: 			}
 1176: 		}
 1177: 		if (res[i].events & EPOLLOUT) {
 1178: 			task = NULL;
 1179: 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 1180: 				if (TASK_FD(t) == evt->data.fd) {
 1181: 					if (!task)
 1182: 						task = t;
 1183: 					wflg++;
 1184: 				}
 1185: 			}
 1186: 
 1187: 			if (task) {
 1188: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 1189: 				/* remove write handle */
 1190: 				remove_task_from(task, &r->root_write);
 1191: 
 1192: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
 1193:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1194: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1195: 						task->task_type = taskUNUSE;
 1196: 						insert_task_to(task, &r->root_unuse);
 1197: 					} else {
 1198: 						task->task_type = taskREADY;
 1199: 						insert_task_to(task, &r->root_ready);
 1200: 					}
 1201: 				} else {
 1202: 					task->task_type = taskREADY;
 1203: 					insert_task_to(task, &r->root_ready);
 1204: 				}
 1205: 
 1206: 				if (!(res[i].events & EPOLLIN) && FD_ISSET(evt->data.fd, &r->root_fds[0])) {
 1207: 					evt->events |= EPOLLIN;
 1208: 					rflg = 42;
 1209: 				}
 1210: 				if (!(res[i].events & EPOLLPRI) && FD_ISSET(evt->data.fd, &r->root_fds[2])) {
 1211: 					evt->events |= EPOLLPRI;
 1212: 					rflg = 42;
 1213: 				}
 1214: 				if (wflg > 1)
 1215: 					evt->events |= EPOLLOUT;
 1216: 				else
 1217: 					FD_CLR(evt->data.fd, &r->root_fds[1]);
 1218: 			}
 1219: 		}
 1220: 
 1221: 		ops = EPOLL_CTL_DEL;
 1222: 		if (rflg > 1 || wflg > 1)
 1223: 			ops = EPOLL_CTL_MOD;
 1224: 
 1225: 		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
 1226: 			if (r->root_hooks.hook_exec.exception) {
 1227: 				r->root_hooks.hook_exec.exception(r, NULL);
 1228: 			} else
 1229: 				LOGERR;
 1230: 		}
 1231: 	}
 1232: }
 1233: #endif
 1234: 
 1235: #if SUP_ENABLE == NO_SUPPORT
static inline void 
fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
{
	register int i, rflg, wflg;
	sched_task_t *t, *tmp, *task;

	/* skip select check if return value from select is zero */
	if (!en)
		return;

	/* r->root_kq is select()'s nfds: highest watched fd + 1 */
	for (i = 0; i < r->root_kq; i++) {
		/* skip descriptors the scheduler is not watching at all */
		if (!FD_ISSET(i, &r->root_fds[0]) && 
				!FD_ISSET(i, &r->root_fds[1]) && 
				!FD_ISSET(i, &r->root_fds[2]))
			continue;

		rflg = wflg = 0;

		/* readable or exceptional condition on fd i */
		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
			/* dispatch the first read task on this fd; rflg counts all matches */
			task = NULL;
			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!task)
						task = t;
					rflg++;
				}
			}

			if (task) {
				/* TASK_RET = readable byte count; TASK_FLAG = ioctl status */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));

				/* remove read handle */
				remove_task_from(task, &r->root_read);

				/* select carries no per-fd error detail, so the exception
				 * hook (when present) is consulted with NULL on every event;
				 * a nonzero return parks the task in the unuse queue */
				if (r->root_hooks.hook_exec.exception) {
 					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource only when this was the last read task on fd i */
				if (rflg == 1) {
					FD_CLR(i, &r->root_fds[0]);
					FD_CLR(i, &r->root_fds[2]);
				}
			}
		}
		/* writable condition on fd i */
		if (FD_ISSET(i, &wfd)) {
			/* dispatch the first write task on this fd; wflg counts all matches */
			task = NULL;
			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!task)
						task = t;
					wflg++;
				}
			}

			if (task) {
				/* TASK_RET = writable space; TASK_FLAG = ioctl status */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));

				/* remove write handle */
				remove_task_from(task, &r->root_write);

				/* same NULL-argument exception-hook convention as above */
				if (r->root_hooks.hook_exec.exception) {
 					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource only when this was the last write task on fd i */
				if (wflg == 1)
					FD_CLR(i, &r->root_fds[1]);
			}
		}
	}

	/* optimize select: shrink nfds down to the highest fd still watched */
	for (i = r->root_kq - 1; i >= 0; i--)
		if (FD_ISSET(i, &r->root_fds[0]) || 
				FD_ISSET(i, &r->root_fds[1]) || 
				FD_ISSET(i, &r->root_fds[2]))
			break;
	r->root_kq = i + 1;
}
 1334: #endif
 1335: 
 1336: /*
 1337:  * sched_hook_fetch() - Default FETCH hook
 1338:  *
 1339:  * @root = root task
 1340:  * @arg = unused
 1341:  * return: NULL error or !=NULL fetched task
 1342:  */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	/* epoll timeout is in milliseconds; -1 (stored via u_long wrap) = INFTIM */
	u_long timeout = 0;
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* get new task by queue priority: event queue first, then ready queue */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

	/* if present member of task, set NOWAIT; otherwise derive the wait
	 * interval from the nearest timer task */
	if (!TAILQ_FIRST(&r->root_task)) {
		/* timer tasks */
#ifdef TIMER_WITHOUT_SORT
		/* unsorted timer queue: scan for the earliest absolute deadline */
		clock_gettime(CLOCK_MONOTONIC, &now);

		sched_timespecclear(&r->root_wait);
		TAILQ_FOREACH(task, &r->root_timer, task_node) {
			if (!sched_timespecisset(&r->root_wait))
				r->root_wait = TASK_TS(task);
			else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
				r->root_wait = TASK_TS(task);
		}

		if (TAILQ_FIRST(&r->root_timer)) {
			/* convert absolute deadline to relative wait */
			m = r->root_wait;
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#else	/* ! TIMER_WITHOUT_SORT */
		/* sorted timer queue: head holds the earliest deadline */
		if ((task = TAILQ_FIRST(&r->root_timer))) {
			clock_gettime(CLOCK_MONOTONIC, &now);

			m = TASK_TS(task);
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#endif	/* TIMER_WITHOUT_SORT */
	} else	/* no waiting for event, because we have ready task */
		sched_timespecclear(&r->root_wait);

	/* translate root_wait / root_poll into the backend's timeout form;
	 * tv_sec == tv_nsec == -1 marks INFTIM (see sched_timespecinf) */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_wait;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_wait, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
		/* fall back to the configured poll interval */
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	}

	/* wait for kernel events via the configured backend */
#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	/* select mutates its fd_sets, so work on copies */
	xfd = r->root_fds[2];
	rfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif	/* KQ_SUPPORT */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		/* still fall through: timers and regular tasks must be serviced */
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif	/* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put regular task priority task to ready queue, 
		if there is no ready task or reach max missing hit for regular task */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			r->root_miss ^= r->root_miss;	/* reset miss counter to 0 */

			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);
	return task;
}
 1493: 
 1494: /*
 1495:  * sched_hook_exception() - Default EXCEPTION hook
 1496:  *
 1497:  * @root = root task
 1498:  * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
 1499:  * return: <0 errors and 0 ok
 1500:  */
 1501: void *
 1502: sched_hook_exception(void *root, void *arg)
 1503: {
 1504: 	sched_root_task_t *r = root;
 1505: 
 1506: 	if (!r)
 1507: 		return NULL;
 1508: 
 1509: 	/* custom exception handling ... */
 1510: 	if (arg) {
 1511: 		if (arg == (void*) EV_EOF)
 1512: 			return NULL;
 1513: 		return (void*) -1;	/* raise scheduler error!!! */
 1514: 	}
 1515: 
 1516: 	/* if error hook exists */
 1517: 	if (r->root_hooks.hook_root.error)
 1518: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1519: 
 1520: 	/* default case! */
 1521: 	LOGERR;
 1522: 	return NULL;
 1523: }
 1524: 
 1525: /*
 1526:  * sched_hook_condition() - Default CONDITION hook
 1527:  *
 1528:  * @root = root task
 1529:  * @arg = killState from schedRun()
 1530:  * return: NULL kill scheduler loop or !=NULL ok
 1531:  */
 1532: void *
 1533: sched_hook_condition(void *root, void *arg)
 1534: {
 1535: 	sched_root_task_t *r = root;
 1536: 
 1537: 	if (!r)
 1538: 		return NULL;
 1539: 
 1540: 	return (void*) (*r->root_cond - *(intptr_t*) arg);
 1541: }
 1542: 
 1543: /*
 1544:  * sched_hook_rtc() - Default RTC hook
 1545:  *
 1546:  * @task = current task
 1547:  * @arg = unused
 1548:  * return: <0 errors and 0 ok
 1549:  */
void *
sched_hook_rtc(void *task, void *arg __unused)
{
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* deliver expiry as a real-time signal: SIGRTMIN + per-task offset
	 * from TASK_DATA, with the task pointer as the signal value */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		TASK_FLAG(t) = (u_long) tmr;	/* keep timer id on the task for later delete */

#if SUP_ENABLE == KQ_SUPPORT
	/* kqueue build: catch the signal through a scheduler signal task */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
				t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	/* non-kqueue build: install a plain SA_SIGINFO handler instead */
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	/* one-shot relative expiry taken from the task's timespec value */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* NOTE(review): on the non-KQ path sigt is still NULL here —
		 * presumably schedCancel tolerates NULL; verify */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}
#endif	/* HAVE_TIMER_CREATE */
	return NULL;
}

FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>