File: [ELWIX - Embedded LightWeight unIX] / libaitsched / src / hooks.c
Revision 1.37
Wed Oct 19 01:45:08 2022 UTC by misho
Branches: MAIN
CVS tags: sched7_1, SCHED7_0, HEAD
version 7.0

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.37 2022/10/19 01:45:08 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004 - 2022
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
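/* Detach a task from queue q and append it to its root's ready queue. */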
   50: static inline void
   51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
   52: {
   53: 	remove_task_from(t, q);
   54: 
   55: 	t->task_type = taskREADY;
   56: 	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
   57: }
   58: 
   59: #ifdef HAVE_LIBPTHREAD
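/* Thread entry point used by sched_hook_thread(): runs the task through
 * schedCall(), publishes the return value in root_ret and retires the task
 * from the root's thread queue before the thread exits. */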
   60: static void *
   61: _sched_threadWrapper(sched_task_t *t)
   62: {
   63: 	void *ret = NULL;
   64: 	sched_root_task_t *r;
   65: 
   66: 	if (!t || !TASK_ROOT(t))
   67: 		pthread_exit(ret);
   68: 	else
   69: 		r = (sched_root_task_t*) TASK_ROOT(t);
   70: 
   71: 	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   72: 	/*
   73: 	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
   74: 	*/
   75: 
   76: 	/* notify parent, thread is ready for execution */
   77: 	pthread_testcancel();
   78: 
   79: 	ret = schedCall(t);
   80: 	r->root_ret = ret;
   81: 
   82: 	if (TASK_VAL(t)) {
   83: 		transit_task2unuse(t, &r->root_thread);
   84: 		TASK_VAL(t) = 0;
   85: 	}
   86: 
   87: 	pthread_exit(ret);
   88: }
   89: #endif
   90: 
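/* RTC delivery: with kqueue the timer's real-time signal is consumed as an
 * EVFILT_SIGNAL task via _sched_rtcWrapper(); without kqueue the signal is
 * handled directly by _sched_rtcSigWrapper() installed through sigaction(). */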
   91: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
   92: 	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
   93: #if SUP_ENABLE == KQ_SUPPORT
   94: static void *
   95: _sched_rtcWrapper(sched_task_t *t)
   96: {
   97: 	sched_task_t *task;
   98: 	void *ret;
   99: 
  100: 	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
  101: 		return NULL;
  102: 	else {
  103: 		task = (sched_task_t*) TASK_DATA(t);
  104: 		timer_delete((timer_t) TASK_DATLEN(t));
  105: 	}
  106: 
  107: 	ret = schedCall(task);
  108: 
  109: 	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
  110: 	return ret;
  111: }
  112: #else
  113: static void
  114: _sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
  115: {
  116: 	sched_task_t *task;
  117: 
  118: 	if (si && si->si_value.sival_ptr) {
  119: 		task = (sched_task_t*) si->si_value.sival_ptr;
  120: 		timer_delete((timer_t) TASK_FLAG(task));
  121: 
  122: 		TASK_RET(task) = (intptr_t) schedCall(task);
  123: 
  124: 		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
  125: 	}
  126: }
  127: #endif
  128: #endif
  129: 
  130: /*
  131:  * sched_hook_init() - Default INIT hook
  132:  *
  133:  * @root = root task
  134:  * @arg = unused
  135:  * return: <0 errors and 0 ok
  136:  */
  137: void *
  138: sched_hook_init(void *root, void *arg __unused)
  139: {
  140: 	sched_root_task_t *r = root;
  141: 
  142: 	if (!r)
  143: 		return (void*) -1;
  144: 
  145: #if SUP_ENABLE == KQ_SUPPORT
  146: 	r->root_kq = kqueue();
  147: 	if (r->root_kq == -1) {
  148: 		LOGERR;
  149: 		return (void*) -1;
  150: 	}
  151: #elif SUP_ENABLE == EP_SUPPORT
  152: 	r->root_kq = epoll_create(KQ_EVENTS);
  153: 	if (r->root_kq == -1) {
  154: 		LOGERR;
  155: 		return (void*) -1;
  156: 	}
  157: #else
  158: 	r->root_kq ^= r->root_kq;
  159: 	FD_ZERO(&r->root_fds[0]);
  160: 	FD_ZERO(&r->root_fds[1]);
  161: #endif
  162: 
  163: 	return NULL;
  164: }
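/*
 * Example (sketch): installing a custom exception hook on an initialized
 * scheduler root.  "my_exception" is a hypothetical handler, not part of the
 * library; the callers in this file make a non-NULL return abort the fetch or
 * drop the affected task, while NULL means the condition was handled.
 *
 *	static void *
 *	my_exception(void *root, void *arg)
 *	{
 *		if (arg == (void*) EV_EOF)
 *			return NULL;
 *		fprintf(stderr, "sched exception: %s\n", strerror(errno));
 *		return NULL;
 *	}
 *
 *	r->root_hooks.hook_exec.exception = my_exception;
 */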
  165: 
  166: /*
  167:  * sched_hook_fini() - Default FINI hook
  168:  *
  169:  * @root = root task
  170:  * @arg = unused
  171:  * return: <0 errors and 0 ok
  172:  */
  173: void *
  174: sched_hook_fini(void *root, void *arg __unused)
  175: {
  176: 	sched_root_task_t *r = root;
  177: 
  178: 	if (!r)
  179: 		return (void*) -1;
  180: 
  181: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
  182: 	if (r->root_kq > 2) {
  183: 		close(r->root_kq);
  184: 		r->root_kq = 0;
  185: 	}
  186: #else
  187: 	FD_ZERO(&r->root_fds[1]);
  188: 	FD_ZERO(&r->root_fds[0]);
  189: 	r->root_kq ^= r->root_kq;
  190: #endif
  191: 
  192: 	return NULL;
  193: }
  194: 
  195: /*
  196:  * sched_hook_cancel() - Default CANCEL hook
  197:  *
  198:  * @task = current task
  199:  * @arg = unused
  200:  * return: <0 errors and 0 ok
  201:  */
  202: void *
  203: sched_hook_cancel(void *task, void *arg __unused)
  204: {
  205: 	sched_task_t *t = task, *tmp, *tt;
  206: 	sched_root_task_t *r = NULL;
  207: 	int flg = 0;
  208: #if SUP_ENABLE == KQ_SUPPORT
  209: 	struct kevent chg[1];
  210: 	struct timespec timeout = { 0, 0 };
  211: #elif SUP_ENABLE == EP_SUPPORT
  212: 	struct epoll_event ee = { .events = 0, .data.fd = 0 };
  213: #else
  214: 	register int i;
  215: #endif
  216: #ifdef AIO_SUPPORT
  217: 	struct aiocb *acb;
  218: #ifdef EVFILT_LIO
  219: 	register int i = 0;
  220: 	struct aiocb **acbs;
  221: #endif	/* EVFILT_LIO */
  222: #endif	/* AIO_SUPPORT */
  223: 
  224: 	if (!t || !TASK_ROOT(t))
  225: 		return (void*) -1;
  226: 	else
  227: 		r = TASK_ROOT(t);
  228: 
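	/* For every task type below, flg counts how many tasks are still
	 * subscribed to the same fd/ident; the kernel event (or fd_set bit)
	 * is removed only when this is the last subscriber (flg < 2). */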
  229: 	switch (TASK_TYPE(t)) {
  230: 		case taskREAD:
  231: 			/* check for multi subscribers */
  232: 			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
  233: 				if (TASK_FD(tt) == TASK_FD(t))
  234: 					flg++;
  235: #if SUP_ENABLE == KQ_SUPPORT
  236: #ifdef __NetBSD__
  237: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
  238: 					0, 0, (intptr_t) TASK_FD(t));
  239: #else
  240: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
  241: 					0, 0, (void*) TASK_FD(t));
  242: #endif
  243: #elif SUP_ENABLE == EP_SUPPORT
  244: 			ee.data.fd = TASK_FD(t);
  245: 			ee.events ^= ee.events;
  246: 			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
  247: 				ee.events = EPOLLOUT;
  248: 
  249: 			if (flg < 2)
  250: 				FD_CLR(TASK_FD(t), &r->root_fds[0]);
  251: 			else
  252: 				ee.events |= EPOLLIN | EPOLLPRI;
  253: #else
  254: 			if (flg < 2) {
  255: 				FD_CLR(TASK_FD(t), &r->root_fds[0]);
  256: 
  257: 				/* optimize select */
  258: 				for (i = r->root_kq - 1; i >= 0; i--)
  259: 					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
  260: 						break;
  261: 				r->root_kq = i + 1;
  262: 			}
  263: #endif
  264: 			break;
  265: 		case taskWRITE:
  266: 			/* check for multi subscribers */
  267: 			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
  268: 				if (TASK_FD(tt) == TASK_FD(t))
  269: 					flg++;
  270: #if SUP_ENABLE == KQ_SUPPORT
  271: #ifdef __NetBSD__
  272: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
  273: 					0, 0, (intptr_t) TASK_FD(t));
  274: #else
  275: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
  276: 					0, 0, (void*) TASK_FD(t));
  277: #endif
  278: #elif SUP_ENABLE == EP_SUPPORT
  279: 			ee.data.fd = TASK_FD(t);
  280: 			ee.events ^= ee.events;
  281: 			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
  282: 				ee.events = EPOLLIN | EPOLLPRI;
  283: 
  284: 			if (flg < 2)
  285: 				FD_CLR(TASK_FD(t), &r->root_fds[1]);
  286: 			else
  287: 				ee.events |= EPOLLOUT;
  288: #else
  289: 			if (flg < 2) {
  290: 				FD_CLR(TASK_FD(t), &r->root_fds[1]);
  291: 
  292: 				/* optimize select */
  293: 				for (i = r->root_kq - 1; i >= 0; i--)
  294: 					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
  295: 						break;
  296: 				r->root_kq = i + 1;
  297: 			}
  298: #endif
  299: 			break;
  300: 		case taskALARM:
  301: #if SUP_ENABLE == KQ_SUPPORT
  302: 			/* check for multi subscribers */
  303: 			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
  304: 				if (TASK_DATA(tt) == TASK_DATA(t))
  305: 					flg++;
  306: #ifdef __NetBSD__
  307: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
  308: 					0, 0, (intptr_t) TASK_DATA(t));
  309: #else
  310: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
  311: 					0, 0, (void*) TASK_DATA(t));
  312: #endif
  313: #endif
  314: 			break;
  315: 		case taskNODE:
  316: #if SUP_ENABLE == KQ_SUPPORT
  317: 			/* check for multi subscribers */
  318: 			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
  319: 				if (TASK_FD(tt) == TASK_FD(t))
  320: 					flg++;
  321: #ifdef __NetBSD__
  322: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
  323: 					0, 0, (intptr_t) TASK_FD(t));
  324: #else
  325: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
  326: 					0, 0, (void*) TASK_FD(t));
  327: #endif
  328: #endif
  329: 			break;
  330: 		case taskPROC:
  331: #if SUP_ENABLE == KQ_SUPPORT
  332: 			/* check for multi subscribers */
  333: 			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
  334: 				if (TASK_VAL(tt) == TASK_VAL(t))
  335: 					flg++;
  336: #ifdef __NetBSD__
  337: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
  338: 					0, 0, (intptr_t) TASK_VAL(t));
  339: #else
  340: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
  341: 					0, 0, (void*) TASK_VAL(t));
  342: #endif
  343: #endif
  344: 			break;
  345: 		case taskSIGNAL:
  346: #if SUP_ENABLE == KQ_SUPPORT
  347: 			/* check for multi subscribers */
  348: 			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
  349: 				if (TASK_VAL(tt) == TASK_VAL(t))
  350: 					flg++;
  351: #ifdef __NetBSD__
  352: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
  353: 					0, 0, (intptr_t) TASK_VAL(t));
  354: #else
  355: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
  356: 					0, 0, (void*) TASK_VAL(t));
  357: #endif
  358: 			/* restore signal */
  359: 			if (flg < 2)
  360: 				signal(TASK_VAL(t), SIG_DFL);
  361: #endif
  362: 			break;
  363: #ifdef AIO_SUPPORT
  364: 		case taskAIO:
  365: #if SUP_ENABLE == KQ_SUPPORT
  366: 			/* check for multi subscribers */
  367: 			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
  368: 				if (TASK_VAL(tt) == TASK_VAL(t))
  369: 					flg++;
  370: #ifdef __NetBSD__
  371: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
  372: 					0, 0, (intptr_t) TASK_VAL(t));
  373: #else
  374: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
  375: 					0, 0, (void*) TASK_VAL(t));
  376: #endif
  377: 			acb = (struct aiocb*) TASK_VAL(t);
  378: 			if (acb) {
  379: 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
  380: 					aio_return(acb);
  381: 				e_free(acb);
  382: 				TASK_VAL(t) = 0;
  383: 			}
  384: #endif
  385: 			break;
  386: #ifdef EVFILT_LIO
  387: 		case taskLIO:
  388: #if SUP_ENABLE == KQ_SUPPORT
  389: 			/* check for multi subscribers */
  390: 			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
  391: 				if (TASK_VAL(tt) == TASK_VAL(t))
  392: 					flg++;
  393: #ifdef __NetBSD__
  394: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
  395: 					0, 0, (intptr_t) TASK_VAL(t));
  396: #else
  397: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
  398: 					0, 0, (void*) TASK_VAL(t));
  399: #endif
  400: 			acbs = (struct aiocb**) TASK_VAL(t);
  401: 			if (acbs) {
  402: 				for (i = 0; i < TASK_DATLEN(t); i++) {
  403: 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
  404: 						aio_return(acbs[i]);
  405: 					e_free(acbs[i]);
  406: 				}
  407: 				e_free(acbs);
  408: 				TASK_VAL(t) = 0;
  409: 			}
  410: #endif
  411: 			break;
  412: #endif	/* EVFILT_LIO */
  413: #endif	/* AIO_SUPPORT */
  414: #ifdef EVFILT_USER
  415: 		case taskUSER:
  416: #if SUP_ENABLE == KQ_SUPPORT
  417: 			/* check for multi subscribers */
  418: 			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
  419: 				if (TASK_VAL(tt) == TASK_VAL(t))
  420: 					flg++;
  421: #ifdef __NetBSD__
  422: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
  423: 					0, 0, (intptr_t) TASK_VAL(t));
  424: #else
  425: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
  426: 					0, 0, (void*) TASK_VAL(t));
  427: #endif
  428: #endif
  429: 			break;
  430: #endif	/* EVFILT_USER */
  431: 		case taskTHREAD:
  432: #ifdef HAVE_LIBPTHREAD
  433: 			if (TASK_VAL(t)) {
  434: 				pthread_cancel((pthread_t) TASK_VAL(t));
  435: 				pthread_join((pthread_t) TASK_VAL(t), NULL);
  436: 				if (TASK_VAL(t)) {
  437: 					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
  438: 					TASK_VAL(t) = 0;
  439: 				}
  440: 			}
  441: #endif
  442: 			return NULL;
  443: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
  444: 	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
  445: 		case taskRTC:
  446: 			timer_delete((timer_t) TASK_FLAG(t));
  447: #if SUP_ENABLE == KQ_SUPPORT
  448: 			schedCancel((sched_task_t*) TASK_RET(t));
  449: #else
  450: 			/* check for multi subscribers */
  451: 			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
  452: 				if (TASK_DATA(tt) == TASK_DATA(t))
  453: 					flg++;
  454: 
  455: 			/* restore signal */
  456: 			if (flg < 2)
  457: 				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
  458: #endif
  459: 			return NULL;
  460: #endif	/* HAVE_TIMER_CREATE */
  461: 		default:
  462: 			return NULL;
  463: 	}
  464: 
  465: #if SUP_ENABLE == KQ_SUPPORT
  466: 	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
  467: #elif SUP_ENABLE == EP_SUPPORT
  468: 	if (TASK_TYPE(t) == taskREAD || TASK_TYPE(t) == taskWRITE) {
  469: 		epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
  470: 	}
  471: #endif
  472: 	return NULL;
  473: }
  474: 
  475: #ifdef HAVE_LIBPTHREAD
  476: /*
  477:  * sched_hook_thread() - Default THREAD hook
  478:  *
  479:  * @task = current task
  480:  * @arg = pthread attributes
  481:  * return: <0 errors and 0 ok
  482:  */
  483: void *
  484: sched_hook_thread(void *task, void *arg)
  485: {
  486: 	sched_task_t *t = task;
  487: 	pthread_t tid;
  488: 	sigset_t s, o;
  489: 
  490: 	if (!t || !TASK_ROOT(t))
  491: 		return (void*) -1;
  492: 
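	/* Block every signal around pthread_create() so the new thread starts
	 * with a full signal mask and asynchronous signals keep being handled
	 * by the scheduler's own signal tasks. */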
  493: 	sigfillset(&s);
  494: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  495: 	errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  496: 			(void *(*)(void*)) _sched_threadWrapper, t);
  497: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  498: 
  499: 	if (errno) {
  500: 		LOGERR;
  501: 		return (void*) -1;
  502: 	} else
  503: 		TASK_VAL(t) = (u_long) tid;
  504: 
  505: 	if (!TASK_ISLOCKED(t))
  506: 		TASK_LOCK(t);
  507: 
  508: 	return NULL;
  509: }
  510: #endif
  511: 
  512: /*
  513:  * sched_hook_read() - Default READ hook
  514:  *
  515:  * @task = current task
  516:  * @arg = unused
  517:  * return: <0 errors and 0 ok
  518:  */
  519: void *
  520: sched_hook_read(void *task, void *arg __unused)
  521: {
  522: 	sched_task_t *t = task;
  523: 	sched_root_task_t *r = NULL;
  524: #if SUP_ENABLE == KQ_SUPPORT
  525: 	struct kevent chg[1];
  526: 	struct timespec timeout = { 0, 0 };
  527: #elif SUP_ENABLE == EP_SUPPORT
  528: 	struct epoll_event ee;
  529: 	int flg = 0;
  530: #endif
  531: 
  532: 	if (!t || !TASK_ROOT(t))
  533: 		return (void*) -1;
  534: 	else
  535: 		r = TASK_ROOT(t);
  536: 
  537: #if SUP_ENABLE == KQ_SUPPORT
  538: #ifdef __NetBSD__
  539: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  540: #else
  541: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  542: #endif
  543: 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  544: 		if (r->root_hooks.hook_exec.exception)
  545: 			r->root_hooks.hook_exec.exception(r, NULL);
  546: 		else
  547: 			LOGERR;
  548: 		return (void*) -1;
  549: 	}
  550: #elif SUP_ENABLE == EP_SUPPORT
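	/* root_fds[0]/root_fds[1] mirror which descriptors are already
	 * registered for read/write, so the hook can pick EPOLL_CTL_ADD vs
	 * EPOLL_CTL_MOD and preserve the other direction's interest. */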
  551: 	ee.data.fd = TASK_FD(t);
  552: 	ee.events = EPOLLIN | EPOLLPRI;
  553: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
  554: 		flg |= 1;
  555: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
  556: 		flg |= 2;
  557: 		ee.events |= EPOLLOUT;
  558: 	}
  559: 
  560: 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
  561: 		if (r->root_hooks.hook_exec.exception)
  562: 			r->root_hooks.hook_exec.exception(r, NULL);
  563: 		else
  564: 			LOGERR;
  565: 		return (void*) -1;
  566: 	} else
  567: 		FD_SET(TASK_FD(t), &r->root_fds[0]);
  568: #else
  569: 	FD_SET(TASK_FD(t), &r->root_fds[0]);
  570: 	if (TASK_FD(t) >= r->root_kq)
  571: 		r->root_kq = TASK_FD(t) + 1;
  572: #endif
  573: 
  574: 	return NULL;
  575: }
  576: 
  577: /*
  578:  * sched_hook_write() - Default WRITE hook
  579:  *
  580:  * @task = current task
  581:  * @arg = unused
  582:  * return: <0 errors and 0 ok
  583:  */
  584: void *
  585: sched_hook_write(void *task, void *arg __unused)
  586: {
  587: 	sched_task_t *t = task;
  588: 	sched_root_task_t *r = NULL;
  589: #if SUP_ENABLE == KQ_SUPPORT
  590: 	struct kevent chg[1];
  591: 	struct timespec timeout = { 0, 0 };
  592: #elif SUP_ENABLE == EP_SUPPORT
  593: 	struct epoll_event ee;
  594: 	int flg = 0;
  595: #endif
  596: 
  597: 	if (!t || !TASK_ROOT(t))
  598: 		return (void*) -1;
  599: 	else
  600: 		r = TASK_ROOT(t);
  601: 
  602: #if SUP_ENABLE == KQ_SUPPORT
  603: #ifdef __NetBSD__
  604: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  605: #else
  606: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  607: #endif
  608: 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  609: 		if (r->root_hooks.hook_exec.exception)
  610: 			r->root_hooks.hook_exec.exception(r, NULL);
  611: 		else
  612: 			LOGERR;
  613: 		return (void*) -1;
  614: 	}
  615: #elif SUP_ENABLE == EP_SUPPORT
  616: 	ee.data.fd = TASK_FD(t);
  617: 	ee.events = EPOLLOUT;
  618: 
  619: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
  620: 		flg |= 1;
  621: 		ee.events |= EPOLLIN | EPOLLPRI;
  622: 	}
  623: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
  624: 		flg |= 2;
  625: 	}
  626: 
  627: 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
  628: 		if (r->root_hooks.hook_exec.exception)
  629: 			r->root_hooks.hook_exec.exception(r, NULL);
  630: 		else
  631: 			LOGERR;
  632: 		return (void*) -1;
  633: 	} else
  634: 		FD_SET(TASK_FD(t), &r->root_fds[1]);
  635: #else
  636: 	FD_SET(TASK_FD(t), &r->root_fds[1]);
  637: 	if (TASK_FD(t) >= r->root_kq)
  638: 		r->root_kq = TASK_FD(t) + 1;
  639: #endif
  640: 
  641: 	return NULL;
  642: }
  643: 
  644: /*
  645:  * sched_hook_alarm() - Default ALARM hook
  646:  *
  647:  * @task = current task
  648:  * @arg = unused
  649:  * return: <0 errors and 0 ok
  650:  */
  651: void *
  652: sched_hook_alarm(void *task, void *arg __unused)
  653: {
  654: #if SUP_ENABLE == KQ_SUPPORT
  655: 	sched_task_t *t = task;
  656: 	struct kevent chg[1];
  657: 	struct timespec timeout = { 0, 0 };
  658: 
  659: 	if (!t || !TASK_ROOT(t))
  660: 		return (void*) -1;
  661: 
  662: #ifdef __NetBSD__
  663: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  664: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  665: 			(intptr_t) TASK_DATA(t));
  666: #else
  667: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  668: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  669: 			(void*) TASK_DATA(t));
  670: #endif
  671: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  672: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  673: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  674: 		else
  675: 			LOGERR;
  676: 		return (void*) -1;
  677: 	}
  678: 
  679: #endif
  680: 	return NULL;
  681: }
  682: 
  683: /*
  684:  * sched_hook_node() - Default NODE hook
  685:  *
  686:  * @task = current task
   687:  * @arg = additional EVFILT_VNODE fflags, OR'd into the default NOTE_* set (0 = defaults only)
  688:  * return: <0 errors and 0 ok
  689:  */
  690: void *
  691: sched_hook_node(void *task, void *arg)
  692: {
  693: #if SUP_ENABLE == KQ_SUPPORT
  694: 	sched_task_t *t = task;
  695: 	struct kevent chg[1];
  696: 	struct timespec timeout = { 0, 0 };
  697: 	u_int addflags = (u_int) (uintptr_t) arg;
  698: 
  699: 	if (!t || !TASK_ROOT(t))
  700: 		return (void*) -1;
  701: 
  702: #ifdef __NetBSD__
  703: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  704: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  705: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (intptr_t) TASK_FD(t));
  706: #else
  707: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  708: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  709: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (void*) TASK_FD(t));
  710: #endif
  711: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  712: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  713: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  714: 		else
  715: 			LOGERR;
  716: 		return (void*) -1;
  717: 	}
  718: 
  719: #endif
  720: 	return NULL;
  721: }
  722: 
  723: /*
  724:  * sched_hook_proc() - Default PROC hook
  725:  *
  726:  * @task = current task
  727:  * @arg = unused
  728:  * return: <0 errors and 0 ok
  729:  */
  730: void *
  731: sched_hook_proc(void *task, void *arg __unused)
  732: {
  733: #if SUP_ENABLE == KQ_SUPPORT
  734: 	sched_task_t *t = task;
  735: 	struct kevent chg[1];
  736: 	struct timespec timeout = { 0, 0 };
  737: 
  738: 	if (!t || !TASK_ROOT(t))
  739: 		return (void*) -1;
  740: 
  741: #ifdef __NetBSD__
  742: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  743: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  744: #else
  745: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  746: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  747: #endif
  748: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  749: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  750: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  751: 		else
  752: 			LOGERR;
  753: 		return (void*) -1;
  754: 	}
  755: 
  756: #endif
  757: 	return NULL;
  758: }
  759: 
  760: /*
  761:  * sched_hook_signal() - Default SIGNAL hook
  762:  *
  763:  * @task = current task
  764:  * @arg = unused
  765:  * return: <0 errors and 0 ok
  766:  */
  767: void *
  768: sched_hook_signal(void *task, void *arg __unused)
  769: {
  770: #if SUP_ENABLE == KQ_SUPPORT
  771: 	sched_task_t *t = task;
  772: 	struct kevent chg[1];
  773: 	struct timespec timeout = { 0, 0 };
  774: 
  775: 	if (!t || !TASK_ROOT(t))
  776: 		return (void*) -1;
  777: 
  778: 	/* ignore signal */
  779: 	signal(TASK_VAL(t), SIG_IGN);
  780: 
  781: #ifdef __NetBSD__
  782: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
  783: #else
  784: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
  785: #endif
  786: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  787: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  788: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  789: 		else
  790: 			LOGERR;
  791: 		return (void*) -1;
  792: 	}
  793: #endif
  794: 	return NULL;
  795: }
  796: 
  797: /*
  798:  * sched_hook_user() - Default USER hook
  799:  *
  800:  * @task = current task
  801:  * @arg = unused
  802:  * return: <0 errors and 0 ok
  803:  */
  804: #ifdef EVFILT_USER
  805: void *
  806: sched_hook_user(void *task, void *arg __unused)
  807: {
  808: #if SUP_ENABLE == KQ_SUPPORT
  809: 	sched_task_t *t = task;
  810: 	struct kevent chg[1];
  811: 	struct timespec timeout = { 0, 0 };
  812: 
  813: 	if (!t || !TASK_ROOT(t))
  814: 		return (void*) -1;
  815: 
  816: #ifdef __NetBSD__
  817: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  818: 			0, (intptr_t) TASK_VAL(t));
  819: #else
  820: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  821: 			0, (void*) TASK_VAL(t));
  822: #endif
  823: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  824: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  825: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  826: 		else
  827: 			LOGERR;
  828: 		return (void*) -1;
  829: 	}
  830: 
  831: #endif
  832: 	return NULL;
  833: }
  834: #endif
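/*
 * Example (sketch, non-NetBSD udata form): firing a user event registered by
 * sched_hook_user().  "kq" and "ident" are assumptions standing in for the
 * root's root_kq descriptor and the user task's TASK_VAL() identifier.
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, ident, EVFILT_USER, 0, NOTE_TRIGGER, 0, (void*) ident);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 */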
  835: 
  836: #if SUP_ENABLE == KQ_SUPPORT
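/* Dispatch kevent() results: for every event find the subscribed task(s),
 * move the first match to the ready queue and re-issue the event with
 * EV_DELETE unless other tasks still subscribe to it (flg > 1). */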
  837: static inline void 
  838: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
  839: {
  840: 	struct kevent evt[1];
  841: 	register int i, flg;
  842: 	sched_task_t *task, *tmp;
  843: 	struct timespec now = { 0, 0 };
  844: #ifdef AIO_SUPPORT
  845: 	int len, fd;
  846: 	struct aiocb *acb;
  847: #ifdef EVFILT_LIO
   848: 	int l, j;
  849: 	off_t off;
  850: 	struct aiocb **acbs;
  851: 	struct iovec *iv;
  852: #endif	/* EVFILT_LIO */
  853: #endif	/* AIO_SUPPORT */
  854: 
  855: 	for (i = 0; i < en; i++) {
  856: 		memcpy(evt, &res[i], sizeof evt);
  857: 		evt->flags = EV_DELETE;
  858: 		/* Put read/write task to ready queue */
  859: 		flg = 0;
  860: 		switch (res[i].filter) {
  861: 			case EVFILT_READ:
  862: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  863: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  864: 						if (!flg) {
  865: 							TASK_RET(task) = res[i].data;
  866: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  867: 
  868: 							/* remove read handle */
  869: 							remove_task_from(task, &r->root_read);
  870: 
  871: 							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  872: 								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  873: 									task->task_type = taskUNUSE;
  874: 									insert_task_to(task, &r->root_unuse);
  875: 								} else {
  876: 									task->task_type = taskREADY;
  877: 									insert_task_to(task, &r->root_ready);
  878: 								}
  879: 							} else {
  880: 								task->task_type = taskREADY;
  881: 								insert_task_to(task, &r->root_ready);
  882: 							}
  883: 						}
  884: 						flg++;
  885: 					}
  886: 				}
  887: 				break;
  888: 			case EVFILT_WRITE:
  889: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  890: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  891: 						if (!flg) {
  892: 							TASK_RET(task) = res[i].data;
  893: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  894: 
  895: 							/* remove write handle */
  896: 							remove_task_from(task, &r->root_write);
  897: 
  898: 							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  899: 								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  900: 									task->task_type = taskUNUSE;
  901: 									insert_task_to(task, &r->root_unuse);
  902: 								} else {
  903: 									task->task_type = taskREADY;
  904: 									insert_task_to(task, &r->root_ready);
  905: 								}
  906: 							} else {
  907: 								task->task_type = taskREADY;
  908: 								insert_task_to(task, &r->root_ready);
  909: 							}
  910: 						}
  911: 						flg++;
  912: 					}
  913: 				}
  914: 				break;
  915: 			case EVFILT_TIMER:
  916: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  917: 					if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
  918: 						if (!flg) {
  919: 							TASK_RET(task) = res[i].data;
  920: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  921: 
  922: 							/* remove alarm handle */
  923: 							transit_task2ready(task, &r->root_alarm);
  924: 						}
  925: 						flg++;
  926: 					}
  927: 				}
  928: 				break;
  929: 			case EVFILT_VNODE:
  930: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  931: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  932: 						if (!flg) {
  933: 							TASK_RET(task) = res[i].data;
  934: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  935: 
  936: 							/* remove node handle */
  937: 							transit_task2ready(task, &r->root_node);
  938: 						}
  939: 						flg++;
  940: 					}
  941: 				}
  942: 				break;
  943: 			case EVFILT_PROC:
  944: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  945: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
  946: 						if (!flg) {
  947: 							TASK_RET(task) = res[i].data;
  948: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  949: 
  950: 							/* remove proc handle */
  951: 							transit_task2ready(task, &r->root_proc);
  952: 						}
  953: 						flg++;
  954: 					}
  955: 				}
  956: 				break;
  957: 			case EVFILT_SIGNAL:
  958: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  959: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
  960: 						if (!flg) {
  961: 							TASK_RET(task) = res[i].data;
  962: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  963: 
  964: 							/* remove signal handle */
  965: 							transit_task2ready(task, &r->root_signal);
  966: 						}
  967: 						flg++;
  968: 					}
  969: 				}
  970: 				break;
  971: #ifdef AIO_SUPPORT
  972: 			case EVFILT_AIO:
  973: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
  974: 					acb = (struct aiocb*) TASK_VAL(task);
  975: 					if (acb == ((struct aiocb*) res[i].udata)) {
  976: 						if (!flg) {
  977: 							TASK_RET(task) = res[i].data;
  978: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  979: 
  980: 							/* remove user handle */
  981: 							transit_task2ready(task, &r->root_aio);
  982: 
  983: 							fd = acb->aio_fildes;
  984: 							if ((len = aio_return(acb)) != -1) {
  985: 								if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
  986: 									LOGERR;
  987: 							} else
  988: 								LOGERR;
  989: 							e_free(acb);
  990: 							TASK_DATLEN(task) = (u_long) len;
  991: 							TASK_FD(task) = fd;
  992: 						}
  993: 						flg++;
  994: 					}
  995: 				}
  996: 				break;
  997: #ifdef EVFILT_LIO
  998: 			case EVFILT_LIO:
  999: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 1000: 					acbs = (struct aiocb**) TASK_VAL(task);
 1001: 					if (acbs == ((struct aiocb**) res[i].udata)) {
 1002: 						if (!flg) {
 1003: 							TASK_RET(task) = res[i].data;
 1004: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1005: 
 1006: 							/* remove user handle */
 1007: 							transit_task2ready(task, &r->root_lio);
 1008: 
 1009: 							iv = (struct iovec*) TASK_DATA(task);
 1010: 							fd = acbs[0]->aio_fildes;
 1011: 							off = acbs[0]->aio_offset;
  1012: 							for (len = 0, j = 0; j < TASK_DATLEN(task); len += l, j++) {
  1013: 								if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
  1014: 									l = 0;
  1015: 								else
  1016: 									l = iv[j].iov_len;
  1017: 								e_free(acbs[j]);
  1018: 							}
 1019: 							e_free(acbs);
 1020: 							TASK_DATLEN(task) = (u_long) len;
 1021: 							TASK_FD(task) = fd;
 1022: 
 1023: 							if (lseek(fd, off + len, SEEK_CUR) == -1)
 1024: 								LOGERR;
 1025: 						}
 1026: 						flg++;
 1027: 					}
 1028: 				}
 1029: 				break;
 1030: #endif	/* EVFILT_LIO */
 1031: #endif	/* AIO_SUPPORT */
 1032: #ifdef EVFILT_USER
 1033: 			case EVFILT_USER:
 1034: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1035: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1036: 						if (!flg) {
 1037: 							TASK_RET(task) = res[i].data;
 1038: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1039: 
 1040: 							/* remove user handle */
 1041: 							transit_task2ready(task, &r->root_user);
 1042: 						}
 1043: 						flg++;
 1044: 					}
 1045: 				}
 1046: 				break;
 1047: #endif	/* EVFILT_USER */
 1048: 		}
 1049: 
 1050: 		if (flg > 1)
 1051: 			evt->flags &= ~EV_DELETE;
 1052: 
 1053: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1054: 			if (r->root_hooks.hook_exec.exception)
 1055: 				r->root_hooks.hook_exec.exception(r, NULL);
 1056: 			else
 1057: 				LOGERR;
 1058: 		}
 1059: 	}
 1060: }
 1061: #endif
 1062: 
 1063: #if SUP_ENABLE == EP_SUPPORT
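/* Dispatch epoll_wait() results: readable/writable descriptors move their
 * first matching task to the ready queue, then the epoll registration is
 * modified or deleted depending on remaining subscribers and on whether the
 * opposite direction is still tracked in root_fds[]. */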
 1064: static inline void
 1065: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
 1066: {
 1067: 	register int i, rflg, wflg;
 1068: 	int ops = EPOLL_CTL_DEL;
 1069: 	sched_task_t *t, *tmp, *task;
 1070: 	struct epoll_event evt[1];
 1071: 
 1072: 	for (i = 0; i < en; i++) {
 1073: 		memcpy(evt, &res[i], sizeof evt);
 1074: 		evt->events ^= evt->events;
 1075: 		rflg = wflg = 0;
 1076: 
 1077: 		if (res[i].events & (EPOLLIN | EPOLLPRI)) {
 1078: 			task = NULL;
 1079: 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 1080: 				if (TASK_FD(t) == evt->data.fd) {
 1081: 					if (!task)
 1082: 						task = t;
 1083: 					rflg++;
 1084: 				}
 1085: 			}
 1086: 
 1087: 			if (task) {
 1088: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1089: 				/* remove read handle */
 1090: 				remove_task_from(task, &r->root_read);
 1091: 
 1092: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
 1093:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1094: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1095: 						task->task_type = taskUNUSE;
 1096: 						insert_task_to(task, &r->root_unuse);
 1097: 					} else {
 1098: 						task->task_type = taskREADY;
 1099: 						insert_task_to(task, &r->root_ready);
 1100: 					}
 1101: 				} else {
 1102: 					task->task_type = taskREADY;
 1103: 					insert_task_to(task, &r->root_ready);
 1104: 				}
 1105: 
 1106: 				if (!(res[i].events & EPOLLOUT) && FD_ISSET(evt->data.fd, &r->root_fds[1])) {
 1107: 					evt->events |= EPOLLOUT;
 1108: 					wflg = 42;
 1109: 				}
 1110: 				if (rflg > 1)
 1111: 					evt->events |= EPOLLIN | EPOLLPRI;
 1112: 				else
 1113: 					FD_CLR(evt->data.fd, &r->root_fds[0]);
 1114: 			}
 1115: 		}
 1116: 		if (res[i].events & EPOLLOUT) {
 1117: 			task = NULL;
 1118: 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 1119: 				if (TASK_FD(t) == evt->data.fd) {
 1120: 					if (!task)
 1121: 						task = t;
 1122: 					wflg++;
 1123: 				}
 1124: 			}
 1125: 
 1126: 			if (task) {
 1127: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 1128: 				/* remove write handle */
 1129: 				remove_task_from(task, &r->root_write);
 1130: 
 1131: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
 1132:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1133: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1134: 						task->task_type = taskUNUSE;
 1135: 						insert_task_to(task, &r->root_unuse);
 1136: 					} else {
 1137: 						task->task_type = taskREADY;
 1138: 						insert_task_to(task, &r->root_ready);
 1139: 					}
 1140: 				} else {
 1141: 					task->task_type = taskREADY;
 1142: 					insert_task_to(task, &r->root_ready);
 1143: 				}
 1144: 
 1145: 				if (!(res[i].events & (EPOLLIN | EPOLLPRI)) && FD_ISSET(evt->data.fd, &r->root_fds[0])) {
 1146: 					evt->events |= EPOLLIN | EPOLLPRI;
 1147: 					rflg = 42;
 1148: 				}
 1149: 				if (wflg > 1)
 1150: 					evt->events |= EPOLLOUT;
 1151: 				else
 1152: 					FD_CLR(evt->data.fd, &r->root_fds[1]);
 1153: 			}
 1154: 		}
 1155: 
 1156: 		if (rflg > 1 || wflg > 1)
 1157: 			ops = EPOLL_CTL_MOD;
 1158: 
 1159: 		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
 1160: 			if (r->root_hooks.hook_exec.exception) {
 1161: 				r->root_hooks.hook_exec.exception(r, NULL);
 1162: 			} else
 1163: 				LOGERR;
 1164: 		}
 1165: 	}
 1166: }
 1167: #endif
 1168: 
 1169: #if SUP_ENABLE == NO_SUPPORT
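/* Dispatch select() results: walk all tracked descriptors, move matching
 * read/write tasks to the ready queue, clear fd_set bits that lost their
 * last subscriber and shrink root_kq to the highest remaining fd + 1. */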
 1170: static inline void 
 1171: fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
 1172: {
 1173: 	register int i, rflg, wflg;
 1174: 	sched_task_t *t, *tmp, *task;
 1175: 
 1176: 	/* skip select check if return value from select is zero */
 1177: 	if (!en)
 1178: 		return;
 1179: 
 1180: 	for (i = 0; i < r->root_kq; i++) {
 1181: 		if (!FD_ISSET(i, &r->root_fds[0]) && !FD_ISSET(i, &r->root_fds[1]))
 1182: 			continue;
 1183: 
 1184: 		rflg = wflg = 0;
 1185: 
 1186: 		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
 1187: 			task = NULL;
 1188: 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 1189: 				if (TASK_FD(t) == i) {
 1190: 					if (!task)
 1191: 						task = t;
 1192: 					rflg++;
 1193: 				}
 1194: 			}
 1195: 
 1196: 			if (task) {
 1197: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1198: 
 1199: 				/* remove read handle */
 1200: 				remove_task_from(task, &r->root_read);
 1201: 
 1202: 				if (r->root_hooks.hook_exec.exception) {
 1203:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1204: 						task->task_type = taskUNUSE;
 1205: 						insert_task_to(task, &r->root_unuse);
 1206: 					} else {
 1207: 						task->task_type = taskREADY;
 1208: 						insert_task_to(task, &r->root_ready);
 1209: 					}
 1210: 				} else {
 1211: 					task->task_type = taskREADY;
 1212: 					insert_task_to(task, &r->root_ready);
 1213: 				}
 1214: 
  1215: 				/* remove resource */
 1216: 				if (rflg == 1)
 1217: 					FD_CLR(i, &r->root_fds[0]);
 1218: 			}
 1219: 		}
 1220: 		if (FD_ISSET(i, &wfd)) {
 1221: 			task = NULL;
 1222: 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 1223: 				if (TASK_FD(t) == i) {
 1224: 					if (!task)
 1225: 						task = t;
 1226: 					wflg++;
 1227: 				}
 1228: 			}
 1229: 
 1230: 			if (task) {
 1231: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 1232: 
 1233: 				/* remove write handle */
 1234: 				remove_task_from(task, &r->root_write);
 1235: 
 1236: 				if (r->root_hooks.hook_exec.exception) {
 1237:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1238: 						task->task_type = taskUNUSE;
 1239: 						insert_task_to(task, &r->root_unuse);
 1240: 					} else {
 1241: 						task->task_type = taskREADY;
 1242: 						insert_task_to(task, &r->root_ready);
 1243: 					}
 1244: 				} else {
 1245: 					task->task_type = taskREADY;
 1246: 					insert_task_to(task, &r->root_ready);
 1247: 				}
 1248: 
  1249: 				/* remove resource */
 1250: 				if (wflg == 1)
 1251: 					FD_CLR(i, &r->root_fds[1]);
 1252: 			}
 1253: 		}
 1254: 	}
 1255: 
 1256: 	/* optimize select */
 1257: 	for (i = r->root_kq - 1; i >= 0; i--)
 1258: 		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
 1259: 			break;
 1260: 	r->root_kq = i + 1;
 1261: }
 1262: #endif
 1263: 
 1264: /*
 1265:  * sched_hook_fetch() - Default FETCH hook
 1266:  *
 1267:  * @root = root task
 1268:  * @arg = unused
 1269:  * return: NULL error or !=NULL fetched task
 1270:  */
 1271: void *
 1272: sched_hook_fetch(void *root, void *arg __unused)
 1273: {
 1274: 	sched_root_task_t *r = root;
 1275: 	sched_task_t *task, *tmp;
 1276: 	struct timespec now, m, mtmp;
 1277: #if SUP_ENABLE == KQ_SUPPORT
 1278: 	struct kevent res[KQ_EVENTS];
 1279: 	struct timespec *timeout;
 1280: #elif SUP_ENABLE == EP_SUPPORT
 1281: 	struct epoll_event res[KQ_EVENTS];
 1282: 	u_long timeout = 0;
 1283: #else
 1284: 	struct timeval *timeout, tv;
 1285: 	fd_set rfd, wfd, xfd;
 1286: #endif
 1287: 	int en;
 1288: 
 1289: 	if (!r)
 1290: 		return NULL;
 1291: 
 1292: 	/* get new task by queue priority */
 1293: 	while ((task = TAILQ_FIRST(&r->root_event))) {
 1294: 		transit_task2unuse(task, &r->root_event);
 1295: 		return task;
 1296: 	}
 1297: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
 1298: 		transit_task2unuse(task, &r->root_ready);
 1299: 		return task;
 1300: 	}
 1301: 
  1302: 	/* if a regular task is pending, set NOWAIT; otherwise compute the timer wait */
 1303: 	if (!TAILQ_FIRST(&r->root_task)) {
 1304: 		/* timer tasks */
 1305: #ifdef TIMER_WITHOUT_SORT
 1306: 		clock_gettime(CLOCK_MONOTONIC, &now);
 1307: 
 1308: 		sched_timespecclear(&r->root_wait);
 1309: 		TAILQ_FOREACH(task, &r->root_timer, task_node) {
 1310: 			if (!sched_timespecisset(&r->root_wait))
 1311: 				r->root_wait = TASK_TS(task);
 1312: 			else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
 1313: 				r->root_wait = TASK_TS(task);
 1314: 		}
 1315: 
 1316: 		if (TAILQ_FIRST(&r->root_timer)) {
 1317: 			m = r->root_wait;
 1318: 			sched_timespecsub(&m, &now, &mtmp);
 1319: 			r->root_wait = mtmp;
 1320: 		} else {
 1321: 			/* set wait INFTIM */
 1322: 			sched_timespecinf(&r->root_wait);
 1323: 		}
 1324: #else	/* ! TIMER_WITHOUT_SORT */
 1325: 		if ((task = TAILQ_FIRST(&r->root_timer))) {
 1326: 			clock_gettime(CLOCK_MONOTONIC, &now);
 1327: 
 1328: 			m = TASK_TS(task);
 1329: 			sched_timespecsub(&m, &now, &mtmp);
 1330: 			r->root_wait = mtmp;
 1331: 		} else {
 1332: 			/* set wait INFTIM */
 1333: 			sched_timespecinf(&r->root_wait);
 1334: 		}
 1335: #endif	/* TIMER_WITHOUT_SORT */
  1336: 	} else	/* no waiting for events, because a regular task is pending */
 1337: 		sched_timespecclear(&r->root_wait);
 1338: 
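	/* Translate root_wait/root_poll into the backend's timeout type:
	 * kqueue takes a struct timespec, epoll_wait() milliseconds and
	 * select() a struct timeval; an infinite wait becomes NULL (or -1
	 * for epoll). */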
 1339: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
 1340: #if SUP_ENABLE == KQ_SUPPORT
 1341: 		timeout = &r->root_wait;
 1342: #elif SUP_ENABLE == EP_SUPPORT
 1343: 		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
 1344: #else
 1345: 		sched_timespec2val(&r->root_wait, &tv);
 1346: 		timeout = &tv;
 1347: #endif	/* KQ_SUPPORT */
 1348: 	} else if (sched_timespecisinf(&r->root_poll))
 1349: #if SUP_ENABLE == EP_SUPPORT
 1350: 		timeout = -1;
 1351: #else
 1352: 		timeout = NULL;
 1353: #endif
 1354: 	else {
 1355: #if SUP_ENABLE == KQ_SUPPORT
 1356: 		timeout = &r->root_poll;
 1357: #elif SUP_ENABLE == EP_SUPPORT
 1358: 		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
 1359: #else
 1360: 		sched_timespec2val(&r->root_poll, &tv);
 1361: 		timeout = &tv;
 1362: #endif	/* KQ_SUPPORT */
 1363: 	}
 1364: 
 1365: #if SUP_ENABLE == KQ_SUPPORT
 1366: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
 1367: #elif SUP_ENABLE == EP_SUPPORT
 1368: 	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
 1369: #else
 1370: 	rfd = xfd = r->root_fds[0];
 1371: 	wfd = r->root_fds[1];
 1372: 	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
 1373: #endif	/* KQ_SUPPORT */
 1374: 		if (r->root_hooks.hook_exec.exception) {
 1375: 			if (r->root_hooks.hook_exec.exception(r, NULL))
 1376: 				return NULL;
 1377: 		} else if (errno != EINTR)
 1378: 			LOGERR;
 1379: 		goto skip_event;
 1380: 	}
 1381: 
 1382: 	/* Go and catch the cat into pipes ... */
 1383: #if SUP_ENABLE == KQ_SUPPORT
 1384: 	/* kevent dispatcher */
 1385: 	fetch_hook_kevent_proceed(en, res, r);
 1386: #elif SUP_ENABLE == EP_SUPPORT
 1387: 	/* epoll dispatcher */
 1388: 	fetch_hook_epoll_proceed(en, res, r);
 1389: #else
 1390: 	/* select dispatcher */
 1391: 	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
 1392: #endif	/* KQ_SUPPORT */
 1393: 
 1394: skip_event:
 1395: 	/* timer update & put in ready queue */
 1396: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1397: 
 1398: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1399: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
 1400: 			transit_task2ready(task, &r->root_timer);
 1401: 
  1402: 	/* move the regular-priority task to the ready queue
  1403: 		if there is no ready task or the regular task reached its max miss count */
 1404: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1405: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1406: 			r->root_miss ^= r->root_miss;
 1407: 
 1408: 			transit_task2ready(task, &r->root_task);
 1409: 		} else
 1410: 			r->root_miss++;
 1411: 	} else
 1412: 		r->root_miss ^= r->root_miss;
 1413: 
  1414: 	/* OK, let's get a ready task!!! */
 1415: 	task = TAILQ_FIRST(&r->root_ready);
 1416: 	if (task)
 1417: 		transit_task2unuse(task, &r->root_ready);
 1418: 	return task;
 1419: }
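/*
 * Example (sketch): a minimal dispatch loop built only from the pieces in
 * this file; applications normally drive the scheduler through schedRun()
 * instead.  "r" is assumed to be an already initialized sched_root_task_t.
 *
 *	sched_task_t *t;
 *
 *	for (;;)
 *		if ((t = sched_hook_fetch(r, NULL)))
 *			schedCall(t);
 */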
 1420: 
 1421: /*
 1422:  * sched_hook_exception() - Default EXCEPTION hook
 1423:  *
 1424:  * @root = root task
  1425:  * @arg = custom handling: EV_EOF (or another value) selects special handling; NULL (default) logs errno
 1426:  * return: <0 errors and 0 ok
 1427:  */
 1428: void *
 1429: sched_hook_exception(void *root, void *arg)
 1430: {
 1431: 	sched_root_task_t *r = root;
 1432: 
 1433: 	if (!r)
 1434: 		return NULL;
 1435: 
 1436: 	/* custom exception handling ... */
 1437: 	if (arg) {
 1438: 		if (arg == (void*) EV_EOF)
 1439: 			return NULL;
 1440: 		return (void*) -1;	/* raise scheduler error!!! */
 1441: 	}
 1442: 
 1443: 	/* if error hook exists */
 1444: 	if (r->root_hooks.hook_root.error)
 1445: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1446: 
 1447: 	/* default case! */
 1448: 	LOGERR;
 1449: 	return NULL;
 1450: }
 1451: 
 1452: /*
 1453:  * sched_hook_condition() - Default CONDITION hook
 1454:  *
 1455:  * @root = root task
 1456:  * @arg = killState from schedRun()
 1457:  * return: NULL kill scheduler loop or !=NULL ok
 1458:  */
 1459: void *
 1460: sched_hook_condition(void *root, void *arg)
 1461: {
 1462: 	sched_root_task_t *r = root;
 1463: 
 1464: 	if (!r)
 1465: 		return NULL;
 1466: 
 1467: 	return (void*) (*r->root_cond - *(intptr_t*) arg);
 1468: }
 1469: 
 1470: /*
 1471:  * sched_hook_rtc() - Default RTC hook
 1472:  *
 1473:  * @task = current task
 1474:  * @arg = unused
 1475:  * return: <0 errors and 0 ok
 1476:  */
 1477: void *
 1478: sched_hook_rtc(void *task, void *arg __unused)
 1479: {
 1480: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
 1481: 	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
 1482: 	sched_task_t *sigt = NULL, *t = task;
 1483: 	struct itimerspec its;
 1484: 	struct sigevent evt;
 1485: 	timer_t tmr;
 1486: #if SUP_ENABLE != KQ_SUPPORT
 1487: 	struct sigaction sa;
 1488: #endif
 1489: 
 1490: 	if (!t || !TASK_ROOT(t))
 1491: 		return (void*) -1;
 1492: 
 1493: 	memset(&evt, 0, sizeof evt);
 1494: 	evt.sigev_notify = SIGEV_SIGNAL;
 1495: 	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
 1496: 	evt.sigev_value.sival_ptr = t;
 1497: 
 1498: 	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
 1499: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1500: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1501: 		else
 1502: 			LOGERR;
 1503: 		return (void*) -1;
 1504: 	} else
 1505: 		TASK_FLAG(t) = (u_long) tmr;
 1506: 
 1507: #if SUP_ENABLE == KQ_SUPPORT
 1508: 	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
 1509: 				t, (size_t) tmr))) {
 1510: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1511: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1512: 		else
 1513: 			LOGERR;
 1514: 		timer_delete(tmr);
 1515: 		return (void*) -1;
 1516: 	} else
 1517: 		TASK_RET(t) = (uintptr_t) sigt;
 1518: #else
 1519: 	memset(&sa, 0, sizeof sa);
 1520: 	sigemptyset(&sa.sa_mask);
 1521: 	sa.sa_sigaction = _sched_rtcSigWrapper;
 1522: 	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 1523: 
 1524: 	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
 1525: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1526: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1527: 		else
 1528: 			LOGERR;
 1529: 		timer_delete(tmr);
 1530: 		return (void*) -1;
 1531: 	}
 1532: #endif
 1533: 
 1534: 	memset(&its, 0, sizeof its);
 1535: 	its.it_value.tv_sec = t->task_val.ts.tv_sec;
 1536: 	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
 1537: 
 1538: 	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
 1539: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1540: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1541: 		else
 1542: 			LOGERR;
 1543: 		schedCancel(sigt);
 1544: 		timer_delete(tmr);
 1545: 		return (void*) -1;
 1546: 	}
 1547: #endif	/* HAVE_TIMER_CREATE */
 1548: 	return NULL;
 1549: }
