File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / hooks.c
Revision 1.27.2.8
Thu Jun 5 22:16:00 2014 UTC by misho
Branches: sched5_2
add rtc tasks to select and epoll

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.27.2.8 2014/06/05 22:16:00 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004 - 2014
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
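       /* detach task t from queue q and put it on the root's ready queue */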
   50: static inline void
   51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
   52: {
   53: 	remove_task_from(t, q);
   54: 
   55: 	t->task_type = taskREADY;
   56: 	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
   57: }
   58: 
   59: #ifdef HAVE_LIBPTHREAD
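       /* pthread start routine: enables cancellation, runs the task via
        * schedCall() and returns it to the unuse queue before exiting */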
   60: static void *
   61: _sched_threadWrapper(sched_task_t *t)
   62: {
   63: 	void *ret = NULL;
   64: 	sched_root_task_t *r;
   65: 
   66: 	if (!t || !TASK_ROOT(t))
   67: 		pthread_exit(ret);
   68: 	else
   69: 		r = (sched_root_task_t*) TASK_ROOT(t);
   70: 
   71: 	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   72: 	/*
   73: 	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
   74: 	*/
   75: 
   76: 	/* notify parent, thread is ready for execution */
   77: 	pthread_testcancel();
   78: 
   79: 	ret = schedCall(t);
   80: 	r->root_ret = ret;
   81: 
   82: 	if (TASK_VAL(t)) {
   83: 		transit_task2unuse(t, &r->root_thread);
   84: 		TASK_VAL(t) = 0;
   85: 	}
   86: 
   87: 	pthread_exit(ret);
   88: }
   89: #endif
   90: 
   91: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
   92: #if SUP_ENABLE == KQ_SUPPORT
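       /* kqueue build: the RTC timer is delivered through a SIGNAL task;
        * this wrapper deletes the POSIX timer and runs the original RTC task */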
   93: static void *
   94: _sched_rtcWrapper(sched_task_t *t)
   95: {
   96: 	sched_task_t *task;
   97: 	void *ret;
   98: 
   99: 	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
  100: 		return NULL;
  101: 	else {
  102: 		task = (sched_task_t*) TASK_DATA(t);
  103: 		timer_delete((timer_t) TASK_DATLEN(t));
  104: 	}
  105: 
  106: 	ret = schedCall(task);
  107: 
  108: 	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
  109: 	return ret;
  110: }
  111: #else
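       /* select/epoll builds: SIGRTMIN-based handler; the RTC task is taken
        * from sival_ptr, its timer deleted and its callback run via schedCall() */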
  112: static void
  113: _sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
  114: {
  115: 	sched_task_t *task;
  116: 
  117: 	if (si && si->si_value.sival_ptr) {
  118: 		task = (sched_task_t*) si->si_value.sival_ptr;
  119: 		timer_delete((timer_t) TASK_FLAG(task));
  120: 
  121: 		TASK_RET(task) = (intptr_t) schedCall(task);
  122: 
  123: 		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
  124: 	}
  125: }
  126: #endif
  127: #endif
  128: 
  129: /*
  130:  * sched_hook_init() - Default INIT hook
  131:  *
  132:  * @root = root task
  133:  * @arg = unused
  134:  * return: <0 errors and 0 ok
  135:  */
  136: void *
  137: sched_hook_init(void *root, void *arg __unused)
  138: {
  139: 	sched_root_task_t *r = root;
  140: 
  141: 	if (!r)
  142: 		return (void*) -1;
  143: 
  144: #if SUP_ENABLE == KQ_SUPPORT
  145: 	r->root_kq = kqueue();
  146: 	if (r->root_kq == -1) {
  147: 		LOGERR;
  148: 		return (void*) -1;
  149: 	}
  150: #elif SUP_ENABLE == EP_SUPPORT
  151: 	r->root_kq = epoll_create(KQ_EVENTS);
  152: 	if (r->root_kq == -1) {
  153: 		LOGERR;
  154: 		return (void*) -1;
  155: 	}
  156: #else
  157: 	r->root_kq ^= r->root_kq;
  158: 	FD_ZERO(&r->root_fds[0]);
  159: 	FD_ZERO(&r->root_fds[1]);
  160: #endif
  161: 
  162: 	return NULL;
  163: }
  164: 
  165: /*
  166:  * sched_hook_fini() - Default FINI hook
  167:  *
  168:  * @root = root task
  169:  * @arg = unused
  170:  * return: <0 errors and 0 ok
  171:  */
  172: void *
  173: sched_hook_fini(void *root, void *arg __unused)
  174: {
  175: 	sched_root_task_t *r = root;
  176: 
  177: 	if (!r)
  178: 		return (void*) -1;
  179: 
  180: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
  181: 	if (r->root_kq > 2) {
  182: 		close(r->root_kq);
  183: 		r->root_kq = 0;
  184: 	}
  185: #else
  186: 	FD_ZERO(&r->root_fds[1]);
  187: 	FD_ZERO(&r->root_fds[0]);
  188: 	r->root_kq ^= r->root_kq;
  189: #endif
  190: 
  191: 	return NULL;
  192: }
  193: 
  194: /*
  195:  * sched_hook_cancel() - Default CANCEL hook
  196:  *
  197:  * @task = current task
  198:  * @arg = unused
  199:  * return: <0 errors and 0 ok
  200:  */
  201: void *
  202: sched_hook_cancel(void *task, void *arg __unused)
  203: {
  204: 	sched_task_t *t = task, *tmp, *tt;
  205: 	sched_root_task_t *r = NULL;
  206: 	int flg;
  207: #if SUP_ENABLE == KQ_SUPPORT
  208: 	struct kevent chg[1];
  209: 	struct timespec timeout = { 0, 0 };
  210: #elif SUP_ENABLE == EP_SUPPORT
  211: 	struct epoll_event ee = { .events = 0, .data.fd = 0 };
  212: #else
  213: 	register int i;
  214: #endif
  215: #ifdef AIO_SUPPORT
  216: 	struct aiocb *acb;
  217: #ifdef EVFILT_LIO
  218: 	register int i = 0;
  219: 	struct aiocb **acbs;
  220: #endif	/* EVFILT_LIO */
  221: #endif	/* AIO_SUPPORT */
  222: 
  223: 	if (!t || !TASK_ROOT(t))
  224: 		return (void*) -1;
  225: 	else
  226: 		r = TASK_ROOT(t);
  227: 
  228: 	switch (TASK_TYPE(t)) {
  229: 		case taskREAD:
  230: 			/* check for multi subscribers */
  231: 			flg = 0;
  232: 			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
  233: 				if (TASK_FD(tt) != TASK_FD(t))
  234: 					continue;
  235: 				else
  236: 					flg++;
  237: #if SUP_ENABLE == KQ_SUPPORT
  238: #ifdef __NetBSD__
  239: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
  240: 					0, 0, (intptr_t) TASK_FD(t));
  241: #else
  242: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
  243: 					0, 0, (void*) TASK_FD(t));
  244: #endif
  245: #elif SUP_ENABLE == EP_SUPPORT
  246: 			ee.data.fd = TASK_FD(t);
  247: 			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
  248: 				ee.events = EPOLLOUT;
  249: 
  250: 			if (flg < 2)
  251: 				FD_CLR(TASK_FD(t), &r->root_fds[0]);
  252: 			else
  253: 				ee.events |= (EPOLLIN | EPOLLPRI | EPOLLRDHUP);
  254: #else
  255: 			if (flg < 2) {
  256: 				FD_CLR(TASK_FD(t), &r->root_fds[0]);
  257: 
  258: 				/* optimize select */
  259: 				for (i = r->root_kq - 1; i > 2; i--)
  260: 					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
  261: 						break;
  262: 				if (i > 2)
  263: 					r->root_kq = i + 1;
  264: 			}
  265: #endif
  266: 			break;
  267: 		case taskWRITE:
  268: 			/* check for multi subscribers */
  269: 			flg = 0;
  270: 			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
  271: 				if (TASK_FD(tt) != TASK_FD(t))
  272: 					continue;
  273: 				else
  274: 					flg++;
  275: #if SUP_ENABLE == KQ_SUPPORT
  276: #ifdef __NetBSD__
  277: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
  278: 					0, 0, (intptr_t) TASK_FD(t));
  279: #else
  280: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
  281: 					0, 0, (void*) TASK_FD(t));
  282: #endif
  283: #elif SUP_ENABLE == EP_SUPPORT
  284: 			ee.data.fd = TASK_FD(t);
  285: 			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
  286: 				ee.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP;
  287: 
  288: 			if (flg < 2)
  289: 				FD_CLR(TASK_FD(t), &r->root_fds[1]);
  290: 			else
  291: 				ee.events |= EPOLLOUT;
  292: #else
  293: 			if (flg < 2) {
  294: 				FD_CLR(TASK_FD(t), &r->root_fds[1]);
  295: 
  296: 				/* optimize select */
  297: 				for (i = r->root_kq - 1; i > 2; i--)
  298: 					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
  299: 						break;
  300: 				if (i > 2)
  301: 					r->root_kq = i + 1;
  302: 			}
  303: #endif
  304: 			break;
  305: 		case taskALARM:
  306: #if SUP_ENABLE == KQ_SUPPORT
  307: 			/* check for multi subscribers */
  308: 			flg = 0;
  309: 			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
  310: 				if (TASK_DATA(tt) != TASK_DATA(t))
  311: 					continue;
  312: 				else
  313: 					flg++;
  314: #ifdef __NetBSD__
  315: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
  316: 					0, 0, (intptr_t) TASK_DATA(t));
  317: #else
  318: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
  319: 					0, 0, (void*) TASK_DATA(t));
  320: #endif
  321: #endif
  322: 			break;
  323: 		case taskNODE:
  324: #if SUP_ENABLE == KQ_SUPPORT
  325: 			/* check for multi subscribers */
  326: 			flg = 0;
  327: 			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
  328: 				if (TASK_FD(tt) != TASK_FD(t))
  329: 					continue;
  330: 				else
  331: 					flg++;
  332: #ifdef __NetBSD__
  333: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
  334: 					0, 0, (intptr_t) TASK_FD(t));
  335: #else
  336: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
  337: 					0, 0, (void*) TASK_FD(t));
  338: #endif
  339: #endif
  340: 			break;
  341: 		case taskPROC:
  342: #if SUP_ENABLE == KQ_SUPPORT
  343: 			/* check for multi subscribers */
  344: 			flg = 0;
  345: 			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
  346: 				if (TASK_VAL(tt) != TASK_VAL(t))
  347: 					continue;
  348: 				else
  349: 					flg++;
  350: #ifdef __NetBSD__
  351: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
  352: 					0, 0, (intptr_t) TASK_VAL(t));
  353: #else
  354: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
  355: 					0, 0, (void*) TASK_VAL(t));
  356: #endif
  357: #endif
  358: 			break;
  359: 		case taskSIGNAL:
  360: #if SUP_ENABLE == KQ_SUPPORT
  361: 			/* check for multi subscribers */
  362: 			flg = 0;
  363: 			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
  364: 				if (TASK_VAL(tt) != TASK_VAL(t))
  365: 					continue;
  366: 				else
  367: 					flg++;
  368: #ifdef __NetBSD__
  369: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
  370: 					0, 0, (intptr_t) TASK_VAL(t));
  371: #else
  372: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
  373: 					0, 0, (void*) TASK_VAL(t));
  374: #endif
  375: 			/* restore signal */
  376: 			if (flg < 2)
  377: 				signal(TASK_VAL(t), SIG_DFL);
  378: #endif
  379: 			break;
  380: #ifdef AIO_SUPPORT
  381: 		case taskAIO:
  382: #if SUP_ENABLE == KQ_SUPPORT
  383: 			/* check for multi subscribers */
  384: 			flg = 0;
  385: 			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
  386: 				if (TASK_VAL(tt) != TASK_VAL(t))
  387: 					continue;
  388: 				else
  389: 					flg++;
  390: #ifdef __NetBSD__
  391: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
  392: 					0, 0, (intptr_t) TASK_VAL(t));
  393: #else
  394: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
  395: 					0, 0, (void*) TASK_VAL(t));
  396: #endif
  397: 			acb = (struct aiocb*) TASK_VAL(t);
  398: 			if (acb) {
  399: 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
  400: 					aio_return(acb);
  401: 				free(acb);
  402: 				TASK_VAL(t) = 0;
  403: 			}
  404: #endif
  405: 			break;
  406: #ifdef EVFILT_LIO
  407: 		case taskLIO:
  408: #if SUP_ENABLE == KQ_SUPPORT
  409: 			/* check for multi subscribers */
  410: 			flg = 0;
  411: 			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
  412: 				if (TASK_VAL(tt) != TASK_VAL(t))
  413: 					continue;
  414: 				else
  415: 					flg++;
  416: #ifdef __NetBSD__
  417: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
  418: 					0, 0, (intptr_t) TASK_VAL(t));
  419: #else
  420: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
  421: 					0, 0, (void*) TASK_VAL(t));
  422: #endif
  423: 			acbs = (struct aiocb**) TASK_VAL(t);
  424: 			if (acbs) {
  425: 				for (i = 0; i < TASK_DATLEN(t); i++) {
  426: 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
  427: 						aio_return(acbs[i]);
  428: 					free(acbs[i]);
  429: 				}
  430: 				free(acbs);
  431: 				TASK_VAL(t) = 0;
  432: 			}
  433: #endif
  434: 			break;
  435: #endif	/* EVFILT_LIO */
  436: #endif	/* AIO_SUPPORT */
  437: #ifdef EVFILT_USER
  438: 		case taskUSER:
  439: #if SUP_ENABLE == KQ_SUPPORT
  440: 			/* check for multi subscribers */
  441: 			flg = 0;
  442: 			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
  443: 				if (TASK_VAL(tt) != TASK_VAL(t))
  444: 					continue;
  445: 				else
  446: 					flg++;
  447: #ifdef __NetBSD__
  448: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
  449: 					0, 0, (intptr_t) TASK_VAL(t));
  450: #else
  451: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
  452: 					0, 0, (void*) TASK_VAL(t));
  453: #endif
  454: #endif
  455: 			break;
  456: #endif	/* EVFILT_USER */
  457: 		case taskTHREAD:
  458: #ifdef HAVE_LIBPTHREAD
  459: 			if (TASK_VAL(t)) {
  460: 				pthread_cancel((pthread_t) TASK_VAL(t));
  461: 				pthread_join((pthread_t) TASK_VAL(t), NULL);
  462: 				if (TASK_VAL(t)) {
  463: 					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
  464: 					TASK_VAL(t) = 0;
  465: 				}
  466: 			}
  467: #endif
  468: 			return NULL;
  469: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
  470: 		case taskRTC:
  471: 			timer_delete((timer_t) TASK_FLAG(t));
  472: #if SUP_ENABLE == KQ_SUPPORT
  473: 			schedCancel((sched_task_t*) TASK_RET(t));
  474: #else
  475: 			/* check for multi subscribers */
  476: 			flg = 0;
  477: 			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
  478: 				if (TASK_DATA(tt) != TASK_DATA(t))
  479: 					continue;
  480: 				else
  481: 					flg++;
  482: 
  483: 			/* restore signal */
  484: 			if (flg < 2)
  485: 				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
  486: #endif
  487: 			return NULL;
  488: #endif	/* HAVE_TIMER_CREATE */
  489: 		default:
  490: 			return NULL;
  491: 	}
  492: 
  493: #if SUP_ENABLE == KQ_SUPPORT
  494: 	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
  495: #elif SUP_ENABLE == EP_SUPPORT
  496: 	epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
  497: #endif
  498: 	return NULL;
  499: }
  500: 
  501: #ifdef HAVE_LIBPTHREAD
  502: /*
  503:  * sched_hook_thread() - Default THREAD hook
  504:  *
  505:  * @task = current task
  506:  * @arg = pthread attributes
  507:  * return: <0 errors and 0 ok
  508:  */
  509: void *
  510: sched_hook_thread(void *task, void *arg)
  511: {
  512: 	sched_task_t *t = task;
  513: 	pthread_t tid;
  514: 	sigset_t s, o;
  515: 
  516: 	if (!t || !TASK_ROOT(t))
  517: 		return (void*) -1;
  518: 
  519: 	sigfillset(&s);
  520: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  521: 	errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  522: 			(void *(*)(void*)) _sched_threadWrapper, t);
  523: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  524: 
  525: 	if (errno) {
  526: 		LOGERR;
  527: 		return (void*) -1;
  528: 	} else
  529: 		TASK_VAL(t) = (u_long) tid;
  530: 
  531: 	if (!TASK_ISLOCKED(t))
  532: 		TASK_LOCK(t);
  533: 
  534: 	return NULL;
  535: }
  536: #endif
  537: 
  538: /*
  539:  * sched_hook_read() - Default READ hook
  540:  *
  541:  * @task = current task
  542:  * @arg = unused
  543:  * return: <0 errors and 0 ok
  544:  */
  545: void *
  546: sched_hook_read(void *task, void *arg __unused)
  547: {
  548: 	sched_task_t *t = task;
  549: 	sched_root_task_t *r = NULL;
  550: #if SUP_ENABLE == KQ_SUPPORT
  551: 	struct kevent chg[1];
  552: 	struct timespec timeout = { 0, 0 };
  553: #elif SUP_ENABLE == EP_SUPPORT
  554: 	struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI | EPOLLRDHUP, .data.fd = 0 };
  555: 	int flg = 0;
  556: #endif
  557: 
  558: 	if (!t || !TASK_ROOT(t))
  559: 		return (void*) -1;
  560: 	else
  561: 		r = TASK_ROOT(t);
  562: 
  563: #if SUP_ENABLE == KQ_SUPPORT
  564: #ifdef __NetBSD__
  565: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  566: #else
  567: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  568: #endif
  569: 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  570: 		if (r->root_hooks.hook_exec.exception)
  571: 			r->root_hooks.hook_exec.exception(r, NULL);
  572: 		else
  573: 			LOGERR;
  574: 		return (void*) -1;
  575: 	}
  576: #elif SUP_ENABLE == EP_SUPPORT
  577: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
  578: 		flg |= 1;
  579: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
  580: 		flg |= 2;
  581: 		ee.events |= EPOLLOUT;
  582: 	}
  583: 
  584: 	ee.data.fd = TASK_FD(t);
  585: 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
  586: 		if (r->root_hooks.hook_exec.exception)
  587: 			r->root_hooks.hook_exec.exception(r, NULL);
  588: 		else
  589: 			LOGERR;
  590: 		return (void*) -1;
  591: 	} else
  592: 		FD_SET(TASK_FD(t), &r->root_fds[0]);
  593: #else
  594: 	FD_SET(TASK_FD(t), &r->root_fds[0]);
  595: 	if (TASK_FD(t) >= r->root_kq)
  596: 		r->root_kq = TASK_FD(t) + 1;
  597: #endif
  598: 
  599: 	return NULL;
  600: }
  601: 
  602: /*
  603:  * sched_hook_write() - Default WRITE hook
  604:  *
  605:  * @task = current task
  606:  * @arg = unused
  607:  * return: <0 errors and 0 ok
  608:  */
  609: void *
  610: sched_hook_write(void *task, void *arg __unused)
  611: {
  612: 	sched_task_t *t = task;
  613: 	sched_root_task_t *r = NULL;
  614: #if SUP_ENABLE == KQ_SUPPORT
  615: 	struct kevent chg[1];
  616: 	struct timespec timeout = { 0, 0 };
  617: #elif SUP_ENABLE == EP_SUPPORT
  618: 	struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
  619: 	int flg = 0;
  620: #endif
  621: 
  622: 	if (!t || !TASK_ROOT(t))
  623: 		return (void*) -1;
  624: 	else
  625: 		r = TASK_ROOT(t);
  626: 
  627: #if SUP_ENABLE == KQ_SUPPORT
  628: #ifdef __NetBSD__
  629: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  630: #else
  631: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  632: #endif
  633: 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  634: 		if (r->root_hooks.hook_exec.exception)
  635: 			r->root_hooks.hook_exec.exception(r, NULL);
  636: 		else
  637: 			LOGERR;
  638: 		return (void*) -1;
  639: 	}
  640: #elif SUP_ENABLE == EP_SUPPORT
  641: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
  642: 		flg |= 1;
  643: 		ee.events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
  644: 	}
  645: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
  646: 		flg |= 2;
  647: 
  648: 	ee.data.fd = TASK_FD(t);
  649: 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
  650: 		if (r->root_hooks.hook_exec.exception)
  651: 			r->root_hooks.hook_exec.exception(r, NULL);
  652: 		else
  653: 			LOGERR;
  654: 		return (void*) -1;
  655: 	} else
  656: 		FD_SET(TASK_FD(t), &r->root_fds[1]);
  657: #else
  658: 	FD_SET(TASK_FD(t), &r->root_fds[1]);
  659: 	if (TASK_FD(t) >= r->root_kq)
  660: 		r->root_kq = TASK_FD(t) + 1;
  661: #endif
  662: 
  663: 	return NULL;
  664: }
  665: 
  666: /*
  667:  * sched_hook_alarm() - Default ALARM hook
  668:  *
  669:  * @task = current task
  670:  * @arg = unused
  671:  * return: <0 errors and 0 ok
  672:  */
  673: void *
  674: sched_hook_alarm(void *task, void *arg __unused)
  675: {
  676: #if SUP_ENABLE == KQ_SUPPORT
  677: 	sched_task_t *t = task;
  678: 	struct kevent chg[1];
  679: 	struct timespec timeout = { 0, 0 };
  680: 
  681: 	if (!t || !TASK_ROOT(t))
  682: 		return (void*) -1;
  683: 
  684: #ifdef __NetBSD__
  685: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  686: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  687: 			(intptr_t) TASK_DATA(t));
  688: #else
  689: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  690: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  691: 			(void*) TASK_DATA(t));
  692: #endif
  693: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  694: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  695: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  696: 		else
  697: 			LOGERR;
  698: 		return (void*) -1;
  699: 	}
  700: 
  701: #endif
  702: 	return NULL;
  703: }
  704: 
  705: /*
  706:  * sched_hook_node() - Default NODE hook
  707:  *
  708:  * @task = current task
  709:  * @arg = unused
  710:  * return: <0 errors and 0 ok
  711:  */
  712: void *
  713: sched_hook_node(void *task, void *arg __unused)
  714: {
  715: #if SUP_ENABLE == KQ_SUPPORT
  716: 	sched_task_t *t = task;
  717: 	struct kevent chg[1];
  718: 	struct timespec timeout = { 0, 0 };
  719: 
  720: 	if (!t || !TASK_ROOT(t))
  721: 		return (void*) -1;
  722: 
  723: #ifdef __NetBSD__
  724: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  725: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  726: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  727: #else
  728: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  729: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  730: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  731: #endif
  732: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  733: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  734: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  735: 		else
  736: 			LOGERR;
  737: 		return (void*) -1;
  738: 	}
  739: 
  740: #endif
  741: 	return NULL;
  742: }
  743: 
  744: /*
  745:  * sched_hook_proc() - Default PROC hook
  746:  *
  747:  * @task = current task
  748:  * @arg = unused
  749:  * return: <0 errors and 0 ok
  750:  */
  751: void *
  752: sched_hook_proc(void *task, void *arg __unused)
  753: {
  754: #if SUP_ENABLE == KQ_SUPPORT
  755: 	sched_task_t *t = task;
  756: 	struct kevent chg[1];
  757: 	struct timespec timeout = { 0, 0 };
  758: 
  759: 	if (!t || !TASK_ROOT(t))
  760: 		return (void*) -1;
  761: 
  762: #ifdef __NetBSD__
  763: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  764: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  765: #else
  766: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  767: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  768: #endif
  769: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  770: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  771: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  772: 		else
  773: 			LOGERR;
  774: 		return (void*) -1;
  775: 	}
  776: 
  777: #endif
  778: 	return NULL;
  779: }
  780: 
  781: /*
  782:  * sched_hook_signal() - Default SIGNAL hook
  783:  *
  784:  * @task = current task
  785:  * @arg = unused
  786:  * return: <0 errors and 0 ok
  787:  */
  788: void *
  789: sched_hook_signal(void *task, void *arg __unused)
  790: {
  791: #if SUP_ENABLE == KQ_SUPPORT
  792: 	sched_task_t *t = task;
  793: 	struct kevent chg[1];
  794: 	struct timespec timeout = { 0, 0 };
  795: 
  796: 	if (!t || !TASK_ROOT(t))
  797: 		return (void*) -1;
  798: 
  799: 	/* ignore signal */
  800: 	signal(TASK_VAL(t), SIG_IGN);
  801: 
  802: #ifdef __NetBSD__
  803: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
  804: #else
  805: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
  806: #endif
  807: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  808: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  809: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  810: 		else
  811: 			LOGERR;
  812: 		return (void*) -1;
  813: 	}
  814: #endif
  815: 	return NULL;
  816: }
  817: 
  818: /*
  819:  * sched_hook_user() - Default USER hook
  820:  *
  821:  * @task = current task
  822:  * @arg = unused
  823:  * return: <0 errors and 0 ok
  824:  */
  825: #ifdef EVFILT_USER
  826: void *
  827: sched_hook_user(void *task, void *arg __unused)
  828: {
  829: #if SUP_ENABLE == KQ_SUPPORT
  830: 	sched_task_t *t = task;
  831: 	struct kevent chg[1];
  832: 	struct timespec timeout = { 0, 0 };
  833: 
  834: 	if (!t || !TASK_ROOT(t))
  835: 		return (void*) -1;
  836: 
  837: #ifdef __NetBSD__
  838: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  839: 			0, (intptr_t) TASK_VAL(t));
  840: #else
  841: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  842: 			0, (void*) TASK_VAL(t));
  843: #endif
  844: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  845: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  846: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  847: 		else
  848: 			LOGERR;
  849: 		return (void*) -1;
  850: 	}
  851: 
  852: #endif
  853: 	return NULL;
  854: }
  855: #endif
  856: 
  857: #if SUP_ENABLE == KQ_SUPPORT
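       /* walk the events returned by kevent(): move every matching task to the
        * ready queue and delete the kernel event unless other subscribers on
        * the same ident remain */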
  858: static inline void 
  859: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
  860: {
  861: 	struct kevent evt[1];
  862: 	register int i, flg;
  863: 	sched_task_t *task, *tmp;
  864: 	struct timespec now = { 0, 0 };
  865: #ifdef AIO_SUPPORT
  866: 	int len, fd;
  867: 	struct aiocb *acb;
  868: #ifdef EVFILT_LIO
  869: 	int l;
  870: 	register int j;
  871: 	off_t off;
  872: 	struct aiocb **acbs;
  873: 	struct iovec *iv;
  874: #endif	/* EVFILT_LIO */
  875: #endif	/* AIO_SUPPORT */
  876: 
  877: 	for (i = 0; i < en; i++) {
  878: 		memcpy(evt, &res[i], sizeof evt);
  879: 		evt->flags = EV_DELETE;
  880: 		/* Put read/write task to ready queue */
  881: 		switch (res[i].filter) {
  882: 			case EVFILT_READ:
  883: 				flg = 0;
  884: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  885: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  886: 						continue;
  887: 					else {
  888: 						flg++;
  889: 						TASK_RET(task) = res[i].data;
  890: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  891: 					}
  892: 					/* remove read handle */
  893: 					remove_task_from(task, &r->root_read);
  894: 
  895: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  896:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  897: 							task->task_type = taskUNUSE;
  898: 							insert_task_to(task, &r->root_unuse);
  899: 						} else {
  900: 							task->task_type = taskREADY;
  901: 							insert_task_to(task, &r->root_ready);
  902: 						}
  903: 					} else {
  904: 						task->task_type = taskREADY;
  905: 						insert_task_to(task, &r->root_ready);
  906: 					}
  907: 				}
  908: 				/* if at least two tasks match, don't remove the event resource */
  909: 				if (flg > 1)
  910: 					evt->flags ^= evt->flags;
  911: 				break;
  912: 			case EVFILT_WRITE:
  913: 				flg = 0;
  914: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  915: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  916: 						continue;
  917: 					else {
  918: 						flg++;
  919: 						TASK_RET(task) = res[i].data;
  920: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  921: 					}
  922: 					/* remove write handle */
  923: 					remove_task_from(task, &r->root_write);
  924: 
  925: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  926:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  927: 							task->task_type = taskUNUSE;
  928: 							insert_task_to(task, &r->root_unuse);
  929: 						} else {
  930: 							task->task_type = taskREADY;
  931: 							insert_task_to(task, &r->root_ready);
  932: 						}
  933: 					} else {
  934: 						task->task_type = taskREADY;
  935: 						insert_task_to(task, &r->root_ready);
  936: 					}
  937: 				}
  938: 				/* if at least two tasks match, don't remove the event resource */
  939: 				if (flg > 1)
  940: 					evt->flags ^= evt->flags;
  941: 				break;
  942: 			case EVFILT_TIMER:
  943: 				flg = 0;
  944: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  945: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  946: 						continue;
  947: 					else {
  948: 						flg++;
  949: 						TASK_RET(task) = res[i].data;
  950: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  951: 					}
  952: 					/* remove alarm handle */
  953: 					transit_task2ready(task, &r->root_alarm);
  954: 				}
  955: 				/* if at least two tasks match, don't remove the event resource */
  956: 				if (flg > 1)
  957: 					evt->flags ^= evt->flags;
  958: 				break;
  959: 			case EVFILT_VNODE:
  960: 				flg = 0;
  961: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  962: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  963: 						continue;
  964: 					else {
  965: 						flg++;
  966: 						TASK_RET(task) = res[i].data;
  967: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  968: 					}
  969: 					/* remove node handle */
  970: 					transit_task2ready(task, &r->root_node);
  971: 				}
  972: 				/* if at least two tasks match, don't remove the event resource */
  973: 				if (flg > 1)
  974: 					evt->flags ^= evt->flags;
  975: 				break;
  976: 			case EVFILT_PROC:
  977: 				flg = 0;
  978: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  979: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  980: 						continue;
  981: 					else {
  982: 						flg++;
  983: 						TASK_RET(task) = res[i].data;
  984: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  985: 					}
  986: 					/* remove proc handle */
  987: 					transit_task2ready(task, &r->root_proc);
  988: 				}
  989: 				/* if at least two tasks match, don't remove the event resource */
  990: 				if (flg > 1)
  991: 					evt->flags ^= evt->flags;
  992: 				break;
  993: 			case EVFILT_SIGNAL:
  994: 				flg = 0;
  995: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  996: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  997: 						continue;
  998: 					else {
  999: 						flg++;
 1000: 						TASK_RET(task) = res[i].data;
 1001: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1002: 					}
 1003: 					/* remove signal handle */
 1004: 					transit_task2ready(task, &r->root_signal);
 1005: 				}
 1006: 				/* if at least two tasks match, don't remove the event resource */
 1007: 				if (flg > 1)
 1008: 					evt->flags ^= evt->flags;
 1009: 				break;
 1010: #ifdef AIO_SUPPORT
 1011: 			case EVFILT_AIO:
 1012: 				flg = 0;
 1013: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
 1014: 					acb = (struct aiocb*) TASK_VAL(task);
 1015: 					if (acb != ((struct aiocb*) res[i].udata))
 1016: 						continue;
 1017: 					else {
 1018: 						flg++;
 1019: 						TASK_RET(task) = res[i].data;
 1020: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1021: 					}
 1022: 					/* remove aio handle */
 1023: 					transit_task2ready(task, &r->root_aio);
 1024: 
 1025: 					fd = acb->aio_fildes;
 1026: 					if ((len = aio_return(acb)) != -1) {
 1027: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
 1028: 							LOGERR;
 1029: 					} else
 1030: 						LOGERR;
 1031: 					free(acb);
 1032: 					TASK_DATLEN(task) = (u_long) len;
 1033: 					TASK_FD(task) = fd;
 1034: 				}
 1035: 				/* if at least two tasks match, don't remove the event resource */
 1036: 				if (flg > 1)
 1037: 					evt->flags ^= evt->flags;
 1038: 				break;
 1039: #ifdef EVFILT_LIO
 1040: 			case EVFILT_LIO:
 1041: 				flg = 0;
 1042: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 1043: 					acbs = (struct aiocb**) TASK_VAL(task);
 1044: 					if (acbs != ((struct aiocb**) res[i].udata))
 1045: 						continue;
 1046: 					else {
 1047: 						flg++;
 1048: 						TASK_RET(task) = res[i].data;
 1049: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1050: 					}
 1051: 					/* remove lio handle */
 1052: 					transit_task2ready(task, &r->root_lio);
 1053: 
 1054: 					iv = (struct iovec*) TASK_DATA(task);
 1055: 					fd = acbs[0]->aio_fildes;
 1056: 					off = acbs[0]->aio_offset;
 1057: 					for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
 1058: 						if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
 1059: 							l = 0;
 1060: 						else
 1061: 							l = iv[j].iov_len;
 1062: 						free(acbs[j]);
 1063: 					}
 1064: 					free(acbs);
 1065: 					TASK_DATLEN(task) = (u_long) len;
 1066: 					TASK_FD(task) = fd;
 1067: 
 1068: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
 1069: 						LOGERR;
 1070: 				}
 1071: 				/* if at least two tasks match, don't remove the event resource */
 1072: 				if (flg > 1)
 1073: 					evt->flags ^= evt->flags;
 1074: 				break;
 1075: #endif	/* EVFILT_LIO */
 1076: #endif	/* AIO_SUPPORT */
 1077: #ifdef EVFILT_USER
 1078: 			case EVFILT_USER:
 1079: 				flg = 0;
 1080: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1081: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
 1082: 						continue;
 1083: 					else {
 1084: 						flg++;
 1085: 						TASK_RET(task) = res[i].data;
 1086: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1087: 					}
 1088: 					/* remove user handle */
 1089: 					transit_task2ready(task, &r->root_user);
 1090: 				}
 1091: 				/* if match at least 2, don't remove resouce of event */
 1092: 				if (flg > 1)
 1093: 					evt->flags ^= evt->flags;
 1094: 				break;
 1095: #endif	/* EVFILT_USER */
 1096: 		}
 1097: 
 1098: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1099: 			if (r->root_hooks.hook_exec.exception)
 1100: 				r->root_hooks.hook_exec.exception(r, NULL);
 1101: 			else
 1102: 				LOGERR;
 1103: 		}
 1104: 	}
 1105: }
 1106: #endif
 1107: 
 1108: #if SUP_ENABLE == EP_SUPPORT
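       /* walk the events returned by epoll_wait(): move matching read/write
        * tasks to the ready queue, then MOD or DEL the fd registration
        * depending on which directions still have subscribers */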
 1109: static inline void
 1110: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
 1111: {
 1112: 	register int i, flg;
 1113: 	int ops = EPOLL_CTL_DEL;
 1114: 	sched_task_t *task, *tmp;
 1115: 	struct epoll_event evt[1];
 1116: 
 1117: 	for (i = 0; i < en; i++) {
 1118: 		memcpy(evt, &res[i], sizeof evt);
 1119: 
 1120: 		if (evt->events & (EPOLLIN | EPOLLPRI | EPOLLET)) {
 1121: 			flg = 0;
 1122: 			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 1123: 				if (TASK_FD(task) != evt->data.fd)
 1124: 					continue;
 1125: 				else {
 1126: 					flg++;
 1127: 					FD_CLR(TASK_FD(task), &r->root_fds[0]);
 1128: 					TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1129: 
 1130: 					evt->events &= ~(EPOLLIN | EPOLLPRI | EPOLLET | EPOLLRDHUP);
 1131: 					if (FD_ISSET(TASK_FD(task), &r->root_fds[1])) {
 1132: 						ops = EPOLL_CTL_MOD;
 1133: 						evt->events |= EPOLLOUT;
 1134: 					}
 1135: 				}
 1136: 				/* remove read handle */
 1137: 				remove_task_from(task, &r->root_read);
 1138: 
 1139: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
 1140:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1141: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1142: 						task->task_type = taskUNUSE;
 1143: 						insert_task_to(task, &r->root_unuse);
 1144: 					} else {
 1145: 						task->task_type = taskREADY;
 1146: 						insert_task_to(task, &r->root_ready);
 1147: 					}
 1148: 				} else {
 1149: 					task->task_type = taskREADY;
 1150: 					insert_task_to(task, &r->root_ready);
 1151: 				}
 1152: 			}
 1153: 			if (flg > 1)
 1154: 				ops = EPOLL_CTL_MOD;
 1155: 		}
 1156: 
 1157: 		if (evt->events & EPOLLOUT) {
 1158: 			flg = 0;
 1159: 			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 1160: 				if (TASK_FD(task) != evt->data.fd)
 1161: 					continue;
 1162: 				else {
 1163: 					flg++;
 1164: 					FD_CLR(TASK_FD(task), &r->root_fds[1]);
 1165: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1166: 							FIONWRITE, &TASK_RET(task));
 1167: 
 1168: 					evt->events &= ~EPOLLOUT;
 1169: 					if (FD_ISSET(TASK_FD(task), &r->root_fds[0])) {
 1170: 						ops = EPOLL_CTL_MOD;
 1171: 						evt->events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
 1172: 					}
 1173: 				}
 1174: 				/* remove write handle */
 1175: 				remove_task_from(task, &r->root_write);
 1176: 
 1177: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
 1178:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1179: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1180: 						task->task_type = taskUNUSE;
 1181: 						insert_task_to(task, &r->root_unuse);
 1182: 					} else {
 1183: 						task->task_type = taskREADY;
 1184: 						insert_task_to(task, &r->root_ready);
 1185: 					}
 1186: 				} else {
 1187: 					task->task_type = taskREADY;
 1188: 					insert_task_to(task, &r->root_ready);
 1189: 				}
 1190: 			}
 1191: 			if (flg > 1)
 1192: 				ops = EPOLL_CTL_MOD;
 1193: 		}
 1194: 
 1195: 		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
 1196: 			if (r->root_hooks.hook_exec.exception) {
 1197: 				r->root_hooks.hook_exec.exception(r, NULL);
 1198: 			} else
 1199: 				LOGERR;
 1200: 		}
 1201: 	}
 1202: }
 1203: #endif
 1204: 
 1205: #if SUP_ENABLE == NO_SUPPORT
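       /* walk the select() result sets: for each ready fd move matching tasks
        * to the ready queue, clear fds whose last subscriber fired and shrink
        * root_kq afterwards */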
 1206: static inline void 
 1207: fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
 1208: {
 1209: 	register int i, flg;
 1210: 	sched_task_t *task, *tmp;
 1211: 
 1212: 	/* skip select check if return value from select is zero */
 1213: 	if (!en)
 1214: 		return;
 1215: 
 1216: 	for (i = 0; i < r->root_kq; i++) {
 1217: 		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
 1218: 			flg = 0;
 1219: 			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 1220: 				if (TASK_FD(task) != i)
 1221: 					continue;
 1222: 				else {
 1223: 					flg++;
 1224: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1225: 							FIONREAD, &TASK_RET(task));
 1226: 				}
 1227: 				/* remove read handle */
 1228: 				remove_task_from(task, &r->root_read);
 1229: 
 1230: 				if (r->root_hooks.hook_exec.exception) {
 1231:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1232: 						task->task_type = taskUNUSE;
 1233: 						insert_task_to(task, &r->root_unuse);
 1234: 					} else {
 1235: 						task->task_type = taskREADY;
 1236: 						insert_task_to(task, &r->root_ready);
 1237: 					}
 1238: 				} else {
 1239: 					task->task_type = taskREADY;
 1240: 					insert_task_to(task, &r->root_ready);
 1241: 				}
 1242: 			}
 1243: 			/* if exactly one task matched, remove the resource */
 1244: 			if (flg == 1)
 1245: 				FD_CLR(i, &r->root_fds[0]);
 1246: 		}
 1247: 
 1248: 		if (FD_ISSET(i, &wfd)) {
 1249: 			flg = 0;
 1250: 			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 1251: 				if (TASK_FD(task) != i)
 1252: 					continue;
 1253: 				else {
 1254: 					flg++;
 1255: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1256: 							FIONWRITE, &TASK_RET(task));
 1257: 				}
 1258: 				/* remove write handle */
 1259: 				remove_task_from(task, &r->root_write);
 1260: 
 1261: 				if (r->root_hooks.hook_exec.exception) {
 1262:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1263: 						task->task_type = taskUNUSE;
 1264: 						insert_task_to(task, &r->root_unuse);
 1265: 					} else {
 1266: 						task->task_type = taskREADY;
 1267: 						insert_task_to(task, &r->root_ready);
 1268: 					}
 1269: 				} else {
 1270: 					task->task_type = taskREADY;
 1271: 					insert_task_to(task, &r->root_ready);
 1272: 				}
 1273: 			}
 1274: 			/* if exactly one task matched, remove the resource */
 1275: 			if (flg == 1)
 1276: 				FD_CLR(i, &r->root_fds[1]);
 1277: 		}
 1278: 	}
 1279: 
 1280: 	/* optimize select */
 1281: 	for (i = r->root_kq - 1; i > 2; i--)
 1282: 		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
 1283: 			break;
 1284: 	if (i > 2)
 1285: 		r->root_kq = i + 1;
 1286: }
 1287: #endif
 1288: 
 1289: /*
 1290:  * sched_hook_fetch() - Default FETCH hook
 1291:  *
 1292:  * @root = root task
 1293:  * @arg = unused
 1294:  * return: NULL error or !=NULL fetched task
 1295:  */
 1296: void *
 1297: sched_hook_fetch(void *root, void *arg __unused)
 1298: {
 1299: 	sched_root_task_t *r = root;
 1300: 	sched_task_t *task, *tmp;
 1301: 	struct timespec now, m, mtmp;
 1302: #if SUP_ENABLE == KQ_SUPPORT
 1303: 	struct kevent res[KQ_EVENTS];
 1304: 	struct timespec *timeout;
 1305: #elif SUP_ENABLE == EP_SUPPORT
 1306: 	struct epoll_event res[KQ_EVENTS];
 1307: 	u_long timeout = 0;
 1308: #else
 1309: 	struct timeval *timeout, tv;
 1310: 	fd_set rfd, wfd, xfd;
 1311: #endif
 1312: 	int en;
 1313: 
 1314: 	if (!r)
 1315: 		return NULL;
 1316: 
 1317: 	/* get new task by queue priority */
 1318: 	while ((task = TAILQ_FIRST(&r->root_event))) {
 1319: 		transit_task2unuse(task, &r->root_event);
 1320: 		return task;
 1321: 	}
 1322: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
 1323: 		transit_task2unuse(task, &r->root_ready);
 1324: 		return task;
 1325: 	}
 1326: 
 1327: #ifdef TIMER_WITHOUT_SORT
 1328: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1329: 
 1330: 	sched_timespecclear(&r->root_wait);
 1331: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
 1332: 		if (!sched_timespecisset(&r->root_wait))
 1333: 			r->root_wait = TASK_TS(task);
 1334: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
 1335: 			r->root_wait = TASK_TS(task);
 1336: 	}
 1337: 
 1338: 	if (TAILQ_FIRST(&r->root_timer)) {
 1339: 		m = r->root_wait;
 1340: 		sched_timespecsub(&m, &now, &mtmp);
 1341: 		r->root_wait = mtmp;
 1342: 	} else {
 1343: 		/* set wait INFTIM */
 1344: 		sched_timespecinf(&r->root_wait);
 1345: 	}
 1346: #else	/* ! TIMER_WITHOUT_SORT */
 1347: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
 1348: 		clock_gettime(CLOCK_MONOTONIC, &now);
 1349: 
 1350: 		m = TASK_TS(task);
 1351: 		sched_timespecsub(&m, &now, &mtmp);
 1352: 		r->root_wait = mtmp;
 1353: 	} else {
 1354: 		/* set wait INFTIM */
 1355: 		sched_timespecinf(&r->root_wait);
 1356: 	}
 1357: #endif	/* TIMER_WITHOUT_SORT */
 1358: 	/* if present member of task, set NOWAIT */
 1359: 	if (TAILQ_FIRST(&r->root_task))
 1360: 		sched_timespecclear(&r->root_wait);
 1361: 
 1362: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
 1363: #if SUP_ENABLE == KQ_SUPPORT
 1364: 		timeout = &r->root_wait;
 1365: #elif SUP_ENABLE == EP_SUPPORT
 1366: 		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
 1367: #else
 1368: 		sched_timespec2val(&r->root_wait, &tv);
 1369: 		timeout = &tv;
 1370: #endif	/* KQ_SUPPORT */
 1371: 	} else if (sched_timespecisinf(&r->root_poll))
 1372: #if SUP_ENABLE == EP_SUPPORT
 1373: 		timeout = -1;
 1374: #else
 1375: 		timeout = NULL;
 1376: #endif
 1377: 	else {
 1378: #if SUP_ENABLE == KQ_SUPPORT
 1379: 		timeout = &r->root_poll;
 1380: #elif SUP_ENABLE == EP_SUPPORT
 1381: 		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
 1382: #else
 1383: 		sched_timespec2val(&r->root_poll, &tv);
 1384: 		timeout = &tv;
 1385: #endif	/* KQ_SUPPORT */
 1386: 	}
 1387: 
 1388: #if SUP_ENABLE == KQ_SUPPORT
 1389: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
 1390: #elif SUP_ENABLE == EP_SUPPORT
 1391: 	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
 1392: #else
 1393: 	rfd = xfd = r->root_fds[0];
 1394: 	wfd = r->root_fds[1];
 1395: 	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
 1396: #endif	/* KQ_SUPPORT */
 1397: 		if (r->root_hooks.hook_exec.exception) {
 1398: 			if (r->root_hooks.hook_exec.exception(r, NULL))
 1399: 				return NULL;
 1400: 		} else if (errno != EINTR)
 1401: 			LOGERR;
 1402: 		goto skip_event;
 1403: 	}
 1404: 
 1405: 	/* Go and catch the cat into pipes ... */
 1406: #if SUP_ENABLE == KQ_SUPPORT
 1407: 	/* kevent dispatcher */
 1408: 	fetch_hook_kevent_proceed(en, res, r);
 1409: #elif SUP_ENABLE == EP_SUPPORT
 1410: 	/* epoll dispatcher */
 1411: 	fetch_hook_epoll_proceed(en, res, r);
 1412: #else
 1413: 	/* select dispatcher */
 1414: 	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
 1415: #endif	/* KQ_SUPPORT */
 1416: 
 1417: skip_event:
 1418: 	/* timer update & put in ready queue */
 1419: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1420: 
 1421: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1422: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
 1423: 			transit_task2ready(task, &r->root_timer);
 1424: 
 1425: 	/* put the regular-priority task on the ready queue
 1426: 		if there is no ready task or it has reached its max miss count */
 1427: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1428: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1429: 			r->root_miss ^= r->root_miss;
 1430: 
 1431: 			transit_task2ready(task, &r->root_task);
 1432: 		} else
 1433: 			r->root_miss++;
 1434: 	} else
 1435: 		r->root_miss ^= r->root_miss;
 1436: 
 1437: 	/* OK, lets get ready task !!! */
 1438: 	task = TAILQ_FIRST(&r->root_ready);
 1439: 	if (task)
 1440: 		transit_task2unuse(task, &r->root_ready);
 1441: 	return task;
 1442: }
 1443: 
 1444: /*
 1445:  * sched_hook_exception() - Default EXCEPTION hook
 1446:  *
 1447:  * @root = root task
 1448:  * @arg = custom handling: EV_EOF or another value; NULL (default) logs errno
 1449:  * return: <0 errors and 0 ok
 1450:  */
 1451: void *
 1452: sched_hook_exception(void *root, void *arg)
 1453: {
 1454: 	sched_root_task_t *r = root;
 1455: 
 1456: 	if (!r)
 1457: 		return NULL;
 1458: 
 1459: 	/* custom exception handling ... */
 1460: 	if (arg) {
 1461: 		if (arg == (void*) EV_EOF)
 1462: 			return NULL;
 1463: 		return (void*) -1;	/* raise scheduler error!!! */
 1464: 	}
 1465: 
 1466: 	/* if error hook exists */
 1467: 	if (r->root_hooks.hook_root.error)
 1468: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1469: 
 1470: 	/* default case! */
 1471: 	LOGERR;
 1472: 	return NULL;
 1473: }
 1474: 
 1475: /*
 1476:  * sched_hook_condition() - Default CONDITION hook
 1477:  *
 1478:  * @root = root task
 1479:  * @arg = killState from schedRun()
 1480:  * return: NULL kill scheduler loop or !=NULL ok
 1481:  */
 1482: void *
 1483: sched_hook_condition(void *root, void *arg)
 1484: {
 1485: 	sched_root_task_t *r = root;
 1486: 
 1487: 	if (!r)
 1488: 		return NULL;
 1489: 
 1490: 	return (void*) (*r->root_cond - *(intptr_t*) arg);
 1491: }
 1492: 
 1493: /*
 1494:  * sched_hook_rtc() - Default RTC hook
 1495:  *
 1496:  * @task = current task
 1497:  * @arg = unused
 1498:  * return: <0 errors and 0 ok
 1499:  */
 1500: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
 1501: void *
 1502: sched_hook_rtc(void *task, void *arg __unused)
 1503: {
 1504: 	sched_task_t *sigt = NULL, *t = task;
 1505: 	struct itimerspec its;
 1506: 	struct sigevent evt;
 1507: 	timer_t tmr;
 1508: #if SUP_ENABLE != KQ_SUPPORT
 1509: 	struct sigaction sa;
 1510: #endif
 1511: 
 1512: 	if (!t || !TASK_ROOT(t))
 1513: 		return (void*) -1;
 1514: 
 1515: 	memset(&evt, 0, sizeof evt);
 1516: 	evt.sigev_notify = SIGEV_SIGNAL;
 1517: 	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
 1518: 	evt.sigev_value.sival_ptr = t;
 1519: 
 1520: 	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
 1521: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1522: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1523: 		else
 1524: 			LOGERR;
 1525: 		return (void*) -1;
 1526: 	} else
 1527: 		TASK_FLAG(t) = (u_long) tmr;
 1528: 
 1529: #if SUP_ENABLE == KQ_SUPPORT
 1530: 	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
 1531: 				t, (size_t) tmr))) {
 1532: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1533: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1534: 		else
 1535: 			LOGERR;
 1536: 		timer_delete(tmr);
 1537: 		return (void*) -1;
 1538: 	} else
 1539: 		TASK_RET(t) = (uintptr_t) sigt;
 1540: #else
 1541: 	memset(&sa, 0, sizeof sa);
 1542: 	sigemptyset(&sa.sa_mask);
 1543: 	sa.sa_sigaction = _sched_rtcSigWrapper;
 1544: 	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 1545: 
 1546: 	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
 1547: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1548: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1549: 		else
 1550: 			LOGERR;
 1551: 		timer_delete(tmr);
 1552: 		return (void*) -1;
 1553: 	}
 1554: #endif
 1555: 
 1556: 	memset(&its, 0, sizeof its);
 1557: 	its.it_value.tv_sec = t->task_val.ts.tv_sec;
 1558: 	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
 1559: 
 1560: 	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
 1561: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1562: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1563: 		else
 1564: 			LOGERR;
 1565: 		schedCancel(sigt);
 1566: 		timer_delete(tmr);
 1567: 		return (void*) -1;
 1568: 	}
 1569: 
 1570: 	return NULL;
 1571: }
 1572: #endif	/* HAVE_TIMER_CREATE */
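
A minimal usage sketch, not part of hooks.c: it shows how the default hooks above fit together when driven by hand. It assumes a sched_root_task_t prepared elsewhere by the library (schedRun() normally performs this loop internally) and uses only symbols visible in this file.

#include "global.h"
#include "hooks.h"

static int
drive_default_hooks(sched_root_task_t *root)
{
	sched_task_t *task;

	/* create the kqueue/epoll descriptor (or clear the fd_sets) */
	if (sched_hook_init(root, NULL))
		return -1;

	/* wait for events, requeue expired timers and run each fetched task */
	while ((task = sched_hook_fetch(root, NULL)))
		schedCall(task);

	/* close the kqueue/epoll descriptor */
	sched_hook_fini(root, NULL);
	return 0;
}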
