File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / hooks.c
Revision 1.27.2.7
Tue Jun 3 20:42:48 2014 UTC (10 years, 1 month ago) by misho
Branches: sched5_2
Log message: remove unused

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.27.2.7 2014/06/03 20:42:48 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004 - 2014
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
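/*
 * transit_task2ready() - move a task from its current queue to the ready queue of its root
 *
 * @t = task
 * @q = queue the task is currently linked on
 */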
   50: static inline void
   51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
   52: {
   53: 	remove_task_from(t, q);
   54: 
   55: 	t->task_type = taskREADY;
   56: 	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
   57: }
   58: 
   59: /*
   60:  * sched_hook_init() - Default INIT hook
   61:  *
   62:  * @root = root task
   63:  * @arg = unused
   64:  * return: <0 errors and 0 ok
   65:  */
   66: void *
   67: sched_hook_init(void *root, void *arg __unused)
   68: {
   69: 	sched_root_task_t *r = root;
   70: 
   71: 	if (!r)
   72: 		return (void*) -1;
   73: 
   74: #if SUP_ENABLE == KQ_SUPPORT
   75: 	r->root_kq = kqueue();
   76: 	if (r->root_kq == -1) {
   77: 		LOGERR;
   78: 		return (void*) -1;
   79: 	}
   80: #elif SUP_ENABLE == EP_SUPPORT
   81: 	r->root_kq = epoll_create(KQ_EVENTS);
   82: 	if (r->root_kq == -1) {
   83: 		LOGERR;
   84: 		return (void*) -1;
   85: 	}
   86: #else
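	/* select() fallback: root_kq tracks the highest watched descriptor + 1,
	 * root_fds[0]/root_fds[1] hold the read/write interest sets */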
   87: 	r->root_kq ^= r->root_kq;
   88: 	FD_ZERO(&r->root_fds[0]);
   89: 	FD_ZERO(&r->root_fds[1]);
   90: #endif
   91: 
   92: 	return NULL;
   93: }
   94: 
   95: /*
   96:  * sched_hook_fini() - Default FINI hook
   97:  *
   98:  * @root = root task
   99:  * @arg = unused
  100:  * return: <0 errors and 0 ok
  101:  */
  102: void *
  103: sched_hook_fini(void *root, void *arg __unused)
  104: {
  105: 	sched_root_task_t *r = root;
  106: 
  107: 	if (!r)
  108: 		return (void*) -1;
  109: 
  110: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
  111: 	if (r->root_kq > 2) {
  112: 		close(r->root_kq);
  113: 		r->root_kq = 0;
  114: 	}
  115: #else
  116: 	FD_ZERO(&r->root_fds[1]);
  117: 	FD_ZERO(&r->root_fds[0]);
  118: 	r->root_kq ^= r->root_kq;
  119: #endif
  120: 
  121: 	return NULL;
  122: }
  123: 
  124: /*
  125:  * sched_hook_cancel() - Default CANCEL hook
  126:  *
  127:  * @task = current task
  128:  * @arg = unused
  129:  * return: <0 errors and 0 ok
  130:  */
  131: void *
  132: sched_hook_cancel(void *task, void *arg __unused)
  133: {
  134: 	sched_task_t *t = task, *tmp, *tt;
  135: 	sched_root_task_t *r = NULL;
  136: 	int flg;
  137: #if SUP_ENABLE == KQ_SUPPORT
  138: 	struct kevent chg[1];
  139: 	struct timespec timeout = { 0, 0 };
  140: #elif SUP_ENABLE == EP_SUPPORT
  141: 	struct epoll_event ee = { .events = 0, .data.fd = 0 };
  142: #else
  143: 	register int i;
  144: #endif
  145: #ifdef AIO_SUPPORT
  146: 	struct aiocb *acb;
  147: #ifdef EVFILT_LIO
  148: 	register int i = 0;
  149: 	struct aiocb **acbs;
  150: #endif	/* EVFILT_LIO */
  151: #endif	/* AIO_SUPPORT */
  152: 
  153: 	if (!t || !TASK_ROOT(t))
  154: 		return (void*) -1;
  155: 	else
  156: 		r = TASK_ROOT(t);
  157: 
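	/* flg counts queued tasks that reference the same resource (fd, timer id, pid, signal, ...);
	 * the kernel event is deleted only when fewer than two subscribers remain */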
  158: 	switch (TASK_TYPE(t)) {
  159: 		case taskREAD:
  160: 			/* check for multi subscribers */
  161: 			flg = 0;
  162: 			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
  163: 				if (TASK_FD(tt) != TASK_FD(t))
  164: 					continue;
  165: 				else
  166: 					flg++;
  167: #if SUP_ENABLE == KQ_SUPPORT
  168: #ifdef __NetBSD__
  169: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
  170: 					0, 0, (intptr_t) TASK_FD(t));
  171: #else
  172: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
  173: 					0, 0, (void*) TASK_FD(t));
  174: #endif
  175: #elif SUP_ENABLE == EP_SUPPORT
  176: 			ee.data.fd = TASK_FD(t);
  177: 			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
  178: 				ee.events = EPOLLOUT;
  179: 
  180: 			if (flg < 2)
  181: 				FD_CLR(TASK_FD(t), &r->root_fds[0]);
  182: 			else
  183: 				ee.events |= (EPOLLIN | EPOLLPRI | EPOLLRDHUP);
  184: #else
  185: 			if (flg < 2) {
  186: 				FD_CLR(TASK_FD(t), &r->root_fds[0]);
  187: 
  188: 				/* optimize select */
  189: 				for (i = r->root_kq - 1; i > 2; i--)
  190: 					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
  191: 						break;
  192: 				if (i > 2)
  193: 					r->root_kq = i + 1;
  194: 			}
  195: #endif
  196: 			break;
  197: 		case taskWRITE:
  198: 			/* check for multi subscribers */
  199: 			flg = 0;
  200: 			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
  201: 				if (TASK_FD(tt) != TASK_FD(t))
  202: 					continue;
  203: 				else
  204: 					flg++;
  205: #if SUP_ENABLE == KQ_SUPPORT
  206: #ifdef __NetBSD__
  207: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
  208: 					0, 0, (intptr_t) TASK_FD(t));
  209: #else
  210: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
  211: 					0, 0, (void*) TASK_FD(t));
  212: #endif
  213: #elif SUP_ENABLE == EP_SUPPORT
  214: 			ee.data.fd = TASK_FD(t);
  215: 			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
  216: 				ee.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP;
  217: 
  218: 			if (flg < 2)
  219: 				FD_CLR(TASK_FD(t), &r->root_fds[1]);
  220: 			else
  221: 				ee.events |= EPOLLOUT;
  222: #else
  223: 			if (flg < 2) {
  224: 				FD_CLR(TASK_FD(t), &r->root_fds[1]);
  225: 
  226: 				/* optimize select */
  227: 				for (i = r->root_kq - 1; i > 2; i--)
  228: 					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
  229: 						break;
  230: 				if (i > 2)
  231: 					r->root_kq = i + 1;
  232: 			}
  233: #endif
  234: 			break;
  235: 		case taskALARM:
  236: #if SUP_ENABLE == KQ_SUPPORT
  237: 			/* check for multi subscribers */
  238: 			flg = 0;
  239: 			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
  240: 				if (TASK_DATA(tt) != TASK_DATA(t))
  241: 					continue;
  242: 				else
  243: 					flg++;
  244: #ifdef __NetBSD__
  245: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
  246: 					0, 0, (intptr_t) TASK_DATA(t));
  247: #else
  248: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
  249: 					0, 0, (void*) TASK_DATA(t));
  250: #endif
  251: #endif
  252: 			break;
  253: 		case taskNODE:
  254: #if SUP_ENABLE == KQ_SUPPORT
  255: 			/* check for multi subscribers */
  256: 			flg = 0;
  257: 			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
  258: 				if (TASK_FD(tt) != TASK_FD(t))
  259: 					continue;
  260: 				else
  261: 					flg++;
  262: #ifdef __NetBSD__
  263: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
  264: 					0, 0, (intptr_t) TASK_FD(t));
  265: #else
  266: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
  267: 					0, 0, (void*) TASK_FD(t));
  268: #endif
  269: #endif
  270: 			break;
  271: 		case taskPROC:
  272: #if SUP_ENABLE == KQ_SUPPORT
  273: 			/* check for multi subscribers */
  274: 			flg = 0;
  275: 			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
  276: 				if (TASK_VAL(tt) != TASK_VAL(t))
  277: 					continue;
  278: 				else
  279: 					flg++;
  280: #ifdef __NetBSD__
  281: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
  282: 					0, 0, (intptr_t) TASK_VAL(t));
  283: #else
  284: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
  285: 					0, 0, (void*) TASK_VAL(t));
  286: #endif
  287: #endif
  288: 			break;
  289: 		case taskSIGNAL:
  290: #if SUP_ENABLE == KQ_SUPPORT
  291: 			/* check for multi subscribers */
  292: 			flg = 0;
  293: 			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
  294: 				if (TASK_VAL(tt) != TASK_VAL(t))
  295: 					continue;
  296: 				else
  297: 					flg++;
  298: #ifdef __NetBSD__
  299: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
  300: 					0, 0, (intptr_t) TASK_VAL(t));
  301: #else
  302: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
  303: 					0, 0, (void*) TASK_VAL(t));
  304: #endif
  305: 			/* restore signal */
  306: 			signal(TASK_VAL(t), SIG_DFL);
  307: #endif
  308: 			break;
  309: #ifdef AIO_SUPPORT
  310: 		case taskAIO:
  311: #if SUP_ENABLE == KQ_SUPPORT
  312: 			/* check for multi subscribers */
  313: 			flg = 0;
  314: 			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
  315: 				if (TASK_VAL(tt) != TASK_VAL(t))
  316: 					continue;
  317: 				else
  318: 					flg++;
  319: #ifdef __NetBSD__
  320: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
  321: 					0, 0, (intptr_t) TASK_VAL(t));
  322: #else
  323: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
  324: 					0, 0, (void*) TASK_VAL(t));
  325: #endif
  326: 			acb = (struct aiocb*) TASK_VAL(t);
  327: 			if (acb) {
  328: 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
  329: 					aio_return(acb);
  330: 				free(acb);
  331: 				TASK_VAL(t) = 0;
  332: 			}
  333: #endif
  334: 			break;
  335: #ifdef EVFILT_LIO
  336: 		case taskLIO:
  337: #if SUP_ENABLE == KQ_SUPPORT
  338: 			/* check for multi subscribers */
  339: 			flg = 0;
  340: 			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
  341: 				if (TASK_VAL(tt) != TASK_VAL(t))
  342: 					continue;
  343: 				else
  344: 					flg++;
  345: #ifdef __NetBSD__
  346: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
  347: 					0, 0, (intptr_t) TASK_VAL(t));
  348: #else
  349: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
  350: 					0, 0, (void*) TASK_VAL(t));
  351: #endif
  352: 			acbs = (struct aiocb**) TASK_VAL(t);
  353: 			if (acbs) {
  354: 				for (i = 0; i < TASK_DATLEN(t); i++) {
  355: 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
  356: 						aio_return(acbs[i]);
  357: 					free(acbs[i]);
  358: 				}
  359: 				free(acbs);
  360: 				TASK_VAL(t) = 0;
  361: 			}
  362: #endif
  363: 			break;
  364: #endif	/* EVFILT_LIO */
  365: #endif	/* AIO_SUPPORT */
  366: #ifdef EVFILT_USER
  367: 		case taskUSER:
  368: #if SUP_ENABLE == KQ_SUPPORT
  369: 			/* check for multi subscribers */
  370: 			flg = 0;
  371: 			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
  372: 				if (TASK_VAL(tt) != TASK_VAL(t))
  373: 					continue;
  374: 				else
  375: 					flg++;
  376: #ifdef __NetBSD__
  377: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
  378: 					0, 0, (intptr_t) TASK_VAL(t));
  379: #else
  380: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
  381: 					0, 0, (void*) TASK_VAL(t));
  382: #endif
  383: #endif
  384: 			break;
  385: #endif	/* EVFILT_USER */
  386: 		case taskTHREAD:
  387: #ifdef HAVE_LIBPTHREAD
  388: 			if (TASK_VAL(t)) {
  389: 				pthread_cancel((pthread_t) TASK_VAL(t));
  390: 				pthread_join((pthread_t) TASK_VAL(t), NULL);
  391: 				if (TASK_VAL(t)) {
  392: 					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
  393: 					TASK_VAL(t) = 0;
  394: 				}
  395: 			}
  396: #endif
  397: 			return NULL;
  398: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
  399: 		case taskRTC:
  400: 			timer_delete((timer_t) TASK_FLAG(t));
  401: 			schedCancel((sched_task_t*) TASK_RET(t));
  402: 			return NULL;
  403: #endif	/* HAVE_TIMER_CREATE */
  404: 		default:
  405: 			return NULL;
  406: 	}
  407: 
  408: #if SUP_ENABLE == KQ_SUPPORT
  409: 	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
  410: #elif SUP_ENABLE == EP_SUPPORT
  411: 	epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
  412: #endif
  413: 	return NULL;
  414: }
  415: 
  416: #ifdef HAVE_LIBPTHREAD
  417: /*
  418:  * sched_hook_thread() - Default THREAD hook
  419:  *
  420:  * @task = current task
  421:  * @arg = pthread attributes
  422:  * return: <0 errors and 0 ok
  423:  */
  424: void *
  425: sched_hook_thread(void *task, void *arg)
  426: {
  427: 	sched_task_t *t = task;
  428: 	pthread_t tid;
  429: 	sigset_t s, o;
  430: 
  431: 	if (!t || !TASK_ROOT(t))
  432: 		return (void*) -1;
  433: 
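	/* block every signal around pthread_create() so the new thread starts with a fully
	 * blocked signal mask and signal delivery stays with the scheduler loop */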
  434: 	sigfillset(&s);
  435: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  436: 	errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  437: 			(void *(*)(void*)) _sched_threadWrapper, t);
  438: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  439: 
  440: 	if (errno) {
  441: 		LOGERR;
  442: 		return (void*) -1;
  443: 	} else
  444: 		TASK_VAL(t) = (u_long) tid;
  445: 
  446: 	if (!TASK_ISLOCKED(t))
  447: 		TASK_LOCK(t);
  448: 
  449: 	return NULL;
  450: }
  451: #endif
  452: 
  453: /*
  454:  * sched_hook_read() - Default READ hook
  455:  *
  456:  * @task = current task
  457:  * @arg = unused
  458:  * return: <0 errors and 0 ok
  459:  */
  460: void *
  461: sched_hook_read(void *task, void *arg __unused)
  462: {
  463: 	sched_task_t *t = task;
  464: 	sched_root_task_t *r = NULL;
  465: #if SUP_ENABLE == KQ_SUPPORT
  466: 	struct kevent chg[1];
  467: 	struct timespec timeout = { 0, 0 };
  468: #elif SUP_ENABLE == EP_SUPPORT
  469: 	struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI | EPOLLRDHUP, .data.fd = 0 };
  470: 	int flg = 0;
  471: #endif
  472: 
  473: 	if (!t || !TASK_ROOT(t))
  474: 		return (void*) -1;
  475: 	else
  476: 		r = TASK_ROOT(t);
  477: 
  478: #if SUP_ENABLE == KQ_SUPPORT
  479: #ifdef __NetBSD__
  480: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  481: #else
  482: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  483: #endif
  484: 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  485: 		if (r->root_hooks.hook_exec.exception)
  486: 			r->root_hooks.hook_exec.exception(r, NULL);
  487: 		else
  488: 			LOGERR;
  489: 		return (void*) -1;
  490: 	}
  491: #elif SUP_ENABLE == EP_SUPPORT
  492: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
  493: 		flg |= 1;
  494: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
  495: 		flg |= 2;
  496: 		ee.events |= EPOLLOUT;
  497: 	}
  498: 
  499: 	ee.data.fd = TASK_FD(t);
  500: 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
  501: 		if (r->root_hooks.hook_exec.exception)
  502: 			r->root_hooks.hook_exec.exception(r, NULL);
  503: 		else
  504: 			LOGERR;
  505: 		return (void*) -1;
  506: 	} else
  507: 		FD_SET(TASK_FD(t), &r->root_fds[0]);
  508: #else
  509: 	FD_SET(TASK_FD(t), &r->root_fds[0]);
  510: 	if (TASK_FD(t) >= r->root_kq)
  511: 		r->root_kq = TASK_FD(t) + 1;
  512: #endif
  513: 
  514: 	return NULL;
  515: }
  516: 
  517: /*
  518:  * sched_hook_write() - Default WRITE hook
  519:  *
  520:  * @task = current task
  521:  * @arg = unused
  522:  * return: <0 errors and 0 ok
  523:  */
  524: void *
  525: sched_hook_write(void *task, void *arg __unused)
  526: {
  527: 	sched_task_t *t = task;
  528: 	sched_root_task_t *r = NULL;
  529: #if SUP_ENABLE == KQ_SUPPORT
  530: 	struct kevent chg[1];
  531: 	struct timespec timeout = { 0, 0 };
  532: #elif SUP_ENABLE == EP_SUPPORT
  533: 	struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
  534: 	int flg = 0;
  535: #endif
  536: 
  537: 	if (!t || !TASK_ROOT(t))
  538: 		return (void*) -1;
  539: 	else
  540: 		r = TASK_ROOT(t);
  541: 
  542: #if SUP_ENABLE == KQ_SUPPORT
  543: #ifdef __NetBSD__
  544: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  545: #else
  546: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  547: #endif
  548: 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  549: 		if (r->root_hooks.hook_exec.exception)
  550: 			r->root_hooks.hook_exec.exception(r, NULL);
  551: 		else
  552: 			LOGERR;
  553: 		return (void*) -1;
  554: 	}
  555: #elif SUP_ENABLE == EP_SUPPORT
  556: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
  557: 		flg |= 1;
  558: 		ee.events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
  559: 	}
  560: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
  561: 		flg |= 2;
  562: 
  563: 	ee.data.fd = TASK_FD(t);
  564: 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
  565: 		if (r->root_hooks.hook_exec.exception)
  566: 			r->root_hooks.hook_exec.exception(r, NULL);
  567: 		else
  568: 			LOGERR;
  569: 		return (void*) -1;
  570: 	} else
  571: 		FD_SET(TASK_FD(t), &r->root_fds[1]);
  572: #else
  573: 	FD_SET(TASK_FD(t), &r->root_fds[1]);
  574: 	if (TASK_FD(t) >= r->root_kq)
  575: 		r->root_kq = TASK_FD(t) + 1;
  576: #endif
  577: 
  578: 	return NULL;
  579: }
  580: 
  581: /*
  582:  * sched_hook_alarm() - Default ALARM hook
  583:  *
  584:  * @task = current task
  585:  * @arg = unused
  586:  * return: <0 errors and 0 ok
  587:  */
  588: void *
  589: sched_hook_alarm(void *task, void *arg __unused)
  590: {
  591: #if SUP_ENABLE == KQ_SUPPORT
  592: 	sched_task_t *t = task;
  593: 	struct kevent chg[1];
  594: 	struct timespec timeout = { 0, 0 };
  595: 
  596: 	if (!t || !TASK_ROOT(t))
  597: 		return (void*) -1;
  598: 
  599: #ifdef __NetBSD__
  600: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  601: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  602: 			(intptr_t) TASK_DATA(t));
  603: #else
  604: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  605: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  606: 			(void*) TASK_DATA(t));
  607: #endif
  608: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  609: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  610: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  611: 		else
  612: 			LOGERR;
  613: 		return (void*) -1;
  614: 	}
  615: 
  616: #endif
  617: 	return NULL;
  618: }
  619: 
  620: /*
  621:  * sched_hook_node() - Default NODE hook
  622:  *
  623:  * @task = current task
  624:  * @arg = unused
  625:  * return: <0 errors and 0 ok
  626:  */
  627: void *
  628: sched_hook_node(void *task, void *arg __unused)
  629: {
  630: #if SUP_ENABLE == KQ_SUPPORT
  631: 	sched_task_t *t = task;
  632: 	struct kevent chg[1];
  633: 	struct timespec timeout = { 0, 0 };
  634: 
  635: 	if (!t || !TASK_ROOT(t))
  636: 		return (void*) -1;
  637: 
  638: #ifdef __NetBSD__
  639: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  640: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  641: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  642: #else
  643: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  644: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  645: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  646: #endif
  647: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  648: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  649: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  650: 		else
  651: 			LOGERR;
  652: 		return (void*) -1;
  653: 	}
  654: 
  655: #endif
  656: 	return NULL;
  657: }
  658: 
  659: /*
  660:  * sched_hook_proc() - Default PROC hook
  661:  *
  662:  * @task = current task
  663:  * @arg = unused
  664:  * return: <0 errors and 0 ok
  665:  */
  666: void *
  667: sched_hook_proc(void *task, void *arg __unused)
  668: {
  669: #if SUP_ENABLE == KQ_SUPPORT
  670: 	sched_task_t *t = task;
  671: 	struct kevent chg[1];
  672: 	struct timespec timeout = { 0, 0 };
  673: 
  674: 	if (!t || !TASK_ROOT(t))
  675: 		return (void*) -1;
  676: 
  677: #ifdef __NetBSD__
  678: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  679: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  680: #else
  681: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  682: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  683: #endif
  684: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  685: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  686: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  687: 		else
  688: 			LOGERR;
  689: 		return (void*) -1;
  690: 	}
  691: 
  692: #endif
  693: 	return NULL;
  694: }
  695: 
  696: /*
  697:  * sched_hook_signal() - Default SIGNAL hook
  698:  *
  699:  * @task = current task
  700:  * @arg = unused
  701:  * return: <0 errors and 0 ok
  702:  */
  703: void *
  704: sched_hook_signal(void *task, void *arg __unused)
  705: {
  706: #if SUP_ENABLE == KQ_SUPPORT
  707: 	sched_task_t *t = task;
  708: 	struct kevent chg[1];
  709: 	struct timespec timeout = { 0, 0 };
  710: 
  711: 	if (!t || !TASK_ROOT(t))
  712: 		return (void*) -1;
  713: 
  714: 	/* ignore signal */
  715: 	signal(TASK_VAL(t), SIG_IGN);
  716: 
  717: #ifdef __NetBSD__
  718: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
  719: #else
  720: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
  721: #endif
  722: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  723: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  724: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  725: 		else
  726: 			LOGERR;
  727: 		return (void*) -1;
  728: 	}
  729: #else
  730: #if 0
  731: 	sched_task_t *t = task;
  732: 	struct sigaction sa;
  733: 
  734: 	memset(&sa, 0, sizeof sa);
  735: 	sigemptyset(&sa.sa_mask);
  736: 	sa.sa_handler = _sched_sigHandler;
  737: 	sa.sa_flags = SA_RESETHAND | SA_RESTART;
  738: 
  739: 	if (sigaction(TASK_VAL(t), &sa, NULL) == -1) {
  740: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  741: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  742: 		else
  743: 			LOGERR;
  744: 		return (void*) -1;
  745: 	}
  746: #endif	/* 0 */
  747: #endif
  748: 	return NULL;
  749: }
  750: 
  751: /*
  752:  * sched_hook_user() - Default USER hook
  753:  *
  754:  * @task = current task
  755:  * @arg = unused
  756:  * return: <0 errors and 0 ok
  757:  */
  758: #ifdef EVFILT_USER
  759: void *
  760: sched_hook_user(void *task, void *arg __unused)
  761: {
  762: #if SUP_ENABLE == KQ_SUPPORT
  763: 	sched_task_t *t = task;
  764: 	struct kevent chg[1];
  765: 	struct timespec timeout = { 0, 0 };
  766: 
  767: 	if (!t || !TASK_ROOT(t))
  768: 		return (void*) -1;
  769: 
  770: #ifdef __NetBSD__
  771: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  772: 			0, (intptr_t) TASK_VAL(t));
  773: #else
  774: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  775: 			0, (void*) TASK_VAL(t));
  776: #endif
  777: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  778: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  779: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  780: 		else
  781: 			LOGERR;
  782: 		return (void*) -1;
  783: 	}
  784: 
  785: #endif
  786: 	return NULL;
  787: }
  788: #endif
  789: 
  790: #if SUP_ENABLE == KQ_SUPPORT
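/*
 * fetch_hook_kevent_proceed() - dispatch returned kevents: move matching tasks to the ready
 * queue and delete the kernel event unless more than one subscriber is still attached
 *
 * @en = number of returned events
 * @res = kevent result array
 * @r = root task
 */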
  791: static inline void 
  792: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
  793: {
  794: 	struct kevent evt[1];
  795: 	register int i, flg;
  796: 	sched_task_t *task, *tmp;
  797: 	struct timespec now = { 0, 0 };
  798: #ifdef AIO_SUPPORT
  799: 	int len, fd;
  800: 	struct aiocb *acb;
  801: #ifdef EVFILT_LIO
  802: 	int l;
  803: 	register int j;
  804: 	off_t off;
  805: 	struct aiocb **acbs;
  806: 	struct iovec *iv;
  807: #endif	/* EVFILT_LIO */
  808: #endif	/* AIO_SUPPORT */
  809: 
  810: 	for (i = 0; i < en; i++) {
  811: 		memcpy(evt, &res[i], sizeof evt);
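		/* by default the returned event is re-submitted with EV_DELETE at the bottom of the
		 * loop; the delete flag is cleared again when more than one subscriber matched */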
  812: 		evt->flags = EV_DELETE;
  813: 		/* Put tasks matching the returned event to the ready queue */
  814: 		switch (res[i].filter) {
  815: 			case EVFILT_READ:
  816: 				flg = 0;
  817: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  818: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  819: 						continue;
  820: 					else {
  821: 						flg++;
  822: 						TASK_RET(task) = res[i].data;
  823: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  824: 					}
  825: 					/* remove read handle */
  826: 					remove_task_from(task, &r->root_read);
  827: 
  828: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  829:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  830: 							task->task_type = taskUNUSE;
  831: 							insert_task_to(task, &r->root_unuse);
  832: 						} else {
  833: 							task->task_type = taskREADY;
  834: 							insert_task_to(task, &r->root_ready);
  835: 						}
  836: 					} else {
  837: 						task->task_type = taskREADY;
  838: 						insert_task_to(task, &r->root_ready);
  839: 					}
  840: 				}
  841: 				/* if at least 2 matched, don't remove the event resource */
  842: 				if (flg > 1)
  843: 					evt->flags ^= evt->flags;
  844: 				break;
  845: 			case EVFILT_WRITE:
  846: 				flg = 0;
  847: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  848: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  849: 						continue;
  850: 					else {
  851: 						flg++;
  852: 						TASK_RET(task) = res[i].data;
  853: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  854: 					}
  855: 					/* remove write handle */
  856: 					remove_task_from(task, &r->root_write);
  857: 
  858: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  859:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  860: 							task->task_type = taskUNUSE;
  861: 							insert_task_to(task, &r->root_unuse);
  862: 						} else {
  863: 							task->task_type = taskREADY;
  864: 							insert_task_to(task, &r->root_ready);
  865: 						}
  866: 					} else {
  867: 						task->task_type = taskREADY;
  868: 						insert_task_to(task, &r->root_ready);
  869: 					}
  870: 				}
  871: 				/* if at least 2 matched, don't remove the event resource */
  872: 				if (flg > 1)
  873: 					evt->flags ^= evt->flags;
  874: 				break;
  875: 			case EVFILT_TIMER:
  876: 				flg = 0;
  877: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  878: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  879: 						continue;
  880: 					else {
  881: 						flg++;
  882: 						TASK_RET(task) = res[i].data;
  883: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  884: 					}
  885: 					/* remove alarm handle */
  886: 					transit_task2ready(task, &r->root_alarm);
  887: 				}
  888: 				/* if at least 2 matched, don't remove the event resource */
  889: 				if (flg > 1)
  890: 					evt->flags ^= evt->flags;
  891: 				break;
  892: 			case EVFILT_VNODE:
  893: 				flg = 0;
  894: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  895: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  896: 						continue;
  897: 					else {
  898: 						flg++;
  899: 						TASK_RET(task) = res[i].data;
  900: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  901: 					}
  902: 					/* remove node handle */
  903: 					transit_task2ready(task, &r->root_node);
  904: 				}
  905: 				/* if at least 2 matched, don't remove the event resource */
  906: 				if (flg > 1)
  907: 					evt->flags ^= evt->flags;
  908: 				break;
  909: 			case EVFILT_PROC:
  910: 				flg = 0;
  911: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  912: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  913: 						continue;
  914: 					else {
  915: 						flg++;
  916: 						TASK_RET(task) = res[i].data;
  917: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  918: 					}
  919: 					/* remove proc handle */
  920: 					transit_task2ready(task, &r->root_proc);
  921: 				}
  922: 				/* if at least 2 matched, don't remove the event resource */
  923: 				if (flg > 1)
  924: 					evt->flags ^= evt->flags;
  925: 				break;
  926: 			case EVFILT_SIGNAL:
  927: 				flg = 0;
  928: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  929: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  930: 						continue;
  931: 					else {
  932: 						flg++;
  933: 						TASK_RET(task) = res[i].data;
  934: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  935: 					}
  936: 					/* remove signal handle */
  937: 					transit_task2ready(task, &r->root_signal);
  938: 				}
  939: 				/* if at least 2 matched, don't remove the event resource */
  940: 				if (flg > 1)
  941: 					evt->flags ^= evt->flags;
  942: 				break;
  943: #ifdef AIO_SUPPORT
  944: 			case EVFILT_AIO:
  945: 				flg = 0;
  946: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
  947: 					acb = (struct aiocb*) TASK_VAL(task);
  948: 					if (acb != ((struct aiocb*) res[i].udata))
  949: 						continue;
  950: 					else {
  951: 						flg++;
  952: 						TASK_RET(task) = res[i].data;
  953: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  954: 					}
  955: 					/* remove aio handle */
  956: 					transit_task2ready(task, &r->root_aio);
  957: 
  958: 					fd = acb->aio_fildes;
  959: 					if ((len = aio_return(acb)) != -1) {
  960: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
  961: 							LOGERR;
  962: 					} else
  963: 						LOGERR;
  964: 					free(acb);
  965: 					TASK_DATLEN(task) = (u_long) len;
  966: 					TASK_FD(task) = fd;
  967: 				}
  968: 				/* if at least 2 matched, don't remove the event resource */
  969: 				if (flg > 1)
  970: 					evt->flags ^= evt->flags;
  971: 				break;
  972: #ifdef EVFILT_LIO
  973: 			case EVFILT_LIO:
  974: 				flg = 0;
  975: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
  976: 					acbs = (struct aiocb**) TASK_VAL(task);
  977: 					if (acbs != ((struct aiocb**) res[i].udata))
  978: 						continue;
  979: 					else {
  980: 						flg++;
  981: 						TASK_RET(task) = res[i].data;
  982: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  983: 					}
  984: 					/* remove lio handle */
  985: 					transit_task2ready(task, &r->root_lio);
  986: 
  987: 					iv = (struct iovec*) TASK_DATA(task);
  988: 					fd = acbs[0]->aio_fildes;
  989: 					off = acbs[0]->aio_offset;
  990: 					for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
  991: 						if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
  992: 							l = 0;
  993: 						else
  994: 							l = iv[j].iov_len;
  995: 						free(acbs[j]);
  996: 					}
  997: 					free(acbs);
  998: 					TASK_DATLEN(task) = (u_long) len;
  999: 					TASK_FD(task) = fd;
 1000: 
 1001: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
 1002: 						LOGERR;
 1003: 				}
 1004: 				/* if at least 2 matched, don't remove the event resource */
 1005: 				if (flg > 1)
 1006: 					evt->flags ^= evt->flags;
 1007: 				break;
 1008: #endif	/* EVFILT_LIO */
 1009: #endif	/* AIO_SUPPORT */
 1010: #ifdef EVFILT_USER
 1011: 			case EVFILT_USER:
 1012: 				flg = 0;
 1013: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1014: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
 1015: 						continue;
 1016: 					else {
 1017: 						flg++;
 1018: 						TASK_RET(task) = res[i].data;
 1019: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1020: 					}
 1021: 					/* remove user handle */
 1022: 					transit_task2ready(task, &r->root_user);
 1023: 				}
 1024: 				/* if at least 2 matched, don't remove the event resource */
 1025: 				if (flg > 1)
 1026: 					evt->flags ^= evt->flags;
 1027: 				break;
 1028: #endif	/* EVFILT_USER */
 1029: 		}
 1030: 
 1031: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1032: 			if (r->root_hooks.hook_exec.exception)
 1033: 				r->root_hooks.hook_exec.exception(r, NULL);
 1034: 			else
 1035: 				LOGERR;
 1036: 		}
 1037: 	}
 1038: }
 1039: #endif
 1040: 
 1041: #if SUP_ENABLE == EP_SUPPORT
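/*
 * fetch_hook_epoll_proceed() - dispatch returned epoll events: move matching read/write tasks
 * to the ready queue and modify or delete the epoll registration depending on the remaining interest
 *
 * @en = number of returned events
 * @res = epoll_event result array
 * @r = root task
 */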
 1042: static inline void
 1043: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
 1044: {
 1045: 	register int i, flg;
 1046: 	int ops = EPOLL_CTL_DEL;
 1047: 	sched_task_t *task, *tmp;
 1048: 	struct epoll_event evt[1];
 1049: 
 1050: 	for (i = 0; i < en; i++) {
 1051: 		memcpy(evt, &res[i], sizeof evt);
 1052: 
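		/* ops starts as EPOLL_CTL_DEL and is switched to EPOLL_CTL_MOD whenever the
		 * descriptor still has another registered interest */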
 1053: 		if (evt->events & (EPOLLIN | EPOLLPRI | EPOLLET)) {
 1054: 			flg = 0;
 1055: 			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 1056: 				if (TASK_FD(task) != evt->data.fd)
 1057: 					continue;
 1058: 				else {
 1059: 					flg++;
 1060: 					FD_CLR(TASK_FD(task), &r->root_fds[0]);
 1061: 					TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1062: 
 1063: 					evt->events &= ~(EPOLLIN | EPOLLPRI | EPOLLET | EPOLLRDHUP);
 1064: 					if (FD_ISSET(TASK_FD(task), &r->root_fds[1])) {
 1065: 						ops = EPOLL_CTL_MOD;
 1066: 						evt->events |= EPOLLOUT;
 1067: 					}
 1068: 				}
 1069: 				/* remove read handle */
 1070: 				remove_task_from(task, &r->root_read);
 1071: 
 1072: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
 1073:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1074: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1075: 						task->task_type = taskUNUSE;
 1076: 						insert_task_to(task, &r->root_unuse);
 1077: 					} else {
 1078: 						task->task_type = taskREADY;
 1079: 						insert_task_to(task, &r->root_ready);
 1080: 					}
 1081: 				} else {
 1082: 					task->task_type = taskREADY;
 1083: 					insert_task_to(task, &r->root_ready);
 1084: 				}
 1085: 			}
 1086: 			if (flg > 1)
 1087: 				ops = EPOLL_CTL_MOD;
 1088: 		}
 1089: 
 1090: 		if (evt->events & EPOLLOUT) {
 1091: 			flg = 0;
 1092: 			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 1093: 				if (TASK_FD(task) != evt->data.fd)
 1094: 					continue;
 1095: 				else {
 1096: 					flg++;
 1097: 					FD_CLR(TASK_FD(task), &r->root_fds[1]);
 1098: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1099: 							FIONWRITE, &TASK_RET(task));
 1100: 
 1101: 					evt->events &= ~EPOLLOUT;
 1102: 					if (FD_ISSET(TASK_FD(task), &r->root_fds[0])) {
 1103: 						ops = EPOLL_CTL_MOD;
 1104: 						evt->events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
 1105: 					}
 1106: 				}
 1107: 				/* remove write handle */
 1108: 				remove_task_from(task, &r->root_write);
 1109: 
 1110: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
 1111:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1112: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1113: 						task->task_type = taskUNUSE;
 1114: 						insert_task_to(task, &r->root_unuse);
 1115: 					} else {
 1116: 						task->task_type = taskREADY;
 1117: 						insert_task_to(task, &r->root_ready);
 1118: 					}
 1119: 				} else {
 1120: 					task->task_type = taskREADY;
 1121: 					insert_task_to(task, &r->root_ready);
 1122: 				}
 1123: 			}
 1124: 			if (flg > 1)
 1125: 				ops = EPOLL_CTL_MOD;
 1126: 		}
 1127: 
 1128: 		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
 1129: 			if (r->root_hooks.hook_exec.exception) {
 1130: 				r->root_hooks.hook_exec.exception(r, NULL);
 1131: 			} else
 1132: 				LOGERR;
 1133: 		}
 1134: 	}
 1135: }
 1136: #endif
 1137: 
 1138: #if SUP_ENABLE == NO_SUPPORT
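/*
 * fetch_hook_select_proceed() - dispatch select() results: move matching read/write tasks to the
 * ready queue, clear descriptors with no remaining subscribers and shrink the select() range
 *
 * @en = return value of select()
 * @rfd, @wfd, @xfd = read/write/exception result sets
 * @r = root task
 */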
 1139: static inline void 
 1140: fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
 1141: {
 1142: 	register int i, flg;
 1143: 	sched_task_t *task, *tmp;
 1144: 
 1145: 	/* skip select check if return value from select is zero */
 1146: 	if (!en)
 1147: 		return;
 1148: 
 1149: 	for (i = 0; i < r->root_kq; i++) {
 1150: 		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
 1151: 			flg = 0;
 1152: 			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 1153: 				if (TASK_FD(task) != i)
 1154: 					continue;
 1155: 				else {
 1156: 					flg++;
 1157: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1158: 							FIONREAD, &TASK_RET(task));
 1159: 				}
 1160: 				/* remove read handle */
 1161: 				remove_task_from(task, &r->root_read);
 1162: 
 1163: 				if (r->root_hooks.hook_exec.exception) {
 1164:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1165: 						task->task_type = taskUNUSE;
 1166: 						insert_task_to(task, &r->root_unuse);
 1167: 					} else {
 1168: 						task->task_type = taskREADY;
 1169: 						insert_task_to(task, &r->root_ready);
 1170: 					}
 1171: 				} else {
 1172: 					task->task_type = taskREADY;
 1173: 					insert_task_to(task, &r->root_ready);
 1174: 				}
 1175: 			}
 1176: 			/* if exactly 1 matched, remove the resource */
 1177: 			if (flg == 1)
 1178: 				FD_CLR(i, &r->root_fds[0]);
 1179: 		}
 1180: 
 1181: 		if (FD_ISSET(i, &wfd)) {
 1182: 			flg = 0;
 1183: 			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 1184: 				if (TASK_FD(task) != i)
 1185: 					continue;
 1186: 				else {
 1187: 					flg++;
 1188: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1189: 							FIONWRITE, &TASK_RET(task));
 1190: 				}
 1191: 				/* remove write handle */
 1192: 				remove_task_from(task, &r->root_write);
 1193: 
 1194: 				if (r->root_hooks.hook_exec.exception) {
 1195:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1196: 						task->task_type = taskUNUSE;
 1197: 						insert_task_to(task, &r->root_unuse);
 1198: 					} else {
 1199: 						task->task_type = taskREADY;
 1200: 						insert_task_to(task, &r->root_ready);
 1201: 					}
 1202: 				} else {
 1203: 					task->task_type = taskREADY;
 1204: 					insert_task_to(task, &r->root_ready);
 1205: 				}
 1206: 			}
 1207: 			/* if exactly 1 matched, remove the resource */
 1208: 			if (flg == 1)
 1209: 				FD_CLR(i, &r->root_fds[1]);
 1210: 		}
 1211: 	}
 1212: 
 1213: 	/* optimize select */
 1214: 	for (i = r->root_kq - 1; i > 2; i--)
 1215: 		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
 1216: 			break;
 1217: 	if (i > 2)
 1218: 		r->root_kq = i + 1;
 1219: }
 1220: #endif
 1221: 
 1222: /*
 1223:  * sched_hook_fetch() - Default FETCH hook
 1224:  *
 1225:  * @root = root task
 1226:  * @arg = unused
 1227:  * return: NULL error or !=NULL fetched task
 1228:  */
 1229: void *
 1230: sched_hook_fetch(void *root, void *arg __unused)
 1231: {
 1232: 	sched_root_task_t *r = root;
 1233: 	sched_task_t *task, *tmp;
 1234: 	struct timespec now, m, mtmp;
 1235: #if SUP_ENABLE == KQ_SUPPORT
 1236: 	struct kevent res[KQ_EVENTS];
 1237: 	struct timespec *timeout;
 1238: #elif SUP_ENABLE == EP_SUPPORT
 1239: 	struct epoll_event res[KQ_EVENTS];
 1240: 	u_long timeout = 0;
 1241: #else
 1242: 	struct timeval *timeout, tv;
 1243: 	fd_set rfd, wfd, xfd;
 1244: #endif
 1245: 	int en;
 1246: 
 1247: 	if (!r)
 1248: 		return NULL;
 1249: 
 1250: 	/* get new task by queue priority */
 1251: 	while ((task = TAILQ_FIRST(&r->root_event))) {
 1252: 		transit_task2unuse(task, &r->root_event);
 1253: 		return task;
 1254: 	}
 1255: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
 1256: 		transit_task2unuse(task, &r->root_ready);
 1257: 		return task;
 1258: 	}
 1259: 
 1260: #ifdef TIMER_WITHOUT_SORT
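	/* the timer queue is not kept sorted: scan every timer for the nearest deadline */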
 1261: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1262: 
 1263: 	sched_timespecclear(&r->root_wait);
 1264: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
 1265: 		if (!sched_timespecisset(&r->root_wait))
 1266: 			r->root_wait = TASK_TS(task);
 1267: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
 1268: 			r->root_wait = TASK_TS(task);
 1269: 	}
 1270: 
 1271: 	if (TAILQ_FIRST(&r->root_timer)) {
 1272: 		m = r->root_wait;
 1273: 		sched_timespecsub(&m, &now, &mtmp);
 1274: 		r->root_wait = mtmp;
 1275: 	} else {
 1276: 		/* set wait INFTIM */
 1277: 		sched_timespecinf(&r->root_wait);
 1278: 	}
 1279: #else	/* ! TIMER_WITHOUT_SORT */
 1280: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
 1281: 		clock_gettime(CLOCK_MONOTONIC, &now);
 1282: 
 1283: 		m = TASK_TS(task);
 1284: 		sched_timespecsub(&m, &now, &mtmp);
 1285: 		r->root_wait = mtmp;
 1286: 	} else {
 1287: 		/* set wait INFTIM */
 1288: 		sched_timespecinf(&r->root_wait);
 1289: 	}
 1290: #endif	/* TIMER_WITHOUT_SORT */
 1291: 	/* if a regular task is pending, set NOWAIT */
 1292: 	if (TAILQ_FIRST(&r->root_task))
 1293: 		sched_timespecclear(&r->root_wait);
 1294: 
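	/* choose the poll timeout: use root_wait when it is finite, otherwise fall back to
	 * root_poll, and wait forever only when both are set to INFTIM */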
 1295: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
 1296: #if SUP_ENABLE == KQ_SUPPORT
 1297: 		timeout = &r->root_wait;
 1298: #elif SUP_ENABLE == EP_SUPPORT
 1299: 		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
 1300: #else
 1301: 		sched_timespec2val(&r->root_wait, &tv);
 1302: 		timeout = &tv;
 1303: #endif	/* KQ_SUPPORT */
 1304: 	} else if (sched_timespecisinf(&r->root_poll))
 1305: #if SUP_ENABLE == EP_SUPPORT
 1306: 		timeout = -1;
 1307: #else
 1308: 		timeout = NULL;
 1309: #endif
 1310: 	else {
 1311: #if SUP_ENABLE == KQ_SUPPORT
 1312: 		timeout = &r->root_poll;
 1313: #elif SUP_ENABLE == EP_SUPPORT
 1314: 		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
 1315: #else
 1316: 		sched_timespec2val(&r->root_poll, &tv);
 1317: 		timeout = &tv;
 1318: #endif	/* KQ_SUPPORT */
 1319: 	}
 1320: 
 1321: #if SUP_ENABLE == KQ_SUPPORT
 1322: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
 1323: #elif SUP_ENABLE == EP_SUPPORT
 1324: 	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
 1325: #else
 1326: 	rfd = xfd = r->root_fds[0];
 1327: 	wfd = r->root_fds[1];
 1328: 	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
 1329: #endif	/* KQ_SUPPORT */
 1330: 		if (r->root_hooks.hook_exec.exception) {
 1331: 			if (r->root_hooks.hook_exec.exception(r, NULL))
 1332: 				return NULL;
 1333: 		} else if (errno != EINTR)
 1334: 			LOGERR;
 1335: 		goto skip_event;
 1336: 	}
 1337: 
 1338: 	/* Go and catch the cat into pipes ... */
 1339: #if SUP_ENABLE == KQ_SUPPORT
 1340: 	/* kevent dispatcher */
 1341: 	fetch_hook_kevent_proceed(en, res, r);
 1342: #elif SUP_ENABLE == EP_SUPPORT
 1343: 	/* epoll dispatcher */
 1344: 	fetch_hook_epoll_proceed(en, res, r);
 1345: #else
 1346: 	/* select dispatcher */
 1347: 	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
 1348: #endif	/* KQ_SUPPORT */
 1349: 
 1350: skip_event:
 1351: 	/* timer update & put in ready queue */
 1352: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1353: 
 1354: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1355: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
 1356: 			transit_task2ready(task, &r->root_timer);
 1357: 
 1358: 	/* put a regular-priority task to the ready queue,
 1359: 		if there is no ready task or the regular task reached its max miss count */
 1360: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1361: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1362: 			r->root_miss ^= r->root_miss;
 1363: 
 1364: 			transit_task2ready(task, &r->root_task);
 1365: 		} else
 1366: 			r->root_miss++;
 1367: 	} else
 1368: 		r->root_miss ^= r->root_miss;
 1369: 
 1370: 	/* OK, let's get a ready task !!! */
 1371: 	task = TAILQ_FIRST(&r->root_ready);
 1372: 	if (task)
 1373: 		transit_task2unuse(task, &r->root_ready);
 1374: 	return task;
 1375: }
 1376: 
 1377: /*
 1378:  * sched_hook_exception() - Default EXCEPTION hook
 1379:  *
 1380:  * @root = root task
 1381:  * @arg = custom handling: arg == EV_EOF is ignored, any other non-NULL value raises a scheduler error; arg == NULL logs errno
 1382:  * return: <0 errors and 0 ok
 1383:  */
 1384: void *
 1385: sched_hook_exception(void *root, void *arg)
 1386: {
 1387: 	sched_root_task_t *r = root;
 1388: 
 1389: 	if (!r)
 1390: 		return NULL;
 1391: 
 1392: 	/* custom exception handling ... */
 1393: 	if (arg) {
 1394: 		if (arg == (void*) EV_EOF)
 1395: 			return NULL;
 1396: 		return (void*) -1;	/* raise scheduler error!!! */
 1397: 	}
 1398: 
 1399: 	/* if error hook exists */
 1400: 	if (r->root_hooks.hook_root.error)
 1401: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1402: 
 1403: 	/* default case! */
 1404: 	LOGERR;
 1405: 	return NULL;
 1406: }
 1407: 
 1408: /*
 1409:  * sched_hook_condition() - Default CONDITION hook
 1410:  *
 1411:  * @root = root task
 1412:  * @arg = killState from schedRun()
 1413:  * return: NULL kill scheduler loop or !=NULL ok
 1414:  */
 1415: void *
 1416: sched_hook_condition(void *root, void *arg)
 1417: {
 1418: 	sched_root_task_t *r = root;
 1419: 
 1420: 	if (!r)
 1421: 		return NULL;
 1422: 
 1423: 	return (void*) (*r->root_cond - *(intptr_t*) arg);
 1424: }
 1425: 
 1426: /*
 1427:  * sched_hook_rtc() - Default RTC hook
 1428:  *
 1429:  * @task = current task
 1430:  * @arg = unused
 1431:  * return: <0 errors and 0 ok
 1432:  */
 1433: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
 1434: void *
 1435: sched_hook_rtc(void *task, void *arg __unused)
 1436: {
 1437: 	sched_task_t *sigt = NULL, *t = task;
 1438: 	struct itimerspec its;
 1439: 	struct sigevent evt;
 1440: 	timer_t tmr;
 1441: 
 1442: 	if (!t || !TASK_ROOT(t))
 1443: 		return (void*) -1;
 1444: 
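	/* the RTC task is driven by a POSIX timer delivering SIGRTMIN + TASK_DATA(t);
	 * a companion SIGNAL task (registered below via schedSignal() and _sched_rtcWrapper)
	 * catches that signal */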
 1445: 	memset(&evt, 0, sizeof evt);
 1446: 	evt.sigev_notify = SIGEV_SIGNAL;
 1447: 	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
 1448: 	evt.sigev_value.sival_ptr = TASK_DATA(t);
 1449: 
 1450: 	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
 1451: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1452: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1453: 		else
 1454: 			LOGERR;
 1455: 		return (void*) -1;
 1456: 	} else
 1457: 		TASK_FLAG(t) = (u_long) tmr;
 1458: 
 1459: 	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
 1460: 				t, (size_t) tmr))) {
 1461: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1462: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1463: 		else
 1464: 			LOGERR;
 1465: 		timer_delete(tmr);
 1466: 		return (void*) -1;
 1467: 	} else
 1468: 		TASK_RET(t) = (uintptr_t) sigt;
 1469: 
 1470: 	memset(&its, 0, sizeof its);
 1471: 	its.it_value.tv_sec = t->task_val.ts.tv_sec;
 1472: 	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
 1473: 
 1474: 	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
 1475: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1476: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1477: 		else
 1478: 			LOGERR;
 1479: 		schedCancel(sigt);
 1480: 		timer_delete(tmr);
 1481: 		return (void*) -1;
 1482: 	}
 1483: 
 1484: 	return NULL;
 1485: }
 1486: #endif	/* HAVE_TIMER_CREATE */
