File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / hooks.c
Revision 1.28.2.1: download - view: text, annotated - select for diffs - revision graph
Thu Jul 2 22:38:28 2015 UTC (9 years ago) by misho
Branches: sched6_0
Diff to: branchpoint 1.28: preferred, unified
add additional check for RT lib

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.28.2.1 2015/07/02 22:38:28 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004 - 2014
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: static inline void
   51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
   52: {
   53: 	remove_task_from(t, q);
   54: 
   55: 	t->task_type = taskREADY;
   56: 	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
   57: }
   58: 
   59: #ifdef HAVE_LIBPTHREAD
   60: static void *
   61: _sched_threadWrapper(sched_task_t *t)
   62: {
   63: 	void *ret = NULL;
   64: 	sched_root_task_t *r;
   65: 
   66: 	if (!t || !TASK_ROOT(t))
   67: 		pthread_exit(ret);
   68: 	else
   69: 		r = (sched_root_task_t*) TASK_ROOT(t);
   70: 
   71: 	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   72: 	/*
   73: 	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
   74: 	*/
   75: 
   76: 	/* notify parent, thread is ready for execution */
   77: 	pthread_testcancel();
   78: 
   79: 	ret = schedCall(t);
   80: 	r->root_ret = ret;
   81: 
   82: 	if (TASK_VAL(t)) {
   83: 		transit_task2unuse(t, &r->root_thread);
   84: 		TASK_VAL(t) = 0;
   85: 	}
   86: 
   87: 	pthread_exit(ret);
   88: }
   89: #endif
   90: 
   91: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
   92: 	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
   93: #if SUP_ENABLE == KQ_SUPPORT
   94: static void *
   95: _sched_rtcWrapper(sched_task_t *t)
   96: {
   97: 	sched_task_t *task;
   98: 	void *ret;
   99: 
  100: 	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
  101: 		return NULL;
  102: 	else {
  103: 		task = (sched_task_t*) TASK_DATA(t);
  104: 		timer_delete((timer_t) TASK_DATLEN(t));
  105: 	}
  106: 
  107: 	ret = schedCall(task);
  108: 
  109: 	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
  110: 	return ret;
  111: }
  112: #else
  113: static void
  114: _sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
  115: {
  116: 	sched_task_t *task;
  117: 
  118: 	if (si && si->si_value.sival_ptr) {
  119: 		task = (sched_task_t*) si->si_value.sival_ptr;
  120: 		timer_delete((timer_t) TASK_FLAG(task));
  121: 
  122: 		TASK_RET(task) = (intptr_t) schedCall(task);
  123: 
  124: 		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
  125: 	}
  126: }
  127: #endif
  128: #endif
  129: 
  130: /*
  131:  * sched_hook_init() - Default INIT hook
  132:  *
  133:  * @root = root task
  134:  * @arg = unused
  135:  * return: <0 errors and 0 ok
  136:  */
  137: void *
  138: sched_hook_init(void *root, void *arg __unused)
  139: {
  140: 	sched_root_task_t *r = root;
  141: 
  142: 	if (!r)
  143: 		return (void*) -1;
  144: 
  145: #if SUP_ENABLE == KQ_SUPPORT
  146: 	r->root_kq = kqueue();
  147: 	if (r->root_kq == -1) {
  148: 		LOGERR;
  149: 		return (void*) -1;
  150: 	}
  151: #elif SUP_ENABLE == EP_SUPPORT
  152: 	r->root_kq = epoll_create(KQ_EVENTS);
  153: 	if (r->root_kq == -1) {
  154: 		LOGERR;
  155: 		return (void*) -1;
  156: 	}
  157: #else
  158: 	r->root_kq ^= r->root_kq;
  159: 	FD_ZERO(&r->root_fds[0]);
  160: 	FD_ZERO(&r->root_fds[1]);
  161: #endif
  162: 
  163: 	return NULL;
  164: }
  165: 
  166: /*
  167:  * sched_hook_fini() - Default FINI hook
  168:  *
  169:  * @root = root task
  170:  * @arg = unused
  171:  * return: <0 errors and 0 ok
  172:  */
  173: void *
  174: sched_hook_fini(void *root, void *arg __unused)
  175: {
  176: 	sched_root_task_t *r = root;
  177: 
  178: 	if (!r)
  179: 		return (void*) -1;
  180: 
  181: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
  182: 	if (r->root_kq > 2) {
  183: 		close(r->root_kq);
  184: 		r->root_kq = 0;
  185: 	}
  186: #else
  187: 	FD_ZERO(&r->root_fds[1]);
  188: 	FD_ZERO(&r->root_fds[0]);
  189: 	r->root_kq ^= r->root_kq;
  190: #endif
  191: 
  192: 	return NULL;
  193: }
  194: 
/*
 * sched_hook_cancel() - Default CANCEL hook
 *
 * Unregisters a task's pending event from the underlying engine
 * (kqueue change list, epoll interest set, or select() fd bitmaps).
 * For most task types the event is deleted only when this task is
 * the LAST subscriber to the event source (flg < 2); taskTHREAD and
 * taskRTC are fully torn down here and return early.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	int flg;	/* count of tasks subscribed to the same event source */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.fd = 0 };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
			/* delete the filter only when we are the last reader on this fd */
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* keep EPOLLOUT if a writer still watches this fd */
			ee.data.fd = TASK_FD(t);
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events = EPOLLOUT;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
			else
				ee.events |= (EPOLLIN | EPOLLPRI | EPOLLRDHUP);
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);

				/* optimize select */
				for (i = r->root_kq - 1; i > 2; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
						break;
				if (i > 2)
					r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
			/* delete the filter only when we are the last writer on this fd */
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* keep EPOLLIN flags if a reader still watches this fd */
			ee.data.fd = TASK_FD(t);
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select */
				for (i = r->root_kq - 1; i > 2; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
						break;
				if (i > 2)
					r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
			/* kqueue timers are keyed by TASK_DATA (the timer ident) */
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) != TASK_DATA(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
			/* vnode watches are keyed by the watched file descriptor */
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
			/* process watches are keyed by pid stored in TASK_VAL */
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
			/* signal watches are keyed by signal number in TASK_VAL */
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* restore signal */
			if (flg < 2)
				signal(TASK_VAL(t), SIG_DFL);
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
			/* TASK_VAL holds a malloc'd struct aiocb*; cancel and free it */
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
			/* TASK_VAL holds a malloc'd array of aiocb pointers,
			 * TASK_DATLEN its element count; cancel and free each */
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					free(acbs[i]);
				}
				free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
			/* user events are keyed by the ident stored in TASK_VAL */
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
			/* cancel and reap the worker thread, then recycle the
			 * task in place (no engine event to delete) */
#ifdef HAVE_LIBPTHREAD
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			return NULL;
#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			/* dispose the POSIX timer (id in TASK_FLAG); on kqueue
			 * also cancel the proxy task referenced by TASK_RET */
			timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) != TASK_DATA(t))
					continue;
				else
					flg++;

			/* restore signal */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

	/* apply the prepared change to the engine; for epoll a non-NULL
	 * event pointer is passed even on DEL (needed by old kernels —
	 * NOTE(review): presumably intentional, confirm minimum kernel) */
#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
#endif
	return NULL;
}
  501: 
  502: #ifdef HAVE_LIBPTHREAD
  503: /*
  504:  * sched_hook_thread() - Default THREAD hook
  505:  *
  506:  * @task = current task
  507:  * @arg = pthread attributes
  508:  * return: <0 errors and 0 ok
  509:  */
  510: void *
  511: sched_hook_thread(void *task, void *arg)
  512: {
  513: 	sched_task_t *t = task;
  514: 	pthread_t tid;
  515: 	sigset_t s, o;
  516: 
  517: 	if (!t || !TASK_ROOT(t))
  518: 		return (void*) -1;
  519: 
  520: 	sigfillset(&s);
  521: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  522: 	errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  523: 			(void *(*)(void*)) _sched_threadWrapper, t);
  524: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  525: 
  526: 	if (errno) {
  527: 		LOGERR;
  528: 		return (void*) -1;
  529: 	} else
  530: 		TASK_VAL(t) = (u_long) tid;
  531: 
  532: 	if (!TASK_ISLOCKED(t))
  533: 		TASK_LOCK(t);
  534: 
  535: 	return NULL;
  536: }
  537: #endif
  538: 
  539: /*
  540:  * sched_hook_read() - Default READ hook
  541:  *
  542:  * @task = current task
  543:  * @arg = unused
  544:  * return: <0 errors and 0 ok
  545:  */
  546: void *
  547: sched_hook_read(void *task, void *arg __unused)
  548: {
  549: 	sched_task_t *t = task;
  550: 	sched_root_task_t *r = NULL;
  551: #if SUP_ENABLE == KQ_SUPPORT
  552: 	struct kevent chg[1];
  553: 	struct timespec timeout = { 0, 0 };
  554: #elif SUP_ENABLE == EP_SUPPORT
  555: 	struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI | EPOLLRDHUP, .data.fd = 0 };
  556: 	int flg = 0;
  557: #endif
  558: 
  559: 	if (!t || !TASK_ROOT(t))
  560: 		return (void*) -1;
  561: 	else
  562: 		r = TASK_ROOT(t);
  563: 
  564: #if SUP_ENABLE == KQ_SUPPORT
  565: #ifdef __NetBSD__
  566: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  567: #else
  568: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  569: #endif
  570: 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  571: 		if (r->root_hooks.hook_exec.exception)
  572: 			r->root_hooks.hook_exec.exception(r, NULL);
  573: 		else
  574: 			LOGERR;
  575: 		return (void*) -1;
  576: 	}
  577: #elif SUP_ENABLE == EP_SUPPORT
  578: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
  579: 		flg |= 1;
  580: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
  581: 		flg |= 2;
  582: 		ee.events |= EPOLLOUT;
  583: 	}
  584: 
  585: 	ee.data.fd = TASK_FD(t);
  586: 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
  587: 		if (r->root_hooks.hook_exec.exception)
  588: 			r->root_hooks.hook_exec.exception(r, NULL);
  589: 		else
  590: 			LOGERR;
  591: 		return (void*) -1;
  592: 	} else
  593: 		FD_SET(TASK_FD(t), &r->root_fds[0]);
  594: #else
  595: 	FD_SET(TASK_FD(t), &r->root_fds[0]);
  596: 	if (TASK_FD(t) >= r->root_kq)
  597: 		r->root_kq = TASK_FD(t) + 1;
  598: #endif
  599: 
  600: 	return NULL;
  601: }
  602: 
  603: /*
  604:  * sched_hook_write() - Default WRITE hook
  605:  *
  606:  * @task = current task
  607:  * @arg = unused
  608:  * return: <0 errors and 0 ok
  609:  */
  610: void *
  611: sched_hook_write(void *task, void *arg __unused)
  612: {
  613: 	sched_task_t *t = task;
  614: 	sched_root_task_t *r = NULL;
  615: #if SUP_ENABLE == KQ_SUPPORT
  616: 	struct kevent chg[1];
  617: 	struct timespec timeout = { 0, 0 };
  618: #elif SUP_ENABLE == EP_SUPPORT
  619: 	struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
  620: 	int flg = 0;
  621: #endif
  622: 
  623: 	if (!t || !TASK_ROOT(t))
  624: 		return (void*) -1;
  625: 	else
  626: 		r = TASK_ROOT(t);
  627: 
  628: #if SUP_ENABLE == KQ_SUPPORT
  629: #ifdef __NetBSD__
  630: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  631: #else
  632: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  633: #endif
  634: 	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  635: 		if (r->root_hooks.hook_exec.exception)
  636: 			r->root_hooks.hook_exec.exception(r, NULL);
  637: 		else
  638: 			LOGERR;
  639: 		return (void*) -1;
  640: 	}
  641: #elif SUP_ENABLE == EP_SUPPORT
  642: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
  643: 		flg |= 1;
  644: 		ee.events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
  645: 	}
  646: 	if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
  647: 		flg |= 2;
  648: 
  649: 	ee.data.fd = TASK_FD(t);
  650: 	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
  651: 		if (r->root_hooks.hook_exec.exception)
  652: 			r->root_hooks.hook_exec.exception(r, NULL);
  653: 		else
  654: 			LOGERR;
  655: 		return (void*) -1;
  656: 	} else
  657: 		FD_SET(TASK_FD(t), &r->root_fds[1]);
  658: #else
  659: 	FD_SET(TASK_FD(t), &r->root_fds[1]);
  660: 	if (TASK_FD(t) >= r->root_kq)
  661: 		r->root_kq = TASK_FD(t) + 1;
  662: #endif
  663: 
  664: 	return NULL;
  665: }
  666: 
  667: /*
  668:  * sched_hook_alarm() - Default ALARM hook
  669:  *
  670:  * @task = current task
  671:  * @arg = unused
  672:  * return: <0 errors and 0 ok
  673:  */
  674: void *
  675: sched_hook_alarm(void *task, void *arg __unused)
  676: {
  677: #if SUP_ENABLE == KQ_SUPPORT
  678: 	sched_task_t *t = task;
  679: 	struct kevent chg[1];
  680: 	struct timespec timeout = { 0, 0 };
  681: 
  682: 	if (!t || !TASK_ROOT(t))
  683: 		return (void*) -1;
  684: 
  685: #ifdef __NetBSD__
  686: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  687: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  688: 			(intptr_t) TASK_DATA(t));
  689: #else
  690: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  691: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  692: 			(void*) TASK_DATA(t));
  693: #endif
  694: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  695: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  696: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  697: 		else
  698: 			LOGERR;
  699: 		return (void*) -1;
  700: 	}
  701: 
  702: #endif
  703: 	return NULL;
  704: }
  705: 
  706: /*
  707:  * sched_hook_node() - Default NODE hook
  708:  *
  709:  * @task = current task
  710:  * @arg = unused
  711:  * return: <0 errors and 0 ok
  712:  */
  713: void *
  714: sched_hook_node(void *task, void *arg __unused)
  715: {
  716: #if SUP_ENABLE == KQ_SUPPORT
  717: 	sched_task_t *t = task;
  718: 	struct kevent chg[1];
  719: 	struct timespec timeout = { 0, 0 };
  720: 
  721: 	if (!t || !TASK_ROOT(t))
  722: 		return (void*) -1;
  723: 
  724: #ifdef __NetBSD__
  725: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  726: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  727: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  728: #else
  729: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  730: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  731: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  732: #endif
  733: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  734: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  735: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  736: 		else
  737: 			LOGERR;
  738: 		return (void*) -1;
  739: 	}
  740: 
  741: #endif
  742: 	return NULL;
  743: }
  744: 
  745: /*
  746:  * sched_hook_proc() - Default PROC hook
  747:  *
  748:  * @task = current task
  749:  * @arg = unused
  750:  * return: <0 errors and 0 ok
  751:  */
  752: void *
  753: sched_hook_proc(void *task, void *arg __unused)
  754: {
  755: #if SUP_ENABLE == KQ_SUPPORT
  756: 	sched_task_t *t = task;
  757: 	struct kevent chg[1];
  758: 	struct timespec timeout = { 0, 0 };
  759: 
  760: 	if (!t || !TASK_ROOT(t))
  761: 		return (void*) -1;
  762: 
  763: #ifdef __NetBSD__
  764: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  765: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  766: #else
  767: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  768: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  769: #endif
  770: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  771: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  772: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  773: 		else
  774: 			LOGERR;
  775: 		return (void*) -1;
  776: 	}
  777: 
  778: #endif
  779: 	return NULL;
  780: }
  781: 
  782: /*
  783:  * sched_hook_signal() - Default SIGNAL hook
  784:  *
  785:  * @task = current task
  786:  * @arg = unused
  787:  * return: <0 errors and 0 ok
  788:  */
  789: void *
  790: sched_hook_signal(void *task, void *arg __unused)
  791: {
  792: #if SUP_ENABLE == KQ_SUPPORT
  793: 	sched_task_t *t = task;
  794: 	struct kevent chg[1];
  795: 	struct timespec timeout = { 0, 0 };
  796: 
  797: 	if (!t || !TASK_ROOT(t))
  798: 		return (void*) -1;
  799: 
  800: 	/* ignore signal */
  801: 	signal(TASK_VAL(t), SIG_IGN);
  802: 
  803: #ifdef __NetBSD__
  804: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
  805: #else
  806: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
  807: #endif
  808: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  809: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  810: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  811: 		else
  812: 			LOGERR;
  813: 		return (void*) -1;
  814: 	}
  815: #endif
  816: 	return NULL;
  817: }
  818: 
  819: /*
  820:  * sched_hook_user() - Default USER hook
  821:  *
  822:  * @task = current task
  823:  * @arg = unused
  824:  * return: <0 errors and 0 ok
  825:  */
  826: #ifdef EVFILT_USER
  827: void *
  828: sched_hook_user(void *task, void *arg __unused)
  829: {
  830: #if SUP_ENABLE == KQ_SUPPORT
  831: 	sched_task_t *t = task;
  832: 	struct kevent chg[1];
  833: 	struct timespec timeout = { 0, 0 };
  834: 
  835: 	if (!t || !TASK_ROOT(t))
  836: 		return (void*) -1;
  837: 
  838: #ifdef __NetBSD__
  839: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  840: 			0, (intptr_t) TASK_VAL(t));
  841: #else
  842: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  843: 			0, (void*) TASK_VAL(t));
  844: #endif
  845: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  846: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  847: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  848: 		else
  849: 			LOGERR;
  850: 		return (void*) -1;
  851: 	}
  852: 
  853: #endif
  854: 	return NULL;
  855: }
  856: #endif
  857: 
  858: #if SUP_ENABLE == KQ_SUPPORT
  859: static inline void 
  860: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
  861: {
  862: 	struct kevent evt[1];
  863: 	register int i;
  864: 	sched_task_t *task, *tmp;
  865: 	struct timespec now = { 0, 0 };
  866: #ifdef AIO_SUPPORT
  867: 	int len, fd;
  868: 	struct aiocb *acb;
  869: #ifdef EVFILT_LIO
  870: 	int l;
  871: 	register int j;
  872: 	off_t off;
  873: 	struct aiocb **acbs;
  874: 	struct iovec *iv;
  875: #endif	/* EVFILT_LIO */
  876: #endif	/* AIO_SUPPORT */
  877: 
  878: 	for (i = 0; i < en; i++) {
  879: 		memcpy(evt, &res[i], sizeof evt);
  880: 		evt->flags = EV_DELETE;
  881: 		/* Put read/write task to ready queue */
  882: 		switch (res[i].filter) {
  883: 			case EVFILT_READ:
  884: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  885: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  886: 						continue;
  887: 					else {
  888: 						TASK_RET(task) = res[i].data;
  889: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  890: 					}
  891: 					/* remove read handle */
  892: 					remove_task_from(task, &r->root_read);
  893: 
  894: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  895:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  896: 							task->task_type = taskUNUSE;
  897: 							insert_task_to(task, &r->root_unuse);
  898: 						} else {
  899: 							task->task_type = taskREADY;
  900: 							insert_task_to(task, &r->root_ready);
  901: 						}
  902: 					} else {
  903: 						task->task_type = taskREADY;
  904: 						insert_task_to(task, &r->root_ready);
  905: 					}
  906: 				}
  907: 				break;
  908: 			case EVFILT_WRITE:
  909: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  910: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  911: 						continue;
  912: 					else {
  913: 						TASK_RET(task) = res[i].data;
  914: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  915: 					}
  916: 					/* remove write handle */
  917: 					remove_task_from(task, &r->root_write);
  918: 
  919: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  920:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  921: 							task->task_type = taskUNUSE;
  922: 							insert_task_to(task, &r->root_unuse);
  923: 						} else {
  924: 							task->task_type = taskREADY;
  925: 							insert_task_to(task, &r->root_ready);
  926: 						}
  927: 					} else {
  928: 						task->task_type = taskREADY;
  929: 						insert_task_to(task, &r->root_ready);
  930: 					}
  931: 				}
  932: 				break;
  933: 			case EVFILT_TIMER:
  934: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  935: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  936: 						continue;
  937: 					else {
  938: 						TASK_RET(task) = res[i].data;
  939: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  940: 					}
  941: 					/* remove alarm handle */
  942: 					transit_task2ready(task, &r->root_alarm);
  943: 				}
  944: 				break;
  945: 			case EVFILT_VNODE:
  946: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  947: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  948: 						continue;
  949: 					else {
  950: 						TASK_RET(task) = res[i].data;
  951: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  952: 					}
  953: 					/* remove node handle */
  954: 					transit_task2ready(task, &r->root_node);
  955: 				}
  956: 				break;
  957: 			case EVFILT_PROC:
  958: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  959: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  960: 						continue;
  961: 					else {
  962: 						TASK_RET(task) = res[i].data;
  963: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  964: 					}
  965: 					/* remove proc handle */
  966: 					transit_task2ready(task, &r->root_proc);
  967: 				}
  968: 				break;
  969: 			case EVFILT_SIGNAL:
  970: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  971: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  972: 						continue;
  973: 					else {
  974: 						TASK_RET(task) = res[i].data;
  975: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  976: 					}
  977: 					/* remove signal handle */
  978: 					transit_task2ready(task, &r->root_signal);
  979: 				}
  980: 				break;
  981: #ifdef AIO_SUPPORT
  982: 			case EVFILT_AIO:
  983: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
  984: 					acb = (struct aiocb*) TASK_VAL(task);
  985: 					if (acb != ((struct aiocb*) res[i].udata))
  986: 						continue;
  987: 					else {
  988: 						TASK_RET(task) = res[i].data;
  989: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  990: 					}
  991: 					/* remove user handle */
  992: 					transit_task2ready(task, &r->root_aio);
  993: 
  994: 					fd = acb->aio_fildes;
  995: 					if ((len = aio_return(acb)) != -1) {
  996: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
  997: 							LOGERR;
  998: 					} else
  999: 						LOGERR;
 1000: 					free(acb);
 1001: 					TASK_DATLEN(task) = (u_long) len;
 1002: 					TASK_FD(task) = fd;
 1003: 				}
 1004: 				break;
 1005: #ifdef EVFILT_LIO
 1006: 			case EVFILT_LIO:
 1007: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 1008: 					acbs = (struct aiocb**) TASK_VAL(task);
 1009: 					if (acbs != ((struct aiocb**) res[i].udata))
 1010: 						continue;
 1011: 					else {
 1012: 						TASK_RET(task) = res[i].data;
 1013: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1014: 					}
 1015: 					/* remove user handle */
 1016: 					transit_task2ready(task, &r->root_lio);
 1017: 
 1018: 					iv = (struct iovec*) TASK_DATA(task);
 1019: 					fd = acbs[0]->aio_fildes;
 1020: 					off = acbs[0]->aio_offset;
 1021: 					for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
 1022: 						if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
 1023: 							l = 0;
 1024: 						else
 1025: 							l = iv[i].iov_len;
 1026: 						free(acbs[i]);
 1027: 					}
 1028: 					free(acbs);
 1029: 					TASK_DATLEN(task) = (u_long) len;
 1030: 					TASK_FD(task) = fd;
 1031: 
 1032: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
 1033: 						LOGERR;
 1034: 				}
 1035: 				break;
 1036: #endif	/* EVFILT_LIO */
 1037: #endif	/* AIO_SUPPORT */
 1038: #ifdef EVFILT_USER
 1039: 			case EVFILT_USER:
 1040: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1041: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
 1042: 						continue;
 1043: 					else {
 1044: 						TASK_RET(task) = res[i].data;
 1045: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1046: 					}
 1047: 					/* remove user handle */
 1048: 					transit_task2ready(task, &r->root_user);
 1049: 				}
 1050: 				break;
 1051: #endif	/* EVFILT_USER */
 1052: 		}
 1053: 
 1054: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1055: 			if (r->root_hooks.hook_exec.exception)
 1056: 				r->root_hooks.hook_exec.exception(r, NULL);
 1057: 			else
 1058: 				LOGERR;
 1059: 		}
 1060: 	}
 1061: }
 1062: #endif
 1063: 
 1064: #if SUP_ENABLE == EP_SUPPORT
 1065: static inline void
 1066: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
 1067: {
 1068: 	register int i, flg;
 1069: 	int ops = EPOLL_CTL_DEL;
 1070: 	sched_task_t *task, *tmp;
 1071: 	struct epoll_event evt[1];
 1072: 
 1073: 	for (i = 0; i < en; i++) {
 1074: 		memcpy(evt, &res[i], sizeof evt);
 1075: 
 1076: 		if (evt->events & (EPOLLIN | EPOLLPRI | EPOLLET)) {
 1077: 			flg = 0;
 1078: 			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 1079: 				if (TASK_FD(task) != evt->data.fd)
 1080: 					continue;
 1081: 				else {
 1082: 					flg++;
 1083: 					FD_CLR(TASK_FD(task), &r->root_fds[0]);
 1084: 					TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1085: 
 1086: 					evt->events &= ~(EPOLLIN | EPOLLPRI | EPOLLET | EPOLLRDHUP);
 1087: 					if (FD_ISSET(TASK_FD(task), &r->root_fds[1])) {
 1088: 						ops = EPOLL_CTL_MOD;
 1089: 						evt->events |= EPOLLOUT;
 1090: 					}
 1091: 				}
 1092: 				/* remove read handle */
 1093: 				remove_task_from(task, &r->root_read);
 1094: 
 1095: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
 1096:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1097: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1098: 						task->task_type = taskUNUSE;
 1099: 						insert_task_to(task, &r->root_unuse);
 1100: 					} else {
 1101: 						task->task_type = taskREADY;
 1102: 						insert_task_to(task, &r->root_ready);
 1103: 					}
 1104: 				} else {
 1105: 					task->task_type = taskREADY;
 1106: 					insert_task_to(task, &r->root_ready);
 1107: 				}
 1108: 			}
 1109: 			if (flg > 1)
 1110: 				ops = EPOLL_CTL_MOD;
 1111: 		}
 1112: 
 1113: 		if (evt->events & EPOLLOUT) {
 1114: 			flg = 0;
 1115: 			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 1116: 				if (TASK_FD(task) != evt->data.fd)
 1117: 					continue;
 1118: 				else {
 1119: 					flg++;
 1120: 					FD_CLR(TASK_FD(task), &r->root_fds[1]);
 1121: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1122: 							FIONWRITE, &TASK_RET(task));
 1123: 
 1124: 					evt->events &= ~EPOLLOUT;
 1125: 					if (FD_ISSET(TASK_FD(task), &r->root_fds[0])) {
 1126: 						ops = EPOLL_CTL_MOD;
 1127: 						evt->events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
 1128: 					}
 1129: 				}
 1130: 				/* remove write handle */
 1131: 				remove_task_from(task, &r->root_write);
 1132: 
 1133: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
 1134:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1135: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1136: 						task->task_type = taskUNUSE;
 1137: 						insert_task_to(task, &r->root_unuse);
 1138: 					} else {
 1139: 						task->task_type = taskREADY;
 1140: 						insert_task_to(task, &r->root_ready);
 1141: 					}
 1142: 				} else {
 1143: 					task->task_type = taskREADY;
 1144: 					insert_task_to(task, &r->root_ready);
 1145: 				}
 1146: 			}
 1147: 			if (flg > 1)
 1148: 				ops = EPOLL_CTL_MOD;
 1149: 		}
 1150: 
 1151: 		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
 1152: 			if (r->root_hooks.hook_exec.exception) {
 1153: 				r->root_hooks.hook_exec.exception(r, NULL);
 1154: 			} else
 1155: 				LOGERR;
 1156: 		}
 1157: 	}
 1158: }
 1159: #endif
 1160: 
 1161: #if SUP_ENABLE == NO_SUPPORT
/*
 * fetch_hook_select_proceed() - dispatch select() results into the ready queue
 *
 * @en = return value of select() (number of ready descriptors)
 * @rfd/@wfd/@xfd = read / write / exception fd sets as modified by select()
 * @r = root task
 *
 * Scans fds [0, root_kq); matching read/write tasks are moved to root_ready
 * (or root_unuse when the exception hook asks to drop the task), then
 * root_kq is shrunk to the highest still-monitored fd + 1.
 */
static inline void 
fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
{
	register int i, flg;
	sched_task_t *task, *tmp;

	/* skip select check if return value from select is zero */
	if (!en)
		return;

	/* in select mode root_kq holds max-fd + 1 (the select() nfds arg) */
	for (i = 0; i < r->root_kq; i++) {
		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
			flg = 0;
			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
				if (TASK_FD(task) != i)
					continue;
				else {
					flg++;
					/* bytes available to read -> TASK_RET */
					TASK_FLAG(task) = ioctl(TASK_FD(task), 
							FIONREAD, &TASK_RET(task));
				}
				/* remove read handle */
				remove_task_from(task, &r->root_read);

				/* NOTE(review): unlike the kqueue/epoll dispatchers, the
				 * exception hook fires for every dispatched task here,
				 * not only on error/EOF conditions — verify intent. */
				if (r->root_hooks.hook_exec.exception) {
 					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}
			}
			/* remove resource: stop watching fd once a task consumed it */
			if (flg)
				FD_CLR(i, &r->root_fds[0]);
		}

		if (FD_ISSET(i, &wfd)) {
			flg = 0;
			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
				if (TASK_FD(task) != i)
					continue;
				else {
					flg++;
					/* NOTE(review): FIONWRITE is a BSD ioctl; not
					 * available on stock Linux — verify build target */
					TASK_FLAG(task) = ioctl(TASK_FD(task), 
							FIONWRITE, &TASK_RET(task));
				}
				/* remove write handle */
				remove_task_from(task, &r->root_write);

				if (r->root_hooks.hook_exec.exception) {
 					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}
			}
			/* remove resource: stop watching fd once a task consumed it */
			if (flg)
				FD_CLR(i, &r->root_fds[1]);
		}
	}

	/* optimize select: shrink nfds to highest fd still monitored, +1;
	 * fds 0-2 (stdio) are never reclaimed */
	for (i = r->root_kq - 1; i > 2; i--)
		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
			break;
	if (i > 2)
		r->root_kq = i + 1;
}
 1243: #endif
 1244: 
 1245: /*
 1246:  * sched_hook_fetch() - Default FETCH hook
 1247:  *
 1248:  * @root = root task
 1249:  * @arg = unused
 1250:  * return: NULL error or !=NULL fetched task
 1251:  */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	u_long timeout = 0;	/* epoll_wait() timeout in milliseconds */
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* get new task by queue priority: event tasks first, then ready tasks;
	 * each hit is detached into the unuse queue and handed to the caller */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

#ifdef TIMER_WITHOUT_SORT
	/* unsorted timer queue: scan all timers for the earliest deadline */
	clock_gettime(CLOCK_MONOTONIC, &now);

	sched_timespecclear(&r->root_wait);
	TAILQ_FOREACH(task, &r->root_timer, task_node) {
		if (!sched_timespecisset(&r->root_wait))
			r->root_wait = TASK_TS(task);
		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
			r->root_wait = TASK_TS(task);
	}

	if (TAILQ_FIRST(&r->root_timer)) {
		/* convert earliest absolute deadline to a relative wait */
		m = r->root_wait;
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#else	/* ! TIMER_WITHOUT_SORT */
	/* sorted timer queue: head is the earliest deadline */
	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
		clock_gettime(CLOCK_MONOTONIC, &now);

		m = TASK_TS(task);
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#endif	/* TIMER_WITHOUT_SORT */
	/* if present member of task, set NOWAIT (poll without blocking) */
	if (TAILQ_FIRST(&r->root_task))
		sched_timespecclear(&r->root_wait);

	/* finite wait computed above?  (INFTIM sentinel = both fields -1;
	 * NOTE(review): this uses &&, so a mixed value such as {-1, 5ns} is
	 * routed to the infinite/poll branch — verify the invariants of
	 * sched_timespecinf()/sched_timespecsub()) */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_wait;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_wait, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
		/* infinite wait: NULL timespec/timeval, -1 ms for epoll
		 * (NOTE(review): timeout is u_long here; -1 wraps and is
		 * narrowed back to int by epoll_wait — confirm) */
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
		/* fall back to the configured poll interval */
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	}

	/* block in the platform event mechanism */
#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	rfd = xfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif	/* KQ_SUPPORT */
		/* wait failed: report via exception hook (EINTR is benign) */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif	/* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put regular task priority task to ready queue, 
		if there is no ready task or reach max missing hit for regular task */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			r->root_miss ^= r->root_miss;	/* reset miss counter to 0 */

			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;	/* reset miss counter to 0 */

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);
	return task;
}
 1399: 
 1400: /*
 1401:  * sched_hook_exception() - Default EXCEPTION hook
 1402:  *
 1403:  * @root = root task
 1404:  * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
 1405:  * return: <0 errors and 0 ok
 1406:  */
 1407: void *
 1408: sched_hook_exception(void *root, void *arg)
 1409: {
 1410: 	sched_root_task_t *r = root;
 1411: 
 1412: 	if (!r)
 1413: 		return NULL;
 1414: 
 1415: 	/* custom exception handling ... */
 1416: 	if (arg) {
 1417: 		if (arg == (void*) EV_EOF)
 1418: 			return NULL;
 1419: 		return (void*) -1;	/* raise scheduler error!!! */
 1420: 	}
 1421: 
 1422: 	/* if error hook exists */
 1423: 	if (r->root_hooks.hook_root.error)
 1424: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1425: 
 1426: 	/* default case! */
 1427: 	LOGERR;
 1428: 	return NULL;
 1429: }
 1430: 
 1431: /*
 1432:  * sched_hook_condition() - Default CONDITION hook
 1433:  *
 1434:  * @root = root task
 1435:  * @arg = killState from schedRun()
 1436:  * return: NULL kill scheduler loop or !=NULL ok
 1437:  */
 1438: void *
 1439: sched_hook_condition(void *root, void *arg)
 1440: {
 1441: 	sched_root_task_t *r = root;
 1442: 
 1443: 	if (!r)
 1444: 		return NULL;
 1445: 
 1446: 	return (void*) (*r->root_cond - *(intptr_t*) arg);
 1447: }
 1448: 
 1449: /*
 1450:  * sched_hook_rtc() - Default RTC hook
 1451:  *
 1452:  * @task = current task
 1453:  * @arg = unused
 1454:  * return: <0 errors and 0 ok
 1455:  */
 1456: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
void *
sched_hook_rtc(void *task, void *arg __unused)
{
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* arm a per-task POSIX timer that delivers SIGRTMIN + TASK_DATA(t),
	 * carrying the task pointer in the signal value */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		TASK_FLAG(t) = (u_long) tmr;	/* stash timer handle on the task */

#if SUP_ENABLE == KQ_SUPPORT
	/* kqueue build: route the RT signal through a scheduler signal task */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
				t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	/* non-kqueue build: install a plain sigaction handler instead */
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	/* one-shot relative expiry taken from the task's timespec value */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* NOTE(review): in non-KQ builds sigt is still NULL here —
		 * confirm schedCancel() tolerates a NULL task */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}

	return NULL;
}
 1528: #endif	/* HAVE_TIMER_CREATE */

FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>