File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / hooks.c
Revision 1.37.2.1: download - view: text, annotated - select for diffs - revision graph
Mon Nov 28 23:06:06 2022 UTC (22 months ago) by misho
Branches: sched7_1
Diff to: branchpoint 1.37: preferred, unified
adds new fd_set

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.37.2.1 2022/11/28 23:06:06 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004 - 2022
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: static inline void
   51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
   52: {
   53: 	remove_task_from(t, q);
   54: 
   55: 	t->task_type = taskREADY;
   56: 	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
   57: }
   58: 
#ifdef HAVE_LIBPTHREAD
/* _sched_threadWrapper() - thread entry point for taskTHREAD tasks:
 *  enables cancellation, runs the task callback via schedCall(), stores
 *  the callback's return value in the root, recycles the task and
 *  terminates the thread with the callback's return value */
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);	/* nothing to run; exit with NULL */
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	ret = schedCall(t);
	r->root_ret = ret;

	/* TASK_VAL holds the pthread id while the task is live (set by
	 * sched_hook_thread); clear it and recycle the task so a later
	 * cancel sees the task already cleaned up */
	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
#endif
   90: 
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
#if SUP_ENABLE == KQ_SUPPORT
/* _sched_rtcWrapper() - kqueue-mode trampoline for taskRTC tasks: the
 *  real task is carried in TASK_DATA and its POSIX timer id in
 *  TASK_DATLEN; delete the timer, fire the task, then recycle it */
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		timer_delete((timer_t) TASK_DATLEN(t));	/* one-shot: drop the timer */
	}

	ret = schedCall(task);

	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
#else
/* _sched_rtcSigWrapper() - signal-mode handler for taskRTC tasks: the
 *  task pointer travels in si_value.sival_ptr and its POSIX timer id in
 *  TASK_FLAG; delete the timer, fire the task, then recycle it.
 *  NOTE(review): runs in signal-handler context — schedCall() and the
 *  queue manipulation are presumably async-signal-tolerant by design;
 *  confirm before changing. */
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		timer_delete((timer_t) TASK_FLAG(task));	/* one-shot: drop the timer */

		TASK_RET(task) = (intptr_t) schedCall(task);

		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
#endif
#endif
  129: 
  130: /*
  131:  * sched_hook_init() - Default INIT hook
  132:  *
  133:  * @root = root task
  134:  * @arg = unused
  135:  * return: <0 errors and 0 ok
  136:  */
  137: void *
  138: sched_hook_init(void *root, void *arg __unused)
  139: {
  140: 	sched_root_task_t *r = root;
  141: 
  142: 	if (!r)
  143: 		return (void*) -1;
  144: 
  145: #if SUP_ENABLE == KQ_SUPPORT
  146: 	r->root_kq = kqueue();
  147: 	if (r->root_kq == -1) {
  148: 		LOGERR;
  149: 		return (void*) -1;
  150: 	}
  151: #elif SUP_ENABLE == EP_SUPPORT
  152: 	r->root_kq = epoll_create(KQ_EVENTS);
  153: 	if (r->root_kq == -1) {
  154: 		LOGERR;
  155: 		return (void*) -1;
  156: 	}
  157: #else
  158: 	r->root_kq ^= r->root_kq;
  159: #endif
  160: 
  161: 	FD_ZERO(&r->root_fds[0]);
  162: 	FD_ZERO(&r->root_fds[1]);
  163: 	FD_ZERO(&r->root_fds[2]);
  164: 
  165: 	return NULL;
  166: }
  167: 
  168: /*
  169:  * sched_hook_fini() - Default FINI hook
  170:  *
  171:  * @root = root task
  172:  * @arg = unused
  173:  * return: <0 errors and 0 ok
  174:  */
  175: void *
  176: sched_hook_fini(void *root, void *arg __unused)
  177: {
  178: 	sched_root_task_t *r = root;
  179: 
  180: 	if (!r)
  181: 		return (void*) -1;
  182: 
  183: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
  184: 	if (r->root_kq > 2) {
  185: 		close(r->root_kq);
  186: 		r->root_kq = 0;
  187: 	}
  188: #else
  189: 	r->root_kq ^= r->root_kq;
  190: #endif
  191: 
  192: 	FD_ZERO(&r->root_fds[2]);
  193: 	FD_ZERO(&r->root_fds[1]);
  194: 	FD_ZERO(&r->root_fds[0]);
  195: 
  196: 	return NULL;
  197: }
  198: 
/*
 * sched_hook_cancel() - Default CANCEL hook
 *
 * Deregisters whatever the subscribe hooks registered with the kernel for
 * a task that is being cancelled (kqueue filter, epoll interest, select()
 * fd_set bits, pthread, POSIX timer, outstanding AIO).  A kernel-level
 * event is only removed when this task is the last subscriber to it
 * (flg < 2); otherwise the event is left in place for its siblings.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	int flg = 0;	/* count of tasks subscribed to the same event */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.fd = 0 };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
			/* last subscriber: delete the filter, otherwise no-op flags */
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* rebuild the epoll interest mask from the shadow fd_sets */
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events |= EPOLLOUT;

			if (flg < 2) {
				/* last read subscriber: drop read/pri interest */
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);
			} else {
				if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
					ee.events |= EPOLLIN;
				if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
					ee.events |= EPOLLPRI;
			}
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);

				/* optimize select: shrink root_kq to highest fd + 1 */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || 
							FD_ISSET(i, &r->root_fds[1]) || 
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* rebuild the epoll interest mask from the shadow fd_sets */
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events |= EPOLLIN;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
				ee.events |= EPOLLPRI;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select: shrink root_kq to highest fd + 1 */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || 
							FD_ISSET(i, &r->root_fds[1]) || 
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* restore signal disposition (was SIG_IGN, see sched_hook_signal) */
			if (flg < 2)
				signal(TASK_VAL(t), SIG_DFL);
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* TASK_VAL owns the aiocb: cancel, reap and free it */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				e_free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
			/* TASK_VAL owns an array of TASK_DATLEN aiocbs: cancel and free all */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					e_free(acbs[i]);
				}
				e_free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0, 
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				/* re-check: _sched_threadWrapper may have recycled the
				 * task itself while we were joining */
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			return NULL;
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			timer_delete((timer_t) TASK_FLAG(t));	/* TASK_FLAG holds the timer id */
#if SUP_ENABLE == KQ_SUPPORT
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;

			/* restore signal disposition for the realtime signal */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

	/* push the change built above down to the kernel */
#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	if (TASK_TYPE(t) == taskREAD || TASK_TYPE(t) == taskWRITE) {
		epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
	}
#endif
	return NULL;
}
  490: 
  491: #ifdef HAVE_LIBPTHREAD
  492: /*
  493:  * sched_hook_thread() - Default THREAD hook
  494:  *
  495:  * @task = current task
  496:  * @arg = pthread attributes
  497:  * return: <0 errors and 0 ok
  498:  */
  499: void *
  500: sched_hook_thread(void *task, void *arg)
  501: {
  502: 	sched_task_t *t = task;
  503: 	pthread_t tid;
  504: 	sigset_t s, o;
  505: 
  506: 	if (!t || !TASK_ROOT(t))
  507: 		return (void*) -1;
  508: 
  509: 	sigfillset(&s);
  510: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  511: 	errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  512: 			(void *(*)(void*)) _sched_threadWrapper, t);
  513: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  514: 
  515: 	if (errno) {
  516: 		LOGERR;
  517: 		return (void*) -1;
  518: 	} else
  519: 		TASK_VAL(t) = (u_long) tid;
  520: 
  521: 	if (!TASK_ISLOCKED(t))
  522: 		TASK_LOCK(t);
  523: 
  524: 	return NULL;
  525: }
  526: #endif
  527: 
/*
 * sched_hook_read() - Default READ hook
 *
 * Registers the task's fd for read readiness with the active backend.
 *
 * @task = current task
 * @arg = epoll mode: EPOLL event mask override (0 = EPOLLIN | EPOLLPRI);
 *        select mode: bit 1 = read set, bit 2 = except set (0 = both)
 * return: <0 errors and 0 ok
 */
void *
sched_hook_read(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	intptr_t mask = (intptr_t) arg;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee;
	int flg = 0;	/* non-zero: fd already registered, must MOD not ADD */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLIN | EPOLLPRI;
	ee.data.fd = TASK_FD(t);
	ee.events = mask;
	/* preserve interest already tracked in the shadow fd_sets */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else {
		/* remember the new interest in the shadow sets */
		if (mask & EPOLLIN)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & EPOLLPRI)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}
#else
	if (!mask) {
		FD_SET(TASK_FD(t), &r->root_fds[0]);
		FD_SET(TASK_FD(t), &r->root_fds[2]);
	} else {
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & 2)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}

	/* keep root_kq as the select() high-water mark (highest fd + 1) */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
  614: 
/*
 * sched_hook_write() - Default WRITE hook
 *
 * Registers the task's fd for write readiness with the active backend.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_write(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee;
	int flg = 0;	/* non-zero: fd already registered, must MOD not ADD */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	ee.data.fd = TASK_FD(t);
	ee.events = EPOLLOUT;

	/* preserve interest already tracked in the shadow fd_sets */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* remember the new write interest in the shadow set */
		FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
	FD_SET(TASK_FD(t), &r->root_fds[1]);

	/* keep root_kq as the select() high-water mark (highest fd + 1) */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
  686: 
  687: /*
  688:  * sched_hook_alarm() - Default ALARM hook
  689:  *
  690:  * @task = current task
  691:  * @arg = unused
  692:  * return: <0 errors and 0 ok
  693:  */
  694: void *
  695: sched_hook_alarm(void *task, void *arg __unused)
  696: {
  697: #if SUP_ENABLE == KQ_SUPPORT
  698: 	sched_task_t *t = task;
  699: 	struct kevent chg[1];
  700: 	struct timespec timeout = { 0, 0 };
  701: 
  702: 	if (!t || !TASK_ROOT(t))
  703: 		return (void*) -1;
  704: 
  705: #ifdef __NetBSD__
  706: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  707: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  708: 			(intptr_t) TASK_DATA(t));
  709: #else
  710: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  711: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  712: 			(void*) TASK_DATA(t));
  713: #endif
  714: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  715: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  716: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  717: 		else
  718: 			LOGERR;
  719: 		return (void*) -1;
  720: 	}
  721: 
  722: #endif
  723: 	return NULL;
  724: }
  725: 
  726: /*
  727:  * sched_hook_node() - Default NODE hook
  728:  *
  729:  * @task = current task
  730:  * @arg = if arg == 42 then waiting for all events
  731:  * return: <0 errors and 0 ok
  732:  */
  733: void *
  734: sched_hook_node(void *task, void *arg)
  735: {
  736: #if SUP_ENABLE == KQ_SUPPORT
  737: 	sched_task_t *t = task;
  738: 	struct kevent chg[1];
  739: 	struct timespec timeout = { 0, 0 };
  740: 	u_int addflags = (u_int) (uintptr_t) arg;
  741: 
  742: 	if (!t || !TASK_ROOT(t))
  743: 		return (void*) -1;
  744: 
  745: #ifdef __NetBSD__
  746: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  747: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  748: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (intptr_t) TASK_FD(t));
  749: #else
  750: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  751: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  752: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (void*) TASK_FD(t));
  753: #endif
  754: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  755: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  756: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  757: 		else
  758: 			LOGERR;
  759: 		return (void*) -1;
  760: 	}
  761: 
  762: #endif
  763: 	return NULL;
  764: }
  765: 
  766: /*
  767:  * sched_hook_proc() - Default PROC hook
  768:  *
  769:  * @task = current task
  770:  * @arg = unused
  771:  * return: <0 errors and 0 ok
  772:  */
  773: void *
  774: sched_hook_proc(void *task, void *arg __unused)
  775: {
  776: #if SUP_ENABLE == KQ_SUPPORT
  777: 	sched_task_t *t = task;
  778: 	struct kevent chg[1];
  779: 	struct timespec timeout = { 0, 0 };
  780: 
  781: 	if (!t || !TASK_ROOT(t))
  782: 		return (void*) -1;
  783: 
  784: #ifdef __NetBSD__
  785: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  786: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  787: #else
  788: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  789: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  790: #endif
  791: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  792: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  793: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  794: 		else
  795: 			LOGERR;
  796: 		return (void*) -1;
  797: 	}
  798: 
  799: #endif
  800: 	return NULL;
  801: }
  802: 
  803: /*
  804:  * sched_hook_signal() - Default SIGNAL hook
  805:  *
  806:  * @task = current task
  807:  * @arg = unused
  808:  * return: <0 errors and 0 ok
  809:  */
  810: void *
  811: sched_hook_signal(void *task, void *arg __unused)
  812: {
  813: #if SUP_ENABLE == KQ_SUPPORT
  814: 	sched_task_t *t = task;
  815: 	struct kevent chg[1];
  816: 	struct timespec timeout = { 0, 0 };
  817: 
  818: 	if (!t || !TASK_ROOT(t))
  819: 		return (void*) -1;
  820: 
  821: 	/* ignore signal */
  822: 	signal(TASK_VAL(t), SIG_IGN);
  823: 
  824: #ifdef __NetBSD__
  825: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
  826: #else
  827: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
  828: #endif
  829: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  830: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  831: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  832: 		else
  833: 			LOGERR;
  834: 		return (void*) -1;
  835: 	}
  836: #endif
  837: 	return NULL;
  838: }
  839: 
  840: /*
  841:  * sched_hook_user() - Default USER hook
  842:  *
  843:  * @task = current task
  844:  * @arg = unused
  845:  * return: <0 errors and 0 ok
  846:  */
  847: #ifdef EVFILT_USER
  848: void *
  849: sched_hook_user(void *task, void *arg __unused)
  850: {
  851: #if SUP_ENABLE == KQ_SUPPORT
  852: 	sched_task_t *t = task;
  853: 	struct kevent chg[1];
  854: 	struct timespec timeout = { 0, 0 };
  855: 
  856: 	if (!t || !TASK_ROOT(t))
  857: 		return (void*) -1;
  858: 
  859: #ifdef __NetBSD__
  860: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  861: 			0, (intptr_t) TASK_VAL(t));
  862: #else
  863: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  864: 			0, (void*) TASK_VAL(t));
  865: #endif
  866: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  867: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  868: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  869: 		else
  870: 			LOGERR;
  871: 		return (void*) -1;
  872: 	}
  873: 
  874: #endif
  875: 	return NULL;
  876: }
  877: #endif
  878: 
  879: #if SUP_ENABLE == KQ_SUPPORT
  880: static inline void 
  881: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
  882: {
  883: 	struct kevent evt[1];
  884: 	register int i, flg;
  885: 	sched_task_t *task, *tmp;
  886: 	struct timespec now = { 0, 0 };
  887: #ifdef AIO_SUPPORT
  888: 	int len, fd;
  889: 	struct aiocb *acb;
  890: #ifdef EVFILT_LIO
  891: 	int l;
  892: 	off_t off;
  893: 	struct aiocb **acbs;
  894: 	struct iovec *iv;
  895: #endif	/* EVFILT_LIO */
  896: #endif	/* AIO_SUPPORT */
  897: 
  898: 	for (i = 0; i < en; i++) {
  899: 		memcpy(evt, &res[i], sizeof evt);
  900: 		evt->flags = EV_DELETE;
  901: 		/* Put read/write task to ready queue */
  902: 		flg = 0;
  903: 		switch (res[i].filter) {
  904: 			case EVFILT_READ:
  905: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  906: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  907: 						if (!flg) {
  908: 							TASK_RET(task) = res[i].data;
  909: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  910: 
  911: 							/* remove read handle */
  912: 							remove_task_from(task, &r->root_read);
  913: 
  914: 							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  915: 								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  916: 									task->task_type = taskUNUSE;
  917: 									insert_task_to(task, &r->root_unuse);
  918: 								} else {
  919: 									task->task_type = taskREADY;
  920: 									insert_task_to(task, &r->root_ready);
  921: 								}
  922: 							} else {
  923: 								task->task_type = taskREADY;
  924: 								insert_task_to(task, &r->root_ready);
  925: 							}
  926: 						}
  927: 						flg++;
  928: 					}
  929: 				}
  930: 				break;
  931: 			case EVFILT_WRITE:
  932: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  933: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  934: 						if (!flg) {
  935: 							TASK_RET(task) = res[i].data;
  936: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  937: 
  938: 							/* remove write handle */
  939: 							remove_task_from(task, &r->root_write);
  940: 
  941: 							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  942: 								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  943: 									task->task_type = taskUNUSE;
  944: 									insert_task_to(task, &r->root_unuse);
  945: 								} else {
  946: 									task->task_type = taskREADY;
  947: 									insert_task_to(task, &r->root_ready);
  948: 								}
  949: 							} else {
  950: 								task->task_type = taskREADY;
  951: 								insert_task_to(task, &r->root_ready);
  952: 							}
  953: 						}
  954: 						flg++;
  955: 					}
  956: 				}
  957: 				break;
  958: 			case EVFILT_TIMER:
  959: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  960: 					if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
  961: 						if (!flg) {
  962: 							TASK_RET(task) = res[i].data;
  963: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  964: 
  965: 							/* remove alarm handle */
  966: 							transit_task2ready(task, &r->root_alarm);
  967: 						}
  968: 						flg++;
  969: 					}
  970: 				}
  971: 				break;
  972: 			case EVFILT_VNODE:
  973: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  974: 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
  975: 						if (!flg) {
  976: 							TASK_RET(task) = res[i].data;
  977: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  978: 
  979: 							/* remove node handle */
  980: 							transit_task2ready(task, &r->root_node);
  981: 						}
  982: 						flg++;
  983: 					}
  984: 				}
  985: 				break;
  986: 			case EVFILT_PROC:
  987: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  988: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
  989: 						if (!flg) {
  990: 							TASK_RET(task) = res[i].data;
  991: 							TASK_FLAG(task) = (u_long) res[i].fflags;
  992: 
  993: 							/* remove proc handle */
  994: 							transit_task2ready(task, &r->root_proc);
  995: 						}
  996: 						flg++;
  997: 					}
  998: 				}
  999: 				break;
 1000: 			case EVFILT_SIGNAL:
 1001: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
 1002: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1003: 						if (!flg) {
 1004: 							TASK_RET(task) = res[i].data;
 1005: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1006: 
 1007: 							/* remove signal handle */
 1008: 							transit_task2ready(task, &r->root_signal);
 1009: 						}
 1010: 						flg++;
 1011: 					}
 1012: 				}
 1013: 				break;
 1014: #ifdef AIO_SUPPORT
 1015: 			case EVFILT_AIO:
 1016: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
 1017: 					acb = (struct aiocb*) TASK_VAL(task);
 1018: 					if (acb == ((struct aiocb*) res[i].udata)) {
 1019: 						if (!flg) {
 1020: 							TASK_RET(task) = res[i].data;
 1021: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1022: 
 1023: 							/* remove user handle */
 1024: 							transit_task2ready(task, &r->root_aio);
 1025: 
 1026: 							fd = acb->aio_fildes;
 1027: 							if ((len = aio_return(acb)) != -1) {
 1028: 								if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
 1029: 									LOGERR;
 1030: 							} else
 1031: 								LOGERR;
 1032: 							e_free(acb);
 1033: 							TASK_DATLEN(task) = (u_long) len;
 1034: 							TASK_FD(task) = fd;
 1035: 						}
 1036: 						flg++;
 1037: 					}
 1038: 				}
 1039: 				break;
 1040: #ifdef EVFILT_LIO
 1041: 			case EVFILT_LIO:
 1042: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 1043: 					acbs = (struct aiocb**) TASK_VAL(task);
 1044: 					if (acbs == ((struct aiocb**) res[i].udata)) {
 1045: 						if (!flg) {
 1046: 							TASK_RET(task) = res[i].data;
 1047: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1048: 
 1049: 							/* remove user handle */
 1050: 							transit_task2ready(task, &r->root_lio);
 1051: 
 1052: 							iv = (struct iovec*) TASK_DATA(task);
 1053: 							fd = acbs[0]->aio_fildes;
 1054: 							off = acbs[0]->aio_offset;
 1055: 							for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
 1056: 								if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
 1057: 									l = 0;
 1058: 								else
 1059: 									l = iv[i].iov_len;
 1060: 								e_free(acbs[i]);
 1061: 							}
 1062: 							e_free(acbs);
 1063: 							TASK_DATLEN(task) = (u_long) len;
 1064: 							TASK_FD(task) = fd;
 1065: 
 1066: 							if (lseek(fd, off + len, SEEK_CUR) == -1)
 1067: 								LOGERR;
 1068: 						}
 1069: 						flg++;
 1070: 					}
 1071: 				}
 1072: 				break;
 1073: #endif	/* EVFILT_LIO */
 1074: #endif	/* AIO_SUPPORT */
 1075: #ifdef EVFILT_USER
 1076: 			case EVFILT_USER:
 1077: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1078: 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
 1079: 						if (!flg) {
 1080: 							TASK_RET(task) = res[i].data;
 1081: 							TASK_FLAG(task) = (u_long) res[i].fflags;
 1082: 
 1083: 							/* remove user handle */
 1084: 							transit_task2ready(task, &r->root_user);
 1085: 						}
 1086: 						flg++;
 1087: 					}
 1088: 				}
 1089: 				break;
 1090: #endif	/* EVFILT_USER */
 1091: 		}
 1092: 
 1093: 		if (flg > 1)
 1094: 			evt->flags &= ~EV_DELETE;
 1095: 
 1096: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1097: 			if (r->root_hooks.hook_exec.exception)
 1098: 				r->root_hooks.hook_exec.exception(r, NULL);
 1099: 			else
 1100: 				LOGERR;
 1101: 		}
 1102: 	}
 1103: }
 1104: #endif
 1105: 
 1106: #if SUP_ENABLE == EP_SUPPORT
 1107: static inline void
 1108: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
 1109: {
 1110: 	register int i, rflg, wflg;
 1111: 	int ops = EPOLL_CTL_DEL;
 1112: 	sched_task_t *t, *tmp, *task;
 1113: 	struct epoll_event evt[1];
 1114: 
 1115: 	for (i = 0; i < en; i++) {
 1116: 		memcpy(evt, &res[i], sizeof evt);
 1117: 		evt->events ^= evt->events;
 1118: 		rflg = wflg = 0;
 1119: 
 1120: 		if (res[i].events & (EPOLLIN | EPOLLPRI)) {
 1121: 			task = NULL;
 1122: 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 1123: 				if (TASK_FD(t) == evt->data.fd) {
 1124: 					if (!task)
 1125: 						task = t;
 1126: 					rflg++;
 1127: 				}
 1128: 			}
 1129: 
 1130: 			if (task) {
 1131: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1132: 				/* remove read handle */
 1133: 				remove_task_from(task, &r->root_read);
 1134: 
 1135: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
 1136:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1137: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1138: 						task->task_type = taskUNUSE;
 1139: 						insert_task_to(task, &r->root_unuse);
 1140: 					} else {
 1141: 						task->task_type = taskREADY;
 1142: 						insert_task_to(task, &r->root_ready);
 1143: 					}
 1144: 				} else {
 1145: 					task->task_type = taskREADY;
 1146: 					insert_task_to(task, &r->root_ready);
 1147: 				}
 1148: 
 1149: 				if (!(res[i].events & EPOLLOUT) && FD_ISSET(evt->data.fd, &r->root_fds[1])) {
 1150: 					evt->events |= EPOLLOUT;
 1151: 					wflg = 42;
 1152: 				}
 1153: 				if (rflg > 1) {
 1154: 					if (FD_ISSET(evt->data.fd, &r->root_fds[0]))
 1155: 						evt->events |= EPOLLIN;
 1156: 					if (FD_ISSET(evt->data.fd, &r->root_fds[2]))
 1157: 						evt->events |= EPOLLPRI;
 1158: 				} else {
 1159: 					FD_CLR(evt->data.fd, &r->root_fds[0]);
 1160: 					FD_CLR(evt->data.fd, &r->root_fds[2]);
 1161: 				}
 1162: 			}
 1163: 		}
 1164: 		if (res[i].events & EPOLLOUT) {
 1165: 			task = NULL;
 1166: 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 1167: 				if (TASK_FD(t) == evt->data.fd) {
 1168: 					if (!task)
 1169: 						task = t;
 1170: 					wflg++;
 1171: 				}
 1172: 			}
 1173: 
 1174: 			if (task) {
 1175: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 1176: 				/* remove write handle */
 1177: 				remove_task_from(task, &r->root_write);
 1178: 
 1179: 				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
 1180:  					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
 1181: 								(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
 1182: 						task->task_type = taskUNUSE;
 1183: 						insert_task_to(task, &r->root_unuse);
 1184: 					} else {
 1185: 						task->task_type = taskREADY;
 1186: 						insert_task_to(task, &r->root_ready);
 1187: 					}
 1188: 				} else {
 1189: 					task->task_type = taskREADY;
 1190: 					insert_task_to(task, &r->root_ready);
 1191: 				}
 1192: 
 1193: 				if (!(res[i].events & EPOLLIN) && FD_ISSET(evt->data.fd, &r->root_fds[0])) {
 1194: 					evt->events |= EPOLLIN;
 1195: 					rflg = 42;
 1196: 				}
 1197: 				if (!(res[i].events & EPOLLPRI) && FD_ISSET(evt->data.fd, &r->root_fds[2])) {
 1198: 					evt->events |= EPOLLPRI;
 1199: 					rflg = 42;
 1200: 				}
 1201: 				if (wflg > 1)
 1202: 					evt->events |= EPOLLOUT;
 1203: 				else
 1204: 					FD_CLR(evt->data.fd, &r->root_fds[1]);
 1205: 			}
 1206: 		}
 1207: 
 1208: 		if (rflg > 1 || wflg > 1)
 1209: 			ops = EPOLL_CTL_MOD;
 1210: 
 1211: 		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
 1212: 			if (r->root_hooks.hook_exec.exception) {
 1213: 				r->root_hooks.hook_exec.exception(r, NULL);
 1214: 			} else
 1215: 				LOGERR;
 1216: 		}
 1217: 	}
 1218: }
 1219: #endif
 1220: 
 1221: #if SUP_ENABLE == NO_SUPPORT
 1222: static inline void 
 1223: fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
 1224: {
 1225: 	register int i, rflg, wflg;
 1226: 	sched_task_t *t, *tmp, *task;
 1227: 
 1228: 	/* skip select check if return value from select is zero */
 1229: 	if (!en)
 1230: 		return;
 1231: 
 1232: 	for (i = 0; i < r->root_kq; i++) {
 1233: 		if (!FD_ISSET(i, &r->root_fds[0]) && 
 1234: 				!FD_ISSET(i, &r->root_fds[1]) && 
 1235: 				!FD_ISSET(i, &r->root_fds[2]))
 1236: 			continue;
 1237: 
 1238: 		rflg = wflg = 0;
 1239: 
 1240: 		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
 1241: 			task = NULL;
 1242: 			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
 1243: 				if (TASK_FD(t) == i) {
 1244: 					if (!task)
 1245: 						task = t;
 1246: 					rflg++;
 1247: 				}
 1248: 			}
 1249: 
 1250: 			if (task) {
 1251: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
 1252: 
 1253: 				/* remove read handle */
 1254: 				remove_task_from(task, &r->root_read);
 1255: 
 1256: 				if (r->root_hooks.hook_exec.exception) {
 1257:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1258: 						task->task_type = taskUNUSE;
 1259: 						insert_task_to(task, &r->root_unuse);
 1260: 					} else {
 1261: 						task->task_type = taskREADY;
 1262: 						insert_task_to(task, &r->root_ready);
 1263: 					}
 1264: 				} else {
 1265: 					task->task_type = taskREADY;
 1266: 					insert_task_to(task, &r->root_ready);
 1267: 				}
 1268: 
 1269: 				/* remove resouce */
 1270: 				if (rflg == 1) {
 1271: 					FD_CLR(i, &r->root_fds[0]);
 1272: 					FD_CLR(i, &r->root_fds[2]);
 1273: 				}
 1274: 			}
 1275: 		}
 1276: 		if (FD_ISSET(i, &wfd)) {
 1277: 			task = NULL;
 1278: 			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
 1279: 				if (TASK_FD(t) == i) {
 1280: 					if (!task)
 1281: 						task = t;
 1282: 					wflg++;
 1283: 				}
 1284: 			}
 1285: 
 1286: 			if (task) {
 1287: 				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
 1288: 
 1289: 				/* remove write handle */
 1290: 				remove_task_from(task, &r->root_write);
 1291: 
 1292: 				if (r->root_hooks.hook_exec.exception) {
 1293:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1294: 						task->task_type = taskUNUSE;
 1295: 						insert_task_to(task, &r->root_unuse);
 1296: 					} else {
 1297: 						task->task_type = taskREADY;
 1298: 						insert_task_to(task, &r->root_ready);
 1299: 					}
 1300: 				} else {
 1301: 					task->task_type = taskREADY;
 1302: 					insert_task_to(task, &r->root_ready);
 1303: 				}
 1304: 
 1305: 				/* remove resouce */
 1306: 				if (wflg == 1)
 1307: 					FD_CLR(i, &r->root_fds[1]);
 1308: 			}
 1309: 		}
 1310: 	}
 1311: 
 1312: 	/* optimize select */
 1313: 	for (i = r->root_kq - 1; i >= 0; i--)
 1314: 		if (FD_ISSET(i, &r->root_fds[0]) || 
 1315: 				FD_ISSET(i, &r->root_fds[1]) || 
 1316: 				FD_ISSET(i, &r->root_fds[2]))
 1317: 			break;
 1318: 	r->root_kq = i + 1;
 1319: }
 1320: #endif
 1321: 
 1322: /*
 1323:  * sched_hook_fetch() - Default FETCH hook
 1324:  *
 1325:  * @root = root task
 1326:  * @arg = unused
 1327:  * return: NULL error or !=NULL fetched task
 1328:  */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	u_long timeout = 0;
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* get new task by queue priority: event queue first, then ready queue;
	 * if anything is already pending, return it without polling the kernel */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

	/* if present member of task, set NOWAIT */
	if (!TAILQ_FIRST(&r->root_task)) {
		/* timer tasks */
#ifdef TIMER_WITHOUT_SORT
		clock_gettime(CLOCK_MONOTONIC, &now);

		/* unsorted timer queue: linear scan for the earliest deadline */
		sched_timespecclear(&r->root_wait);
		TAILQ_FOREACH(task, &r->root_timer, task_node) {
			if (!sched_timespecisset(&r->root_wait))
				r->root_wait = TASK_TS(task);
			else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
				r->root_wait = TASK_TS(task);
		}

		if (TAILQ_FIRST(&r->root_timer)) {
			/* convert absolute deadline into a relative wait */
			m = r->root_wait;
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#else	/* ! TIMER_WITHOUT_SORT */
		/* sorted timer queue: the head holds the earliest deadline */
		if ((task = TAILQ_FIRST(&r->root_timer))) {
			clock_gettime(CLOCK_MONOTONIC, &now);

			m = TASK_TS(task);
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#endif	/* TIMER_WITHOUT_SORT */
	} else	/* no waiting for event, because we have ready task */
		sched_timespecclear(&r->root_wait);

	/* NOTE(review): INFTIM appears to be marked by -1 (see sched_timespecinf
	 * usage above); with &&, a value where only one field is -1 is still
	 * treated as a finite timeout here — confirm that is intended */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_wait;
#elif SUP_ENABLE == EP_SUPPORT
		/* epoll takes milliseconds */
		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_wait, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
		/* wait and poll interval both infinite -> block indefinitely */
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
		/* fall back to the configured poll interval */
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	}

	/* wait for kernel events via the configured backend */
#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	/* select(2) mutates its sets, so work on copies of the master sets */
	xfd = r->root_fds[2];
	rfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif	/* KQ_SUPPORT */
		/* backend error: let the exception hook decide; EINTR is benign */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif	/* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put regular task priority task to ready queue, 
		if there is no ready task or reach max missing hit for regular task */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			r->root_miss ^= r->root_miss;	/* xor-with-self: reset miss counter */

			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;	/* no regular tasks: reset miss counter */

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);
	return task;
}
 1479: 
 1480: /*
 1481:  * sched_hook_exception() - Default EXCEPTION hook
 1482:  *
 1483:  * @root = root task
 1484:  * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
 1485:  * return: <0 errors and 0 ok
 1486:  */
 1487: void *
 1488: sched_hook_exception(void *root, void *arg)
 1489: {
 1490: 	sched_root_task_t *r = root;
 1491: 
 1492: 	if (!r)
 1493: 		return NULL;
 1494: 
 1495: 	/* custom exception handling ... */
 1496: 	if (arg) {
 1497: 		if (arg == (void*) EV_EOF)
 1498: 			return NULL;
 1499: 		return (void*) -1;	/* raise scheduler error!!! */
 1500: 	}
 1501: 
 1502: 	/* if error hook exists */
 1503: 	if (r->root_hooks.hook_root.error)
 1504: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1505: 
 1506: 	/* default case! */
 1507: 	LOGERR;
 1508: 	return NULL;
 1509: }
 1510: 
 1511: /*
 1512:  * sched_hook_condition() - Default CONDITION hook
 1513:  *
 1514:  * @root = root task
 1515:  * @arg = killState from schedRun()
 1516:  * return: NULL kill scheduler loop or !=NULL ok
 1517:  */
 1518: void *
 1519: sched_hook_condition(void *root, void *arg)
 1520: {
 1521: 	sched_root_task_t *r = root;
 1522: 
 1523: 	if (!r)
 1524: 		return NULL;
 1525: 
 1526: 	return (void*) (*r->root_cond - *(intptr_t*) arg);
 1527: }
 1528: 
 1529: /*
 1530:  * sched_hook_rtc() - Default RTC hook
 1531:  *
 1532:  * @task = current task
 1533:  * @arg = unused
 1534:  * return: <0 errors and 0 ok
 1535:  */
void *
sched_hook_rtc(void *task, void *arg __unused)
{
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* deliver a real-time signal (SIGRTMIN + task data offset) that
	 * carries the task pointer in the signal value */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		TASK_FLAG(t) = (u_long) tmr;	/* stash the timer id on the task */

#if SUP_ENABLE == KQ_SUPPORT
	/* kqueue backend: register a signal task that wraps timer expiry */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
				t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	/* non-kqueue backend: install a SA_SIGINFO handler for the RT signal */
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	/* arm a one-shot relative timer from the task's timespec value */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* NOTE(review): on the non-kqueue path sigt is still NULL here —
		 * confirm schedCancel() tolerates a NULL argument */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}
#endif	/* HAVE_TIMER_CREATE */
	return NULL;
}

FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>