File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / hooks.c
Revision 1.27.2.4: download - view: text, annotated - select for diffs - revision graph
Wed May 21 22:22:57 2014 UTC (10 years, 4 months ago) by misho
Branches: sched5_2
Diff to: branchpoint 1.27: preferred, unified
init epoll for linux

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.27.2.4 2014/05/21 22:22:57 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004 - 2014
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: /*
   51:  * sched_hook_init() - Default INIT hook
   52:  *
   53:  * @root = root task
   54:  * @arg = unused
   55:  * return: <0 errors and 0 ok
   56:  */
   57: void *
   58: sched_hook_init(void *root, void *arg __unused)
   59: {
   60: 	sched_root_task_t *r = root;
   61: 
   62: 	if (!r)
   63: 		return (void*) -1;
   64: 
   65: #if SUP_ENABLE == KQ_SUPPORT
   66: 	r->root_kq = kqueue();
   67: 	if (r->root_kq == -1) {
   68: 		LOGERR;
   69: 		return (void*) -1;
   70: 	}
   71: #elif SUP_ENABLE == EP_SUPPORT
   72: 	r->root_kq = epoll_create(KQ_EVENTS);
   73: 	if (r->root_kq == -1) {
   74: 		LOGERR;
   75: 		return (void*) -1;
   76: 	}
   77: #else
   78: 	r->root_kq ^= r->root_kq;
   79: 	FD_ZERO(&r->root_fds[0]);
   80: 	FD_ZERO(&r->root_fds[1]);
   81: #endif
   82: 
   83: 	return NULL;
   84: }
   85: 
   86: /*
   87:  * sched_hook_fini() - Default FINI hook
   88:  *
   89:  * @root = root task
   90:  * @arg = unused
   91:  * return: <0 errors and 0 ok
   92:  */
   93: void *
   94: sched_hook_fini(void *root, void *arg __unused)
   95: {
   96: 	sched_root_task_t *r = root;
   97: 
   98: 	if (!r)
   99: 		return (void*) -1;
  100: 
  101: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
  102: 	if (r->root_kq > 2) {
  103: 		close(r->root_kq);
  104: 		r->root_kq = 0;
  105: 	}
  106: #else
  107: 	FD_ZERO(&r->root_fds[1]);
  108: 	FD_ZERO(&r->root_fds[0]);
  109: 	r->root_kq ^= r->root_kq;
  110: #endif
  111: 
  112: 	return NULL;
  113: }
  114: 
/*
 * sched_hook_cancel() - Default CANCEL hook
 *
 * Deregisters a task from the kernel event backend according to its
 * type: on kqueue an EV_DELETE change is queued and flushed with a
 * single non-blocking kevent() call at the end; on the select()
 * backend the fd is cleared from the interest sets.  AIO/LIO tasks
 * additionally cancel outstanding requests and free their control
 * blocks; THREAD and RTC tasks are handled without touching kqueue.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task;
#if SUP_ENABLE == KQ_SUPPORT
	/* single change record + zero timeout: apply immediately, fetch nothing */
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#else
	sched_root_task_t *r = NULL;
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	/* NOTE(review): duplicates the `i' declared above when
	 * SUP_ENABLE != KQ_SUPPORT -- presumably EVFILT_LIO only exists
	 * on kqueue platforms, so that combination never compiles; verify */
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
#if SUP_ENABLE != KQ_SUPPORT
	r = TASK_ROOT(t);
#endif

	switch (TASK_TYPE(t)) {
		case taskREAD:
#if SUP_ENABLE == KQ_SUPPORT
			/* NetBSD declares kevent udata as intptr_t, others as void* */
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
#else
			FD_CLR(TASK_FD(t), &r->root_fds[0]);

			/* optimize select */
			/* shrink the nfds watermark down to the highest fd
			 * still present in either interest set */
			for (i = r->root_kq - 1; i > 2; i--)
				if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
					break;
			if (i > 2)
				r->root_kq = i + 1;
#endif
			break;
		case taskWRITE:
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
#else
			FD_CLR(TASK_FD(t), &r->root_fds[1]);

			/* optimize select */
			/* same watermark shrink as taskREAD above */
			for (i = r->root_kq - 1; i > 2; i--)
				if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
					break;
			if (i > 2)
				r->root_kq = i + 1;
#endif
			break;
		case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
			/* timers are identified by TASK_DATA, not by an fd */
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			/* restore signal */
			/* sched_hook_signal() set it to SIG_IGN; undo that */
			signal(TASK_VAL(t), SIG_DFL);
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			/* TASK_VAL holds the aiocb pointer; cancel, reap, free */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			/* TASK_VAL holds an array of TASK_DATLEN aiocb pointers */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					free(acbs[i]);
				}
				free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
			/* no kqueue resource involved; returns without the
			 * trailing kevent() flush */
#ifdef HAVE_LIBPTHREAD
			if (TASK_VAL(t))
				pthread_cancel((pthread_t) TASK_VAL(t));
#endif
			return NULL;
#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			/* POSIX timer id lives in TASK_FLAG, companion task in TASK_RET */
			timer_delete((timer_t) TASK_FLAG(t));
			schedCancel((sched_task_t*) TASK_RET(t));
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

#if SUP_ENABLE == KQ_SUPPORT
	/* flush the single EV_DELETE change; zero timeout = don't wait */
	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
#endif
	return NULL;
}
  295: 
  296: #ifdef HAVE_LIBPTHREAD
  297: /*
  298:  * sched_hook_thread() - Default THREAD hook
  299:  *
  300:  * @task = current task
  301:  * @arg = pthread attributes
  302:  * return: <0 errors and 0 ok
  303:  */
  304: void *
  305: sched_hook_thread(void *task, void *arg)
  306: {
  307: 	sched_task_t *t = task;
  308: 	pthread_t tid;
  309: 	sigset_t s, o;
  310: 
  311: 	if (!t || !TASK_ROOT(t))
  312: 		return (void*) -1;
  313: 
  314: 	sigfillset(&s);
  315: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  316: 	if ((errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  317: 				(void *(*)(void*)) _sched_threadWrapper, t))) {
  318: 		LOGERR;
  319: 		pthread_sigmask(SIG_SETMASK, &o, NULL);
  320: 		return (void*) -1;
  321: 	} else
  322: 		TASK_VAL(t) = (u_long) tid;
  323: 
  324: 	if (!TASK_ISLOCKED(t))
  325: 		TASK_LOCK(t);
  326: 
  327: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  328: 	return NULL;
  329: }
  330: #endif
  331: 
/*
 * sched_hook_read() - Default READ hook
 *
 * Registers the task's fd for read readiness: on kqueue by adding an
 * EVFILT_READ event (EV_ADD | EV_CLEAR), on the select() backend by
 * setting the fd in root_fds[0] and raising the nfds watermark.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_read(void *task, void *arg __unused)
{
	sched_task_t *t = task;
#if SUP_ENABLE == KQ_SUPPORT
	/* single change record + zero timeout: register and return at once */
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#else
	sched_root_task_t *r = NULL;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
#if SUP_ENABLE != KQ_SUPPORT
	r = TASK_ROOT(t);
#endif

#if SUP_ENABLE == KQ_SUPPORT
	/* NetBSD declares kevent udata as intptr_t, others as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* report through the exception hook when set, else log errno */
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#else
	FD_SET(TASK_FD(t), &r->root_fds[0]);
	/* keep root_kq one past the highest watched fd, as select() wants */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
  377: 
/*
 * sched_hook_write() - Default WRITE hook
 *
 * Registers the task's fd for write readiness: on kqueue by adding an
 * EVFILT_WRITE event (EV_ADD | EV_CLEAR), on the select() backend by
 * setting the fd in root_fds[1] and raising the nfds watermark.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_write(void *task, void *arg __unused)
{
	sched_task_t *t = task;
#if SUP_ENABLE == KQ_SUPPORT
	/* single change record + zero timeout: register and return at once */
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#else
	sched_root_task_t *r = NULL;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
#if SUP_ENABLE != KQ_SUPPORT
	r = TASK_ROOT(t);
#endif

#if SUP_ENABLE == KQ_SUPPORT
	/* NetBSD declares kevent udata as intptr_t, others as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* report through the exception hook when set, else log errno */
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#else
	FD_SET(TASK_FD(t), &r->root_fds[1]);
	/* keep root_kq one past the highest watched fd, as select() wants */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
  423: 
/*
 * sched_hook_alarm() - Default ALARM hook
 *
 * Registers a kqueue EVFILT_TIMER event identified by TASK_DATA,
 * with the period taken from the task's timespec converted to
 * milliseconds.  No-op on non-kqueue backends.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* timer ident/udata is TASK_DATA; data is the period in msec
	 * (NetBSD declares kevent udata as intptr_t, others as void*) */
#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* report through the exception hook when set, else log errno */
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
  462: 
/*
 * sched_hook_node() - Default NODE hook
 *
 * Registers a kqueue EVFILT_VNODE watch on the task's fd for the
 * full set of vnode changes (delete/write/extend/attrib/link/
 * rename/revoke).  No-op on non-kqueue backends.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_node(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* NetBSD declares kevent udata as intptr_t, others as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* report through the exception hook when set, else log errno */
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
  501: 
/*
 * sched_hook_proc() - Default PROC hook
 *
 * Registers a kqueue EVFILT_PROC watch on the pid stored in TASK_VAL
 * for exit/fork/exec/track events.  No-op on non-kqueue backends.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* NetBSD declares kevent udata as intptr_t, others as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* report through the exception hook when set, else log errno */
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
  538: 
/*
 * sched_hook_signal() - Default SIGNAL hook
 *
 * Registers a kqueue EVFILT_SIGNAL watch for the signal number in
 * TASK_VAL.  The default disposition is first set to SIG_IGN so
 * delivery does not kill the process while kqueue observes it
 * (sched_hook_cancel() restores SIG_DFL).  The non-kqueue sigaction
 * path below is compiled out with #if 0.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_signal(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* ignore signal */
	signal(TASK_VAL(t), SIG_IGN);

	/* NetBSD declares kevent udata as intptr_t, others as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* report through the exception hook when set, else log errno */
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#else
	/* disabled sigaction-based fallback, kept for reference */
#if 0
	sched_task_t *t = task;
	struct sigaction sa;

	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = _sched_sigHandler;
	sa.sa_flags = SA_RESETHAND | SA_RESTART;

	if (sigaction(TASK_VAL(t), &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif	/* 0 */
#endif
	return NULL;
}
  593: 
/*
 * sched_hook_user() - Default USER hook
 *
 * Registers a kqueue EVFILT_USER event identified by TASK_VAL, with
 * fflags taken from TASK_DATLEN.  Only compiled on platforms that
 * provide EVFILT_USER; no-op on non-kqueue backends.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
#ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* NetBSD declares kevent udata as intptr_t, others as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* report through the exception hook when set, else log errno */
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
#endif
  632: 
  633: /*
  634:  * sched_hook_fetch() - Default FETCH hook
  635:  *
  636:  * @root = root task
  637:  * @arg = unused
  638:  * return: NULL error or !=NULL fetched task
  639:  */
  640: void *
  641: sched_hook_fetch(void *root, void *arg __unused)
  642: {
  643: 	sched_root_task_t *r = root;
  644: 	sched_task_t *task, *tmp;
  645: 	struct timespec now, m, mtmp;
  646: #if SUP_ENABLE == KQ_SUPPORT
  647: 	struct kevent evt[1], res[KQ_EVENTS];
  648: 	struct timespec *timeout;
  649: #else
  650: 	struct timeval *timeout, tv;
  651: 	fd_set rfd, wfd, xfd;
  652: #endif
  653: 	register int i, flg;
  654: 	int en;
  655: #ifdef AIO_SUPPORT
  656: 	int len, fd;
  657: 	struct aiocb *acb;
  658: #ifdef EVFILT_LIO
  659: 	int l;
  660: 	register int j;
  661: 	off_t off;
  662: 	struct aiocb **acbs;
  663: 	struct iovec *iv;
  664: #endif	/* EVFILT_LIO */
  665: #endif	/* AIO_SUPPORT */
  666: 
  667: 	if (!r)
  668: 		return NULL;
  669: 
  670: 	/* get new task by queue priority */
  671: 	while ((task = TAILQ_FIRST(&r->root_event))) {
  672: #ifdef HAVE_LIBPTHREAD
  673: 		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
  674: #endif
  675: 		TAILQ_REMOVE(&r->root_event, task, task_node);
  676: #ifdef HAVE_LIBPTHREAD
  677: 		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
  678: #endif
  679: 		task->task_type = taskUNUSE;
  680: #ifdef HAVE_LIBPTHREAD
  681: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  682: #endif
  683: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  684: #ifdef HAVE_LIBPTHREAD
  685: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  686: #endif
  687: 		return task;
  688: 	}
  689: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
  690: #ifdef HAVE_LIBPTHREAD
  691: 		pthread_mutex_lock(&r->root_mtx[taskREADY]);
  692: #endif
  693: 		TAILQ_REMOVE(&r->root_ready, task, task_node);
  694: #ifdef HAVE_LIBPTHREAD
  695: 		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  696: #endif
  697: 		task->task_type = taskUNUSE;
  698: #ifdef HAVE_LIBPTHREAD
  699: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  700: #endif
  701: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  702: #ifdef HAVE_LIBPTHREAD
  703: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  704: #endif
  705: 		return task;
  706: 	}
  707: 
  708: #ifdef TIMER_WITHOUT_SORT
  709: 	clock_gettime(CLOCK_MONOTONIC, &now);
  710: 
  711: 	sched_timespecclear(&r->root_wait);
  712: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
  713: 		if (!sched_timespecisset(&r->root_wait))
  714: 			r->root_wait = TASK_TS(task);
  715: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
  716: 			r->root_wait = TASK_TS(task);
  717: 	}
  718: 
  719: 	if (TAILQ_FIRST(&r->root_timer)) {
  720: 		m = r->root_wait;
  721: 		sched_timespecsub(&m, &now, &mtmp);
  722: 		r->root_wait = mtmp;
  723: 	} else {
  724: 		/* set wait INFTIM */
  725: 		sched_timespecinf(&r->root_wait);
  726: 	}
  727: #else	/* ! TIMER_WITHOUT_SORT */
  728: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
  729: 		clock_gettime(CLOCK_MONOTONIC, &now);
  730: 
  731: 		m = TASK_TS(task);
  732: 		sched_timespecsub(&m, &now, &mtmp);
  733: 		r->root_wait = mtmp;
  734: 	} else {
  735: 		/* set wait INFTIM */
  736: 		sched_timespecinf(&r->root_wait);
  737: 	}
  738: #endif	/* TIMER_WITHOUT_SORT */
  739: 	/* if present member of task, set NOWAIT */
  740: 	if (TAILQ_FIRST(&r->root_task))
  741: 		sched_timespecclear(&r->root_wait);
  742: 
  743: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
  744: #if SUP_ENABLE == KQ_SUPPORT
  745: 		timeout = &r->root_wait;
  746: #else
  747: 		sched_timespec2val(&r->root_wait, &tv);
  748: 		timeout = &tv;
  749: #endif	/* KQ_SUPPORT */
  750: 	} else if (sched_timespecisinf(&r->root_poll))
  751: 		timeout = NULL;
  752: 	else {
  753: #if SUP_ENABLE == KQ_SUPPORT
  754: 		timeout = &r->root_poll;
  755: #else
  756: 		sched_timespec2val(&r->root_poll, &tv);
  757: 		timeout = &tv;
  758: #endif	/* KQ_SUPPORT */
  759: 	}
  760: 
  761: #if SUP_ENABLE == KQ_SUPPORT
  762: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
  763: #else
  764: 	rfd = xfd = r->root_fds[0];
  765: 	wfd = r->root_fds[1];
  766: 	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
  767: #endif	/* KQ_SUPPORT */
  768: 		if (r->root_hooks.hook_exec.exception) {
  769: 			if (r->root_hooks.hook_exec.exception(r, NULL))
  770: 				return NULL;
  771: 		} else if (errno != EINTR)
  772: 			LOGERR;
  773: 		goto skip_event;
  774: 	}
  775: 
  776: 	/* kevent dispatcher */
  777: 	now.tv_sec = now.tv_nsec = 0;
  778: 	/* Go and catch the cat into pipes ... */
  779: #if SUP_ENABLE == KQ_SUPPORT
  780: 	for (i = 0; i < en; i++) {
  781: 		memcpy(evt, &res[i], sizeof evt);
  782: 		evt->flags = EV_DELETE;
  783: 		/* Put read/write task to ready queue */
  784: 		switch (res[i].filter) {
  785: 			case EVFILT_READ:
  786: 				flg = 0;
  787: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  788: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  789: 						continue;
  790: 					else {
  791: 						flg++;
  792: 						TASK_RET(task) = res[i].data;
  793: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  794: 					}
  795: 					/* remove read handle */
  796: #ifdef HAVE_LIBPTHREAD
  797: 					pthread_mutex_lock(&r->root_mtx[taskREAD]);
  798: #endif
  799: 					TAILQ_REMOVE(&r->root_read, task, task_node);
  800: #ifdef HAVE_LIBPTHREAD
  801: 					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
  802: #endif
  803: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  804:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  805: 							task->task_type = taskUNUSE;
  806: #ifdef HAVE_LIBPTHREAD
  807: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  808: #endif
  809: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  810: #ifdef HAVE_LIBPTHREAD
  811: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  812: #endif
  813: 						} else {
  814: 							task->task_type = taskREADY;
  815: #ifdef HAVE_LIBPTHREAD
  816: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  817: #endif
  818: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  819: #ifdef HAVE_LIBPTHREAD
  820: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  821: #endif
  822: 						}
  823: 					} else {
  824: 						task->task_type = taskREADY;
  825: #ifdef HAVE_LIBPTHREAD
  826: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  827: #endif
  828: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  829: #ifdef HAVE_LIBPTHREAD
  830: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  831: #endif
  832: 					}
  833: 				}
  834: 				/* if match at least 2, don't remove resouce of event */
  835: 				if (flg > 1)
  836: 					evt->flags ^= evt->flags;
  837: 				break;
  838: 			case EVFILT_WRITE:
  839: 				flg = 0;
  840: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  841: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  842: 						continue;
  843: 					else {
  844: 						flg++;
  845: 						TASK_RET(task) = res[i].data;
  846: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  847: 					}
  848: 					/* remove write handle */
  849: #ifdef HAVE_LIBPTHREAD
  850: 					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
  851: #endif
  852: 					TAILQ_REMOVE(&r->root_write, task, task_node);
  853: #ifdef HAVE_LIBPTHREAD
  854: 					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
  855: #endif
  856: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  857:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  858: 							task->task_type = taskUNUSE;
  859: #ifdef HAVE_LIBPTHREAD
  860: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  861: #endif
  862: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  863: #ifdef HAVE_LIBPTHREAD
  864: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  865: #endif
  866: 						} else {
  867: 							task->task_type = taskREADY;
  868: #ifdef HAVE_LIBPTHREAD
  869: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  870: #endif
  871: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  872: #ifdef HAVE_LIBPTHREAD
  873: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  874: #endif
  875: 						}
  876: 					} else {
  877: 						task->task_type = taskREADY;
  878: #ifdef HAVE_LIBPTHREAD
  879: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  880: #endif
  881: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  882: #ifdef HAVE_LIBPTHREAD
  883: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  884: #endif
  885: 					}
  886: 				}
  887: 				/* if match at least 2, don't remove resouce of event */
  888: 				if (flg > 1)
  889: 					evt->flags ^= evt->flags;
  890: 				break;
  891: 			case EVFILT_TIMER:
  892: 				flg = 0;
  893: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  894: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  895: 						continue;
  896: 					else {
  897: 						flg++;
  898: 						TASK_RET(task) = res[i].data;
  899: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  900: 					}
  901: 					/* remove alarm handle */
  902: #ifdef HAVE_LIBPTHREAD
  903: 					pthread_mutex_lock(&r->root_mtx[taskALARM]);
  904: #endif
  905: 					TAILQ_REMOVE(&r->root_alarm, task, task_node);
  906: #ifdef HAVE_LIBPTHREAD
  907: 					pthread_mutex_unlock(&r->root_mtx[taskALARM]);
  908: #endif
  909: 					task->task_type = taskREADY;
  910: #ifdef HAVE_LIBPTHREAD
  911: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  912: #endif
  913: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  914: #ifdef HAVE_LIBPTHREAD
  915: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  916: #endif
  917: 				}
  918: 				/* if match at least 2, don't remove resouce of event */
  919: 				if (flg > 1)
  920: 					evt->flags ^= evt->flags;
  921: 				break;
  922: 			case EVFILT_VNODE:
  923: 				flg = 0;
  924: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  925: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  926: 						continue;
  927: 					else {
  928: 						flg++;
  929: 						TASK_RET(task) = res[i].data;
  930: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  931: 					}
  932: 					/* remove node handle */
  933: #ifdef HAVE_LIBPTHREAD
  934: 					pthread_mutex_lock(&r->root_mtx[taskNODE]);
  935: #endif
  936: 					TAILQ_REMOVE(&r->root_node, task, task_node);
  937: #ifdef HAVE_LIBPTHREAD
  938: 					pthread_mutex_unlock(&r->root_mtx[taskNODE]);
  939: #endif
  940: 					task->task_type = taskREADY;
  941: #ifdef HAVE_LIBPTHREAD
  942: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  943: #endif
  944: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  945: #ifdef HAVE_LIBPTHREAD
  946: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  947: #endif
  948: 				}
  949: 				/* if match at least 2, don't remove resouce of event */
  950: 				if (flg > 1)
  951: 					evt->flags ^= evt->flags;
  952: 				break;
  953: 			case EVFILT_PROC:
  954: 				flg = 0;
  955: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  956: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  957: 						continue;
  958: 					else {
  959: 						flg++;
  960: 						TASK_RET(task) = res[i].data;
  961: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  962: 					}
  963: 					/* remove proc handle */
  964: #ifdef HAVE_LIBPTHREAD
  965: 					pthread_mutex_lock(&r->root_mtx[taskPROC]);
  966: #endif
  967: 					TAILQ_REMOVE(&r->root_proc, task, task_node);
  968: #ifdef HAVE_LIBPTHREAD
  969: 					pthread_mutex_unlock(&r->root_mtx[taskPROC]);
  970: #endif
  971: 					task->task_type = taskREADY;
  972: #ifdef HAVE_LIBPTHREAD
  973: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  974: #endif
  975: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  976: #ifdef HAVE_LIBPTHREAD
  977: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  978: #endif
  979: 				}
  980: 				/* if match at least 2, don't remove resouce of event */
  981: 				if (flg > 1)
  982: 					evt->flags ^= evt->flags;
  983: 				break;
  984: 			case EVFILT_SIGNAL:
  985: 				flg = 0;
  986: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  987: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  988: 						continue;
  989: 					else {
  990: 						flg++;
  991: 						TASK_RET(task) = res[i].data;
  992: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  993: 					}
  994: 					/* remove signal handle */
  995: #ifdef HAVE_LIBPTHREAD
  996: 					pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
  997: #endif
  998: 					TAILQ_REMOVE(&r->root_signal, task, task_node);
  999: #ifdef HAVE_LIBPTHREAD
 1000: 					pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
 1001: #endif
 1002: 					task->task_type = taskREADY;
 1003: #ifdef HAVE_LIBPTHREAD
 1004: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1005: #endif
 1006: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1007: #ifdef HAVE_LIBPTHREAD
 1008: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1009: #endif
 1010: 				}
 1011: 				/* if match at least 2, don't remove resouce of event */
 1012: 				if (flg > 1)
 1013: 					evt->flags ^= evt->flags;
 1014: 				break;
 1015: #ifdef AIO_SUPPORT
 1016: 			case EVFILT_AIO:
 1017: 				flg = 0;
 1018: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
 1019: 					acb = (struct aiocb*) TASK_VAL(task);
 1020: 					if (acb != ((struct aiocb*) res[i].udata))
 1021: 						continue;
 1022: 					else {
 1023: 						flg++;
 1024: 						TASK_RET(task) = res[i].data;
 1025: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1026: 					}
 1027: 					/* remove user handle */
 1028: #ifdef HAVE_LIBPTHREAD
 1029: 					pthread_mutex_lock(&r->root_mtx[taskAIO]);
 1030: #endif
 1031: 					TAILQ_REMOVE(&r->root_aio, task, task_node);
 1032: #ifdef HAVE_LIBPTHREAD
 1033: 					pthread_mutex_unlock(&r->root_mtx[taskAIO]);
 1034: #endif
 1035: 					task->task_type = taskREADY;
 1036: #ifdef HAVE_LIBPTHREAD
 1037: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1038: #endif
 1039: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1040: #ifdef HAVE_LIBPTHREAD
 1041: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1042: #endif
 1043: 					fd = acb->aio_fildes;
 1044: 					if ((len = aio_return(acb)) != -1) {
 1045: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
 1046: 							LOGERR;
 1047: 					} else
 1048: 						LOGERR;
 1049: 					free(acb);
 1050: 					TASK_DATLEN(task) = (u_long) len;
 1051: 					TASK_FD(task) = fd;
 1052: 				}
 1053: 				/* if match at least 2, don't remove resouce of event */
 1054: 				if (flg > 1)
 1055: 					evt->flags ^= evt->flags;
 1056: 				break;
 1057: #ifdef EVFILT_LIO
 1058: 			case EVFILT_LIO:
 1059: 				flg = 0;
 1060: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 1061: 					acbs = (struct aiocb**) TASK_VAL(task);
 1062: 					if (acbs != ((struct aiocb**) res[i].udata))
 1063: 						continue;
 1064: 					else {
 1065: 						flg++;
 1066: 						TASK_RET(task) = res[i].data;
 1067: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1068: 					}
 1069: 					/* remove user handle */
 1070: #ifdef HAVE_LIBPTHREAD
 1071: 					pthread_mutex_lock(&r->root_mtx[taskLIO]);
 1072: #endif
 1073: 					TAILQ_REMOVE(&r->root_lio, task, task_node);
 1074: #ifdef HAVE_LIBPTHREAD
 1075: 					pthread_mutex_unlock(&r->root_mtx[taskLIO]);
 1076: #endif
 1077: 					task->task_type = taskREADY;
 1078: #ifdef HAVE_LIBPTHREAD
 1079: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1080: #endif
 1081: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1082: #ifdef HAVE_LIBPTHREAD
 1083: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1084: #endif
 1085: 					iv = (struct iovec*) TASK_DATA(task);
 1086: 					fd = acbs[0]->aio_fildes;
 1087: 					off = acbs[0]->aio_offset;
 1088: 					for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
 1089: 						if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
 1090: 							l = 0;
 1091: 						else
 1092: 							l = iv[i].iov_len;
 1093: 						free(acbs[i]);
 1094: 					}
 1095: 					free(acbs);
 1096: 					TASK_DATLEN(task) = (u_long) len;
 1097: 					TASK_FD(task) = fd;
 1098: 
 1099: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
 1100: 						LOGERR;
 1101: 				}
 1102: 				/* if match at least 2, don't remove resouce of event */
 1103: 				if (flg > 1)
 1104: 					evt->flags ^= evt->flags;
 1105: 				break;
 1106: #endif	/* EVFILT_LIO */
 1107: #endif	/* AIO_SUPPORT */
 1108: #ifdef EVFILT_USER
 1109: 			case EVFILT_USER:
 1110: 				flg = 0;
 1111: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1112: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
 1113: 						continue;
 1114: 					else {
 1115: 						flg++;
 1116: 						TASK_RET(task) = res[i].data;
 1117: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1118: 					}
 1119: 					/* remove user handle */
 1120: #ifdef HAVE_LIBPTHREAD
 1121: 					pthread_mutex_lock(&r->root_mtx[taskUSER]);
 1122: #endif
 1123: 					TAILQ_REMOVE(&r->root_user, task, task_node);
 1124: #ifdef HAVE_LIBPTHREAD
 1125: 					pthread_mutex_unlock(&r->root_mtx[taskUSER]);
 1126: #endif
 1127: 					task->task_type = taskREADY;
 1128: #ifdef HAVE_LIBPTHREAD
 1129: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1130: #endif
 1131: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1132: #ifdef HAVE_LIBPTHREAD
 1133: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1134: #endif
 1135: 				}
 1136: 				/* if match at least 2, don't remove resouce of event */
 1137: 				if (flg > 1)
 1138: 					evt->flags ^= evt->flags;
 1139: 				break;
 1140: #endif	/* EVFILT_USER */
 1141: 		}
 1142: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1143: 			if (r->root_hooks.hook_exec.exception) {
 1144: 				if (r->root_hooks.hook_exec.exception(r, NULL))
 1145: 					return NULL;
 1146: 			} else
 1147: 				LOGERR;
 1148: 		}
 1149: 	}
 1150: #else	/* end of kevent dispatcher */
 1151: 	for (i = 0; i < r->root_kq; i++) {
 1152: 		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
 1153: 			flg = 0;
 1154: 			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 1155: 				if (TASK_FD(task) != i)
 1156: 					continue;
 1157: 				else {
 1158: 					flg++;
 1159: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1160: 							FIONREAD, &TASK_RET(task));
 1161: 				}
 1162: 				/* remove read handle */
 1163: #ifdef HAVE_LIBPTHREAD
 1164: 				pthread_mutex_lock(&r->root_mtx[taskREAD]);
 1165: #endif
 1166: 				TAILQ_REMOVE(&r->root_read, task, task_node);
 1167: #ifdef HAVE_LIBPTHREAD
 1168: 				pthread_mutex_unlock(&r->root_mtx[taskREAD]);
 1169: #endif
 1170: 				if (r->root_hooks.hook_exec.exception) {
 1171:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1172: 						task->task_type = taskUNUSE;
 1173: #ifdef HAVE_LIBPTHREAD
 1174: 						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1175: #endif
 1176: 						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1177: #ifdef HAVE_LIBPTHREAD
 1178: 						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1179: #endif
 1180: 					} else {
 1181: 						task->task_type = taskREADY;
 1182: #ifdef HAVE_LIBPTHREAD
 1183: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1184: #endif
 1185: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1186: #ifdef HAVE_LIBPTHREAD
 1187: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1188: #endif
 1189: 					}
 1190: 				} else {
 1191: 					task->task_type = taskREADY;
 1192: #ifdef HAVE_LIBPTHREAD
 1193: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1194: #endif
 1195: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1196: #ifdef HAVE_LIBPTHREAD
 1197: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1198: #endif
 1199: 				}
 1200: 			}
 1201: 			/* if match equal to 1, remove resouce */
 1202: 			if (flg == 1)
 1203: 				FD_CLR(i, &r->root_fds[0]);
 1204: 		}
 1205: 
 1206: 		if (FD_ISSET(i, &wfd)) {
 1207: 			flg = 0;
 1208: 			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 1209: 				if (TASK_FD(task) != i)
 1210: 					continue;
 1211: 				else {
 1212: 					flg++;
 1213: 					TASK_FLAG(task) = ioctl(TASK_FD(task), 
 1214: 							FIONWRITE, &TASK_RET(task));
 1215: 				}
 1216: 				/* remove write handle */
 1217: #ifdef HAVE_LIBPTHREAD
 1218: 				pthread_mutex_lock(&r->root_mtx[taskWRITE]);
 1219: #endif
 1220: 				TAILQ_REMOVE(&r->root_write, task, task_node);
 1221: #ifdef HAVE_LIBPTHREAD
 1222: 				pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
 1223: #endif
 1224: 				if (r->root_hooks.hook_exec.exception) {
 1225:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1226: 						task->task_type = taskUNUSE;
 1227: #ifdef HAVE_LIBPTHREAD
 1228: 						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1229: #endif
 1230: 						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1231: #ifdef HAVE_LIBPTHREAD
 1232: 						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1233: #endif
 1234: 					} else {
 1235: 						task->task_type = taskREADY;
 1236: #ifdef HAVE_LIBPTHREAD
 1237: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1238: #endif
 1239: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1240: #ifdef HAVE_LIBPTHREAD
 1241: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1242: #endif
 1243: 					}
 1244: 				} else {
 1245: 					task->task_type = taskREADY;
 1246: #ifdef HAVE_LIBPTHREAD
 1247: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1248: #endif
 1249: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1250: #ifdef HAVE_LIBPTHREAD
 1251: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1252: #endif
 1253: 				}
 1254: 			}
 1255: 			/* if match equal to 1, remove resouce */
 1256: 			if (flg == 1)
 1257: 				FD_CLR(i, &r->root_fds[1]);
 1258: 		}
 1259: 	}
 1260: 
 1261: 	/* optimize select */
 1262: 	for (i = r->root_kq - 1; i > 2; i--)
 1263: 		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
 1264: 			break;
 1265: 	if (i > 2)
 1266: 		r->root_kq = i + 1;
 1267: #endif	/* KQ_SUPPORT */
 1268: 
 1269: skip_event:
 1270: 	/* timer update & put in ready queue */
 1271: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1272: 
 1273: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1274: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
 1275: #ifdef HAVE_LIBPTHREAD
 1276: 			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
 1277: #endif
 1278: 			TAILQ_REMOVE(&r->root_timer, task, task_node);
 1279: #ifdef HAVE_LIBPTHREAD
 1280: 			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
 1281: #endif
 1282: 			task->task_type = taskREADY;
 1283: #ifdef HAVE_LIBPTHREAD
 1284: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1285: #endif
 1286: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1287: #ifdef HAVE_LIBPTHREAD
 1288: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1289: #endif
 1290: 		}
 1291: 
 1292: 	/* put regular task priority task to ready queue, 
 1293: 		if there is no ready task or reach max missing hit for regular task */
 1294: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1295: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1296: 			r->root_miss ^= r->root_miss;
 1297: 
 1298: #ifdef HAVE_LIBPTHREAD
 1299: 			pthread_mutex_lock(&r->root_mtx[taskTASK]);
 1300: #endif
 1301: 			TAILQ_REMOVE(&r->root_task, task, task_node);
 1302: #ifdef HAVE_LIBPTHREAD
 1303: 			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
 1304: #endif
 1305: 			task->task_type = taskREADY;
 1306: #ifdef HAVE_LIBPTHREAD
 1307: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1308: #endif
 1309: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1310: #ifdef HAVE_LIBPTHREAD
 1311: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1312: #endif
 1313: 		} else
 1314: 			r->root_miss++;
 1315: 	} else
 1316: 		r->root_miss ^= r->root_miss;
 1317: 
 1318: 	/* OK, lets get ready task !!! */
 1319: 	task = TAILQ_FIRST(&r->root_ready);
 1320: 	if (!(task))
 1321: 		return NULL;
 1322: 
 1323: #ifdef HAVE_LIBPTHREAD
 1324: 	pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1325: #endif
 1326: 	TAILQ_REMOVE(&r->root_ready, task, task_node);
 1327: #ifdef HAVE_LIBPTHREAD
 1328: 	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1329: #endif
 1330: 	task->task_type = taskUNUSE;
 1331: #ifdef HAVE_LIBPTHREAD
 1332: 	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1333: #endif
 1334: 	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1335: #ifdef HAVE_LIBPTHREAD
 1336: 	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1337: #endif
 1338: 	return task;
 1339: }
 1340: 
 1341: /*
 1342:  * sched_hook_exception() - Default EXCEPTION hook
 1343:  *
 1344:  * @root = root task
 1345:  * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
 1346:  * return: <0 errors and 0 ok
 1347:  */
 1348: void *
 1349: sched_hook_exception(void *root, void *arg)
 1350: {
 1351: 	sched_root_task_t *r = root;
 1352: 
 1353: 	if (!r)
 1354: 		return NULL;
 1355: 
 1356: 	/* custom exception handling ... */
 1357: 	if (arg) {
 1358: 		if (arg == (void*) EV_EOF)
 1359: 			return NULL;
 1360: 		return (void*) -1;	/* raise scheduler error!!! */
 1361: 	}
 1362: 
 1363: 	/* if error hook exists */
 1364: 	if (r->root_hooks.hook_root.error)
 1365: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1366: 
 1367: 	/* default case! */
 1368: 	LOGERR;
 1369: 	return NULL;
 1370: }
 1371: 
 1372: /*
 1373:  * sched_hook_condition() - Default CONDITION hook
 1374:  *
 1375:  * @root = root task
 1376:  * @arg = killState from schedRun()
 1377:  * return: NULL kill scheduler loop or !=NULL ok
 1378:  */
 1379: void *
 1380: sched_hook_condition(void *root, void *arg)
 1381: {
 1382: 	sched_root_task_t *r = root;
 1383: 
 1384: 	if (!r)
 1385: 		return NULL;
 1386: 
 1387: 	return (void*) (r->root_cond - *(intptr_t*) arg);
 1388: }
 1389: 
 1390: /*
 1391:  * sched_hook_rtc() - Default RTC hook
 1392:  *
 1393:  * @task = current task
 1394:  * @arg = unused
 1395:  * return: <0 errors and 0 ok
 1396:  */
 1397: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
 1398: void *
 1399: sched_hook_rtc(void *task, void *arg __unused)
 1400: {
 1401: 	sched_task_t *sigt = NULL, *t = task;
 1402: 	struct itimerspec its;
 1403: 	struct sigevent evt;
 1404: 	timer_t tmr;
 1405: 
 1406: 	if (!t || !TASK_ROOT(t))
 1407: 		return (void*) -1;
 1408: 
 1409: 	memset(&evt, 0, sizeof evt);
 1410: 	evt.sigev_notify = SIGEV_SIGNAL;
 1411: 	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
 1412: 	evt.sigev_value.sival_ptr = TASK_DATA(t);
 1413: 
 1414: 	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
 1415: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1416: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1417: 		else
 1418: 			LOGERR;
 1419: 		return (void*) -1;
 1420: 	} else
 1421: 		TASK_FLAG(t) = (u_long) tmr;
 1422: 
 1423: 	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
 1424: 				t, (size_t) tmr))) {
 1425: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1426: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1427: 		else
 1428: 			LOGERR;
 1429: 		timer_delete(tmr);
 1430: 		return (void*) -1;
 1431: 	} else
 1432: 		TASK_RET(t) = (uintptr_t) sigt;
 1433: 
 1434: 	memset(&its, 0, sizeof its);
 1435: 	its.it_value.tv_sec = t->task_val.ts.tv_sec;
 1436: 	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
 1437: 
 1438: 	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
 1439: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1440: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1441: 		else
 1442: 			LOGERR;
 1443: 		schedCancel(sigt);
 1444: 		timer_delete(tmr);
 1445: 		return (void*) -1;
 1446: 	}
 1447: 
 1448: 	return NULL;
 1449: }
 1450: #endif	/* HAVE_TIMER_CREATE */

FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>