File: [ELWIX - Embedded LightWeight unIX] / libaitsched / src / hooks.c
Revision 1.18.4.1
Thu Aug 15 14:53:49 2013 UTC, by misho
Branches: sched3_8
new ver 3.8
change kevent flags

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.18.4.1 2013/08/15 14:53:49 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: /*
   51:  * sched_hook_init() - Default INIT hook
   52:  *
   53:  * @root = root task
   54:  * @arg = unused
   55:  * return: <0 errors and 0 ok
   56:  */
   57: void *
   58: sched_hook_init(void *root, void *arg __unused)
   59: {
   60: 	sched_root_task_t *r = root;
   61: 
   62: 	if (!r)
   63: 		return (void*) -1;
   64: 
   65: 	r->root_kq = kqueue();
   66: 	if (r->root_kq == -1) {
   67: 		LOGERR;
   68: 		return (void*) -1;
   69: 	}
   70: 
   71: 	return NULL;
   72: }
   73: 
   74: /*
   75:  * sched_hook_fini() - Default FINI hook
   76:  *
   77:  * @root = root task
   78:  * @arg = unused
   79:  * return: <0 errors and 0 ok
   80:  */
   81: void *
   82: sched_hook_fini(void *root, void *arg __unused)
   83: {
   84: 	sched_root_task_t *r = root;
   85: 
   86: 	if (!r)
   87: 		return (void*) -1;
   88: 
   89: 	if (r->root_kq > 2) {
   90: 		close(r->root_kq);
   91: 		r->root_kq = 0;
   92: 	}
   93: 
   94: 	return NULL;
   95: }
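
/*
 * Usage sketch for the INIT/FINI pair above: outside of the scheduler they
 * boil down to plain kqueue(2) lifecycle management.  A minimal standalone
 * equivalent, using only the system API (error handling trimmed):
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	int kq = kqueue();	// what sched_hook_init() stores in root_kq
 *	if (kq == -1)
 *		return -1;	// the hook returns (void*) -1 and logs via LOGERR
 *	// ... register events and call kevent() in a loop ...
 *	if (kq > 2)		// same guard as sched_hook_fini(): never close stdio
 *		close(kq);
 */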
   96: 
   97: /*
   98:  * sched_hook_cancel() - Default CANCEL hook
   99:  *
  100:  * @task = current task
  101:  * @arg = unused
  102:  * return: <0 errors and 0 ok
  103:  */
  104: void *
  105: sched_hook_cancel(void *task, void *arg __unused)
  106: {
  107: 	sched_task_t *t = task;
  108: 	struct kevent chg[1];
  109: 	struct timespec timeout = { 0, 0 };
  110: #ifdef AIO_SUPPORT
  111: 	struct aiocb *acb;
  112: #ifdef EVFILT_LIO
  113: 	register int i = 0;
  114: 	struct aiocb **acbs;
  115: #endif	/* EVFILT_LIO */
  116: #endif	/* AIO_SUPPORT */
  117: 
  118: 	if (!t || !TASK_ROOT(t))
  119: 		return (void*) -1;
  120: 
  121: 	switch (TASK_TYPE(t)) {
  122: 		case taskREAD:
  123: #ifdef __NetBSD__
  124: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  125: #else
  126: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  127: #endif
  128: 			break;
  129: 		case taskWRITE:
  130: #ifdef __NetBSD__
  131: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  132: #else
  133: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  134: #endif
  135: 			break;
  136: 		case taskALARM:
  137: #ifdef __NetBSD__
  138: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  139: 					0, 0, (intptr_t) TASK_DATA(t));
  140: #else
  141: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  142: 					0, 0, (void*) TASK_DATA(t));
  143: #endif
  144: 			break;
  145: 		case taskNODE:
  146: #ifdef __NetBSD__
  147: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  148: #else
  149: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  150: #endif
  151: 			break;
  152: 		case taskPROC:
  153: #ifdef __NetBSD__
  154: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  155: #else
  156: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  157: #endif
  158: 			break;
  159: 		case taskSIGNAL:
  160: #ifdef __NetBSD__
  161: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  162: #else
  163: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  164: #endif
  165: 			/* restore signal */
  166: 			signal(TASK_VAL(t), SIG_DFL);
  167: 			break;
  168: #ifdef AIO_SUPPORT
  169: 		case taskAIO:
  170: #ifdef __NetBSD__
  171: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  172: #else
  173: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  174: #endif
  175: 			acb = (struct aiocb*) TASK_VAL(t);
  176: 			if (acb) {
  177: 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
  178: 					aio_return(acb);
  179: 				free(acb);
  180: 				TASK_VAL(t) = 0;
  181: 			}
  182: 			break;
  183: #ifdef EVFILT_LIO
  184: 		case taskLIO:
  185: #ifdef __NetBSD__
  186: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  187: #else
  188: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  189: #endif
  190: 			acbs = (struct aiocb**) TASK_VAL(t);
  191: 			if (acbs) {
  192: 				for (i = 0; i < TASK_DATLEN(t); i++) {
  193: 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
  194: 						aio_return(acbs[i]);
  195: 					free(acbs[i]);
  196: 				}
  197: 				free(acbs);
  198: 				TASK_VAL(t) = 0;
  199: 			}
  200: 			break;
  201: #endif	/* EVFILT_LIO */
  202: #endif	/* AIO_SUPPORT */
  203: #ifdef EVFILT_USER
  204: 		case taskUSER:
  205: #ifdef __NetBSD__
  206: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  207: #else
  208: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  209: #endif
  210: 			break;
  211: #endif
  212: 		case taskTHREAD:
  213: #ifdef HAVE_LIBPTHREAD
  214: 			pthread_cancel((pthread_t) TASK_VAL(t));
   215: #endif			/* FALLTHROUGH: threads have no kevent to delete */
  216: 		default:
  217: 			return NULL;
  218: 	}
  219: 
  220: 	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
  221: 	return NULL;
  222: }
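
/*
 * Note on the #ifdef __NetBSD__ pairs above: NetBSD declares the udata member
 * of struct kevent as intptr_t, while FreeBSD/OpenBSD/DragonFly use void *, so
 * only the last EV_SET() argument differs between the two branches.  A
 * hypothetical helper macro (SCHED_UDATA is not part of this library) could
 * fold each pair into a single line:
 *
 *	#ifdef __NetBSD__
 *	#define SCHED_UDATA(x)	((intptr_t) (x))
 *	#else
 *	#define SCHED_UDATA(x)	((void*) (x))
 *	#endif
 *
 *	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, SCHED_UDATA(TASK_FD(t)));
 */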
  223: 
  224: #ifdef HAVE_LIBPTHREAD
  225: /*
  226:  * sched_hook_thread() - Default THREAD hook
  227:  *
  228:  * @task = current task
  229:  * @arg = pthread attributes
  230:  * return: <0 errors and 0 ok
  231:  */
  232: void *
  233: sched_hook_thread(void *task, void *arg)
  234: {
  235: 	sched_task_t *t = task;
  236: 	pthread_t tid;
  237: 	sigset_t s, o;
  238: 
  239: 	if (!t || !TASK_ROOT(t))
  240: 		return (void*) -1;
  241: 
  242: 	sigfillset(&s);
  243: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  244: 	if ((errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  245: 				(void *(*)(void*)) _sched_threadWrapper, t))) {
  246: 		LOGERR;
  247: 		pthread_sigmask(SIG_SETMASK, &o, NULL);
  248: 		return (void*) -1;
  249: 	} else
  250: 		TASK_VAL(t) = (u_long) tid;
  251: 
  252: 	if (!TASK_ISLOCKED(t))
  253: 		TASK_LOCK(t);
  254: 
  255: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  256: 	return NULL;
  257: }
  258: #endif
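
/*
 * The THREAD hook blocks every signal around pthread_create() so the new
 * thread starts with a completely filled signal mask and cannot steal the
 * signals that the scheduler manages through kqueue.  The same pattern in
 * plain POSIX (worker() stands in for any thread body):
 *
 *	#include <pthread.h>
 *	#include <signal.h>
 *
 *	sigset_t all, old;
 *	pthread_t tid;
 *
 *	sigfillset(&all);
 *	pthread_sigmask(SIG_BLOCK, &all, &old);		// new thread inherits this mask
 *	errno = pthread_create(&tid, NULL, worker, NULL);
 *	pthread_sigmask(SIG_SETMASK, &old, NULL);	// restore the caller's mask
 */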
  259: 
  260: /*
  261:  * sched_hook_read() - Default READ hook
  262:  *
  263:  * @task = current task
  264:  * @arg = unused
  265:  * return: <0 errors and 0 ok
  266:  */
  267: void *
  268: sched_hook_read(void *task, void *arg __unused)
  269: {
  270: 	sched_task_t *t = task;
  271: 	struct kevent chg[1];
  272: 	struct timespec timeout = { 0, 0 };
  273: 
  274: 	if (!t || !TASK_ROOT(t))
  275: 		return (void*) -1;
  276: 
  277: #ifdef __NetBSD__
  278: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  279: #else
  280: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  281: #endif
  282: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  283: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  284: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  285: 		else
  286: 			LOGERR;
  287: 		return (void*) -1;
  288: 	}
  289: 
  290: 	return NULL;
  291: }
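
/*
 * What the READ hook registers, shown standalone: an edge-triggered (EV_CLEAR)
 * read filter for a descriptor, followed by a wait.  A minimal sketch using
 * only the system API (fd and kq are assumed to be valid descriptors):
 *
 *	struct kevent chg, ev;
 *
 *	EV_SET(&chg, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	if (kevent(kq, &chg, 1, NULL, 0, NULL) == -1)
 *		;	// the hook reports this through the exception hook or LOGERR
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0)
 *		;	// ev.data now holds the number of bytes ready to read
 */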
  292: 
  293: /*
  294:  * sched_hook_write() - Default WRITE hook
  295:  *
  296:  * @task = current task
  297:  * @arg = unused
  298:  * return: <0 errors and 0 ok
  299:  */
  300: void *
  301: sched_hook_write(void *task, void *arg __unused)
  302: {
  303: 	sched_task_t *t = task;
  304: 	struct kevent chg[1];
  305: 	struct timespec timeout = { 0, 0 };
  306: 
  307: 	if (!t || !TASK_ROOT(t))
  308: 		return (void*) -1;
  309: 
  310: #ifdef __NetBSD__
  311: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  312: #else
  313: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  314: #endif
  315: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  316: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  317: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  318: 		else
  319: 			LOGERR;
  320: 		return (void*) -1;
  321: 	}
  322: 
  323: 	return NULL;
  324: }
  325: 
  326: /*
  327:  * sched_hook_alarm() - Default ALARM hook
  328:  *
  329:  * @task = current task
  330:  * @arg = unused
  331:  * return: <0 errors and 0 ok
  332:  */
  333: void *
  334: sched_hook_alarm(void *task, void *arg __unused)
  335: {
  336: 	sched_task_t *t = task;
  337: 	struct kevent chg[1];
  338: 	struct timespec timeout = { 0, 0 };
  339: 
  340: 	if (!t || !TASK_ROOT(t))
  341: 		return (void*) -1;
  342: 
  343: #ifdef __NetBSD__
  344: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  345: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  346: 			(intptr_t) TASK_DATA(t));
  347: #else
  348: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  349: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  350: 			(void*) TASK_DATA(t));
  351: #endif
  352: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  353: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  354: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  355: 		else
  356: 			LOGERR;
  357: 		return (void*) -1;
  358: 	}
  359: 
  360: 	return NULL;
  361: }
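
/*
 * EVFILT_TIMER takes its period in milliseconds by default, which is why the
 * ALARM hook converts the task's timespec as tv_sec * 1000 + tv_nsec / 1000000.
 * The integer division silently drops anything finer than a millisecond:
 *
 *	struct timespec ts = { 2, 250000000 };			// 2.25 s
 *	long ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;	// 2250 ms
 */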
  362: 
  363: /*
  364:  * sched_hook_node() - Default NODE hook
  365:  *
  366:  * @task = current task
  367:  * @arg = unused
  368:  * return: <0 errors and 0 ok
  369:  */
  370: void *
  371: sched_hook_node(void *task, void *arg __unused)
  372: {
  373: 	sched_task_t *t = task;
  374: 	struct kevent chg[1];
  375: 	struct timespec timeout = { 0, 0 };
  376: 
  377: 	if (!t || !TASK_ROOT(t))
  378: 		return (void*) -1;
  379: 
  380: #ifdef __NetBSD__
  381: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  382: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  383: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  384: #else
  385: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  386: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  387: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  388: #endif
  389: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  390: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  391: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  392: 		else
  393: 			LOGERR;
  394: 		return (void*) -1;
  395: 	}
  396: 
  397: 	return NULL;
  398: }
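
/*
 * The NODE hook is a vnode watch: EVFILT_VNODE needs an already-open
 * descriptor for the file plus a NOTE_* mask in fflags.  A standalone sketch
 * (the path and kq are assumptions of this example):
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/watched", O_RDONLY);
 *	struct kevent chg;
 *
 *	EV_SET(&chg, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *			NOTE_DELETE | NOTE_WRITE | NOTE_RENAME, 0, NULL);
 *	kevent(kq, &chg, 1, NULL, 0, NULL);
 *	// fflags of the returned event tells which NOTE_* condition fired
 */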
  399: 
  400: /*
  401:  * sched_hook_proc() - Default PROC hook
  402:  *
  403:  * @task = current task
  404:  * @arg = unused
  405:  * return: <0 errors and 0 ok
  406:  */
  407: void *
  408: sched_hook_proc(void *task, void *arg __unused)
  409: {
  410: 	sched_task_t *t = task;
  411: 	struct kevent chg[1];
  412: 	struct timespec timeout = { 0, 0 };
  413: 
  414: 	if (!t || !TASK_ROOT(t))
  415: 		return (void*) -1;
  416: 
  417: #ifdef __NetBSD__
  418: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  419: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  420: #else
  421: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  422: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  423: #endif
  424: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  425: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  426: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  427: 		else
  428: 			LOGERR;
  429: 		return (void*) -1;
  430: 	}
  431: 
  432: 	return NULL;
  433: }
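
/*
 * The PROC hook watches a process id; fflags of the returned event reports
 * which of NOTE_EXIT/NOTE_FORK/NOTE_EXEC/NOTE_TRACK happened.  Sketch of
 * watching a forked child (kq is assumed to be a valid kqueue descriptor):
 *
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0)
 *		_exit(0);			// child
 *	struct kevent chg;
 *	EV_SET(&chg, pid, EVFILT_PROC, EV_ADD | EV_CLEAR, NOTE_EXIT, 0, NULL);
 *	kevent(kq, &chg, 1, NULL, 0, NULL);
 *	// an event with (fflags & NOTE_EXIT) means the child has terminated
 */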
  434: 
  435: /*
  436:  * sched_hook_signal() - Default SIGNAL hook
  437:  *
  438:  * @task = current task
  439:  * @arg = unused
  440:  * return: <0 errors and 0 ok
  441:  */
  442: void *
  443: sched_hook_signal(void *task, void *arg __unused)
  444: {
  445: 	sched_task_t *t = task;
  446: 	struct kevent chg[1];
  447: 	struct timespec timeout = { 0, 0 };
  448: 
  449: 	if (!t || !TASK_ROOT(t))
  450: 		return (void*) -1;
  451: 
  452: 	/* ignore signal */
  453: 	signal(TASK_VAL(t), SIG_IGN);
  454: 
  455: #ifdef __NetBSD__
  456: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
  457: #else
  458: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
  459: #endif
  460: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  461: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  462: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  463: 		else
  464: 			LOGERR;
  465: 		return (void*) -1;
  466: 	}
  467: 
  468: 	return NULL;
  469: }
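
/*
 * For EVFILT_SIGNAL the normal signal disposition still applies, which is why
 * the hook installs SIG_IGN first: the signal must not terminate the process,
 * yet kqueue still reports each delivery.  Standalone sketch for SIGUSR1
 * (kq assumed valid):
 *
 *	signal(SIGUSR1, SIG_IGN);
 *	struct kevent chg;
 *	EV_SET(&chg, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &chg, 1, NULL, 0, NULL);
 *	// the returned event's data member counts deliveries since the last fetch
 */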
  470: 
  471: /*
  472:  * sched_hook_user() - Default USER hook
  473:  *
  474:  * @task = current task
  475:  * @arg = unused
  476:  * return: <0 errors and 0 ok
  477:  */
  478: #ifdef EVFILT_USER
  479: void *
  480: sched_hook_user(void *task, void *arg __unused)
  481: {
  482: 	sched_task_t *t = task;
  483: 	struct kevent chg[1];
  484: 	struct timespec timeout = { 0, 0 };
  485: 
  486: 	if (!t || !TASK_ROOT(t))
  487: 		return (void*) -1;
  488: 
  489: #ifdef __NetBSD__
  490: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  491: 			0, (intptr_t) TASK_VAL(t));
  492: #else
  493: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  494: 			0, (void*) TASK_VAL(t));
  495: #endif
  496: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  497: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  498: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  499: 		else
  500: 			LOGERR;
  501: 		return (void*) -1;
  502: 	}
  503: 
  504: 	return NULL;
  505: }
  506: #endif
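
/*
 * EVFILT_USER events never fire on their own; another piece of code has to
 * raise them with NOTE_TRIGGER.  Sketch of arming and firing one (the ident
 * value 1 and kq are assumptions of this example):
 *
 *	struct kevent chg;
 *
 *	EV_SET(&chg, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &chg, 1, NULL, 0, NULL);		// arm, as the hook does
 *
 *	EV_SET(&chg, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &chg, 1, NULL, 0, NULL);		// fire: wakes the fetch loop
 */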
  507: 
  508: /*
  509:  * sched_hook_fetch() - Default FETCH hook
  510:  *
  511:  * @root = root task
  512:  * @arg = unused
  513:  * return: NULL error or !=NULL fetched task
  514:  */
  515: void *
  516: sched_hook_fetch(void *root, void *arg __unused)
  517: {
  518: 	sched_root_task_t *r = root;
  519: 	sched_task_t *task, *tmp;
  520: 	struct timespec now, m, mtmp;
  521: 	struct timespec *timeout;
  522: 	struct kevent evt[1], res[KQ_EVENTS];
  523: 	register int i, flg;
  524: 	int en;
  525: #ifdef AIO_SUPPORT
  526: 	int len, fd;
  527: 	struct aiocb *acb;
  528: #ifdef EVFILT_LIO
  529: 	int l;
  530: 	register int j;
  531: 	off_t off;
  532: 	struct aiocb **acbs;
  533: 	struct iovec *iv;
  534: #endif	/* EVFILT_LIO */
  535: #endif	/* AIO_SUPPORT */
  536: 
  537: 	if (!r)
  538: 		return NULL;
  539: 
  540: 	/* get new task by queue priority */
  541: 	while ((task = TAILQ_FIRST(&r->root_event))) {
  542: #ifdef HAVE_LIBPTHREAD
  543: 		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
  544: #endif
  545: 		TAILQ_REMOVE(&r->root_event, task, task_node);
  546: #ifdef HAVE_LIBPTHREAD
  547: 		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
  548: #endif
  549: 		task->task_type = taskUNUSE;
  550: #ifdef HAVE_LIBPTHREAD
  551: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  552: #endif
  553: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  554: #ifdef HAVE_LIBPTHREAD
  555: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  556: #endif
  557: 		return task;
  558: 	}
  559: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
  560: #ifdef HAVE_LIBPTHREAD
  561: 		pthread_mutex_lock(&r->root_mtx[taskREADY]);
  562: #endif
  563: 		TAILQ_REMOVE(&r->root_ready, task, task_node);
  564: #ifdef HAVE_LIBPTHREAD
  565: 		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  566: #endif
  567: 		task->task_type = taskUNUSE;
  568: #ifdef HAVE_LIBPTHREAD
  569: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  570: #endif
  571: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  572: #ifdef HAVE_LIBPTHREAD
  573: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  574: #endif
  575: 		return task;
  576: 	}
  577: 
  578: #ifdef TIMER_WITHOUT_SORT
  579: 	clock_gettime(CLOCK_MONOTONIC, &now);
  580: 
  581: 	sched_timespecclear(&r->root_wait);
  582: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
  583: 		if (!sched_timespecisset(&r->root_wait))
  584: 			r->root_wait = TASK_TS(task);
  585: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
  586: 			r->root_wait = TASK_TS(task);
  587: 	}
  588: 
  589: 	if (TAILQ_FIRST(&r->root_timer)) {
  590: 		m = r->root_wait;
  591: 		sched_timespecsub(&m, &now, &mtmp);
  592: 		r->root_wait = mtmp;
  593: 	} else {
  594: 		/* set wait INFTIM */
  595: 		sched_timespecinf(&r->root_wait);
  596: 	}
  597: #else
  598: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
  599: 		clock_gettime(CLOCK_MONOTONIC, &now);
  600: 
  601: 		m = TASK_TS(task);
  602: 		sched_timespecsub(&m, &now, &mtmp);
  603: 		r->root_wait = mtmp;
  604: 	} else {
  605: 		/* set wait INFTIM */
  606: 		sched_timespecinf(&r->root_wait);
  607: 	}
  608: #endif
   609: 	/* if a regular task is pending, don't wait at all (NOWAIT) */
  610: 	if (TAILQ_FIRST(&r->root_task))
  611: 		sched_timespecclear(&r->root_wait);
  612: 
  613: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
  614: 		timeout = &r->root_wait;
  615: 	else if (sched_timespecisinf(&r->root_poll))
  616: 		timeout = NULL;
  617: 	else
  618: 		timeout = &r->root_poll;
  619: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
  620: 		if (r->root_hooks.hook_exec.exception) {
  621: 			if (r->root_hooks.hook_exec.exception(r, NULL))
  622: 				return NULL;
  623: 		} else if (errno != EINTR)
  624: 			LOGERR;
  625: 		return NULL;
  626: 	}
  627: 
  628: 	now.tv_sec = now.tv_nsec = 0;
   629: 	/* Go and catch the cat into pipes ... (dispatch the returned kevents to their tasks) */
  630: 	for (i = 0; i < en; i++) {
  631: 		memcpy(evt, &res[i], sizeof evt);
  632: 		evt->flags = EV_DELETE;
  633: 		/* Put read/write task to ready queue */
  634: 		switch (res[i].filter) {
  635: 			case EVFILT_READ:
  636: 				flg = 0;
  637: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  638: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  639: 						continue;
  640: 					else {
  641: 						flg++;
  642: 						TASK_RET(task) = res[i].data;
  643: 						TASK_FLAG(task) = res[i].fflags;
  644: 					}
  645: 					/* remove read handle */
  646: #ifdef HAVE_LIBPTHREAD
  647: 					pthread_mutex_lock(&r->root_mtx[taskREAD]);
  648: #endif
  649: 					TAILQ_REMOVE(&r->root_read, task, task_node);
  650: #ifdef HAVE_LIBPTHREAD
  651: 					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
  652: #endif
  653: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  654:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  655: 							task->task_type = taskUNUSE;
  656: #ifdef HAVE_LIBPTHREAD
  657: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  658: #endif
  659: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  660: #ifdef HAVE_LIBPTHREAD
  661: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  662: #endif
  663: 						} else {
  664: 							task->task_type = taskREADY;
  665: #ifdef HAVE_LIBPTHREAD
  666: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  667: #endif
  668: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  669: #ifdef HAVE_LIBPTHREAD
  670: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  671: #endif
  672: 						}
  673: 					} else {
  674: 						task->task_type = taskREADY;
  675: #ifdef HAVE_LIBPTHREAD
  676: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  677: #endif
  678: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  679: #ifdef HAVE_LIBPTHREAD
  680: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  681: #endif
  682: 					}
  683: 				}
   684: 				/* if at least 2 tasks matched, keep the event registered (don't delete it) */
  685: 				if (flg > 1)
  686: 					evt->flags ^= evt->flags;
  687: 				break;
  688: 			case EVFILT_WRITE:
  689: 				flg = 0;
  690: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  691: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  692: 						continue;
  693: 					else {
  694: 						flg++;
  695: 						TASK_RET(task) = res[i].data;
  696: 						TASK_FLAG(task) = res[i].fflags;
  697: 					}
  698: 					/* remove write handle */
  699: #ifdef HAVE_LIBPTHREAD
  700: 					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
  701: #endif
  702: 					TAILQ_REMOVE(&r->root_write, task, task_node);
  703: #ifdef HAVE_LIBPTHREAD
  704: 					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
  705: #endif
  706: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  707:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  708: 							task->task_type = taskUNUSE;
  709: #ifdef HAVE_LIBPTHREAD
  710: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  711: #endif
  712: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  713: #ifdef HAVE_LIBPTHREAD
  714: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  715: #endif
  716: 						} else {
  717: 							task->task_type = taskREADY;
  718: #ifdef HAVE_LIBPTHREAD
  719: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  720: #endif
  721: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  722: #ifdef HAVE_LIBPTHREAD
  723: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  724: #endif
  725: 						}
  726: 					} else {
  727: 						task->task_type = taskREADY;
  728: #ifdef HAVE_LIBPTHREAD
  729: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  730: #endif
  731: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  732: #ifdef HAVE_LIBPTHREAD
  733: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  734: #endif
  735: 					}
  736: 				}
   737: 				/* if at least 2 tasks matched, keep the event registered (don't delete it) */
  738: 				if (flg > 1)
  739: 					evt->flags ^= evt->flags;
  740: 				break;
  741: 			case EVFILT_TIMER:
  742: 				flg = 0;
  743: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  744: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  745: 						continue;
  746: 					else {
  747: 						flg++;
  748: 						TASK_RET(task) = res[i].data;
  749: 						TASK_FLAG(task) = res[i].fflags;
  750: 					}
  751: 					/* remove alarm handle */
  752: #ifdef HAVE_LIBPTHREAD
  753: 					pthread_mutex_lock(&r->root_mtx[taskALARM]);
  754: #endif
  755: 					TAILQ_REMOVE(&r->root_alarm, task, task_node);
  756: #ifdef HAVE_LIBPTHREAD
  757: 					pthread_mutex_unlock(&r->root_mtx[taskALARM]);
  758: #endif
  759: 					task->task_type = taskREADY;
  760: #ifdef HAVE_LIBPTHREAD
  761: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  762: #endif
  763: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  764: #ifdef HAVE_LIBPTHREAD
  765: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  766: #endif
  767: 				}
  768: 				/* if match at least 2, don't remove resouce of event */
  769: 				if (flg > 1)
  770: 					evt->flags ^= evt->flags;
  771: 				break;
  772: 			case EVFILT_VNODE:
  773: 				flg = 0;
  774: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  775: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  776: 						continue;
  777: 					else {
  778: 						flg++;
  779: 						TASK_RET(task) = res[i].data;
  780: 						TASK_FLAG(task) = res[i].fflags;
  781: 					}
  782: 					/* remove node handle */
  783: #ifdef HAVE_LIBPTHREAD
  784: 					pthread_mutex_lock(&r->root_mtx[taskNODE]);
  785: #endif
  786: 					TAILQ_REMOVE(&r->root_node, task, task_node);
  787: #ifdef HAVE_LIBPTHREAD
  788: 					pthread_mutex_unlock(&r->root_mtx[taskNODE]);
  789: #endif
  790: 					task->task_type = taskREADY;
  791: #ifdef HAVE_LIBPTHREAD
  792: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  793: #endif
  794: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  795: #ifdef HAVE_LIBPTHREAD
  796: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  797: #endif
  798: 				}
  799: 				/* if match at least 2, don't remove resouce of event */
  800: 				if (flg > 1)
  801: 					evt->flags ^= evt->flags;
  802: 				break;
  803: 			case EVFILT_PROC:
  804: 				flg = 0;
  805: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  806: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  807: 						continue;
  808: 					else {
  809: 						flg++;
  810: 						TASK_RET(task) = res[i].data;
  811: 						TASK_FLAG(task) = res[i].fflags;
  812: 					}
  813: 					/* remove proc handle */
  814: #ifdef HAVE_LIBPTHREAD
  815: 					pthread_mutex_lock(&r->root_mtx[taskPROC]);
  816: #endif
  817: 					TAILQ_REMOVE(&r->root_proc, task, task_node);
  818: #ifdef HAVE_LIBPTHREAD
  819: 					pthread_mutex_unlock(&r->root_mtx[taskPROC]);
  820: #endif
  821: 					task->task_type = taskREADY;
  822: #ifdef HAVE_LIBPTHREAD
  823: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  824: #endif
  825: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  826: #ifdef HAVE_LIBPTHREAD
  827: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  828: #endif
  829: 				}
  830: 				/* if match at least 2, don't remove resouce of event */
  831: 				if (flg > 1)
  832: 					evt->flags ^= evt->flags;
  833: 				break;
  834: 			case EVFILT_SIGNAL:
  835: 				flg = 0;
  836: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  837: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  838: 						continue;
  839: 					else {
  840: 						flg++;
  841: 						TASK_RET(task) = res[i].data;
  842: 						TASK_FLAG(task) = res[i].fflags;
  843: 					}
  844: 					/* remove signal handle */
  845: #ifdef HAVE_LIBPTHREAD
  846: 					pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
  847: #endif
  848: 					TAILQ_REMOVE(&r->root_signal, task, task_node);
  849: #ifdef HAVE_LIBPTHREAD
  850: 					pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
  851: #endif
  852: 					task->task_type = taskREADY;
  853: #ifdef HAVE_LIBPTHREAD
  854: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  855: #endif
  856: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  857: #ifdef HAVE_LIBPTHREAD
  858: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  859: #endif
  860: 				}
   861: 				/* if at least 2 tasks matched, keep the event registered (don't delete it) */
  862: 				if (flg > 1)
  863: 					evt->flags ^= evt->flags;
  864: 				break;
  865: #ifdef AIO_SUPPORT
  866: 			case EVFILT_AIO:
  867: 				flg = 0;
  868: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
  869: 					acb = (struct aiocb*) TASK_VAL(task);
  870: 					if (acb != ((struct aiocb*) res[i].udata))
  871: 						continue;
  872: 					else {
  873: 						flg++;
  874: 						TASK_RET(task) = res[i].data;
  875: 						TASK_FLAG(task) = res[i].fflags;
  876: 					}
  877: 					/* remove user handle */
  878: #ifdef HAVE_LIBPTHREAD
  879: 					pthread_mutex_lock(&r->root_mtx[taskAIO]);
  880: #endif
  881: 					TAILQ_REMOVE(&r->root_aio, task, task_node);
  882: #ifdef HAVE_LIBPTHREAD
  883: 					pthread_mutex_unlock(&r->root_mtx[taskAIO]);
  884: #endif
  885: 					task->task_type = taskREADY;
  886: #ifdef HAVE_LIBPTHREAD
  887: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  888: #endif
  889: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  890: #ifdef HAVE_LIBPTHREAD
  891: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  892: #endif
  893: 					fd = acb->aio_fildes;
  894: 					if ((len = aio_return(acb)) != -1) {
  895: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
  896: 							LOGERR;
  897: 					} else
  898: 						LOGERR;
  899: 					free(acb);
  900: 					TASK_DATLEN(task) = (u_long) len;
  901: 					TASK_FD(task) = fd;
  902: 				}
   903: 				/* if at least 2 tasks matched, keep the event registered (don't delete it) */
  904: 				if (flg > 1)
  905: 					evt->flags ^= evt->flags;
  906: 				break;
  907: #ifdef EVFILT_LIO
  908: 			case EVFILT_LIO:
  909: 				flg = 0;
  910: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
  911: 					acbs = (struct aiocb**) TASK_VAL(task);
  912: 					if (acbs != ((struct aiocb**) res[i].udata))
  913: 						continue;
  914: 					else {
  915: 						flg++;
  916: 						TASK_RET(task) = res[i].data;
  917: 						TASK_FLAG(task) = res[i].fflags;
  918: 					}
  919: 					/* remove user handle */
  920: #ifdef HAVE_LIBPTHREAD
  921: 					pthread_mutex_lock(&r->root_mtx[taskLIO]);
  922: #endif
  923: 					TAILQ_REMOVE(&r->root_lio, task, task_node);
  924: #ifdef HAVE_LIBPTHREAD
  925: 					pthread_mutex_unlock(&r->root_mtx[taskLIO]);
  926: #endif
  927: 					task->task_type = taskREADY;
  928: #ifdef HAVE_LIBPTHREAD
  929: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  930: #endif
  931: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  932: #ifdef HAVE_LIBPTHREAD
  933: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  934: #endif
  935: 					iv = (struct iovec*) TASK_DATA(task);
  936: 					fd = acbs[0]->aio_fildes;
  937: 					off = acbs[0]->aio_offset;
   938: 					for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
   939: 						if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
   940: 							l = 0;
   941: 						else
   942: 							l = iv[j].iov_len;
   943: 						free(acbs[j]);
  944: 					}
  945: 					free(acbs);
  946: 					TASK_DATLEN(task) = (u_long) len;
  947: 					TASK_FD(task) = fd;
  948: 
  949: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
  950: 						LOGERR;
  951: 				}
   952: 				/* if at least 2 tasks matched, keep the event registered (don't delete it) */
  953: 				if (flg > 1)
  954: 					evt->flags ^= evt->flags;
  955: 				break;
  956: #endif	/* EVFILT_LIO */
  957: #endif	/* AIO_SUPPORT */
  958: #ifdef EVFILT_USER
  959: 			case EVFILT_USER:
  960: 				flg = 0;
  961: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
  962: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  963: 						continue;
  964: 					else {
  965: 						flg++;
  966: 						TASK_RET(task) = res[i].data;
  967: 						TASK_FLAG(task) = res[i].fflags;
  968: 					}
  969: 					/* remove user handle */
  970: #ifdef HAVE_LIBPTHREAD
  971: 					pthread_mutex_lock(&r->root_mtx[taskUSER]);
  972: #endif
  973: 					TAILQ_REMOVE(&r->root_user, task, task_node);
  974: #ifdef HAVE_LIBPTHREAD
  975: 					pthread_mutex_unlock(&r->root_mtx[taskUSER]);
  976: #endif
  977: 					task->task_type = taskREADY;
  978: #ifdef HAVE_LIBPTHREAD
  979: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  980: #endif
  981: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  982: #ifdef HAVE_LIBPTHREAD
  983: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  984: #endif
  985: 				}
   986: 				/* if at least 2 tasks matched, keep the event registered (don't delete it) */
  987: 				if (flg > 1)
  988: 					evt->flags ^= evt->flags;
  989: 				break;
  990: #endif	/* EVFILT_USER */
  991: 		}
  992: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
  993: 			if (r->root_hooks.hook_exec.exception) {
  994: 				if (r->root_hooks.hook_exec.exception(r, NULL))
  995: 					return NULL;
  996: 			} else
  997: 				LOGERR;
  998: 		}
  999: 	}
 1000: 
 1001: 	/* timer update & put in ready queue */
 1002: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1003: 
 1004: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1005: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
 1006: #ifdef HAVE_LIBPTHREAD
 1007: 			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
 1008: #endif
 1009: 			TAILQ_REMOVE(&r->root_timer, task, task_node);
 1010: #ifdef HAVE_LIBPTHREAD
 1011: 			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
 1012: #endif
 1013: 			task->task_type = taskREADY;
 1014: #ifdef HAVE_LIBPTHREAD
 1015: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1016: #endif
 1017: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1018: #ifdef HAVE_LIBPTHREAD
 1019: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1020: #endif
 1021: 		}
 1022: 
  1023: 	/* put a regular-priority task on the ready queue 
  1024: 		if there is no ready task or the regular task has reached its max miss count */
 1025: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1026: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1027: 			r->root_miss ^= r->root_miss;
 1028: 
 1029: #ifdef HAVE_LIBPTHREAD
 1030: 			pthread_mutex_lock(&r->root_mtx[taskTASK]);
 1031: #endif
 1032: 			TAILQ_REMOVE(&r->root_task, task, task_node);
 1033: #ifdef HAVE_LIBPTHREAD
 1034: 			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
 1035: #endif
 1036: 			task->task_type = taskREADY;
 1037: #ifdef HAVE_LIBPTHREAD
 1038: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1039: #endif
 1040: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1041: #ifdef HAVE_LIBPTHREAD
 1042: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1043: #endif
 1044: 		} else
 1045: 			r->root_miss++;
 1046: 	} else
 1047: 		r->root_miss ^= r->root_miss;
 1048: 
  1049: 	/* OK, let's grab a ready task */
 1050: 	task = TAILQ_FIRST(&r->root_ready);
 1051: 	if (!(task))
 1052: 		return NULL;
 1053: 
 1054: #ifdef HAVE_LIBPTHREAD
 1055: 	pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1056: #endif
 1057: 	TAILQ_REMOVE(&r->root_ready, task, task_node);
 1058: #ifdef HAVE_LIBPTHREAD
 1059: 	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1060: #endif
 1061: 	task->task_type = taskUNUSE;
 1062: #ifdef HAVE_LIBPTHREAD
 1063: 	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1064: #endif
 1065: 	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1066: #ifdef HAVE_LIBPTHREAD
 1067: 	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1068: #endif
 1069: 	return task;
 1070: }
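
/*
 * Timeout selection in the fetch hook, reduced to plain arithmetic: the wait
 * handed to kevent() is the difference between the earliest timer task and
 * "now", zero when a regular task is pending, and NULL (block forever) when
 * nothing is scheduled at all.  The same subtraction with plain timespecs
 * (next stands for the earliest timer's expiry):
 *
 *	struct timespec now, next, wait;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &now);
 *	wait.tv_sec  = next.tv_sec  - now.tv_sec;
 *	wait.tv_nsec = next.tv_nsec - now.tv_nsec;
 *	if (wait.tv_nsec < 0) {		// borrow one second
 *		wait.tv_sec--;
 *		wait.tv_nsec += 1000000000L;
 *	}
 *	// regular task queued -> wait = {0,0};  no timers at all -> pass NULL
 */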
 1071: 
 1072: /*
 1073:  * sched_hook_exception() - Default EXCEPTION hook
 1074:  *
 1075:  * @root = root task
 1076:  * @arg = custom handling: arg == EV_EOF is ignored, any other non-NULL value raises a scheduler error; arg == NULL logs errno
 1077:  * return: <0 errors and 0 ok
 1078:  */
 1079: void *
 1080: sched_hook_exception(void *root, void *arg)
 1081: {
 1082: 	sched_root_task_t *r = root;
 1083: 
 1084: 	if (!r)
 1085: 		return NULL;
 1086: 
 1087: 	/* custom exception handling ... */
 1088: 	if (arg) {
 1089: 		if (arg == (void*) EV_EOF)
 1090: 			return NULL;
 1091: 		return (void*) -1;	/* raise scheduler error!!! */
 1092: 	}
 1093: 
 1094: 	/* if error hook exists */
 1095: 	if (r->root_hooks.hook_root.error)
 1096: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1097: 
 1098: 	/* default case! */
 1099: 	LOGERR;
 1100: 	return NULL;
 1101: }
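
/*
 * The exception hook delegates to an optional per-root error hook and passes
 * errno through the void* argument.  A sketch of such an error hook; the
 * hook_root.error slot is the one referenced above, while my_error_hook and
 * the syslog-based body are purely illustrative:
 *
 *	#include <syslog.h>
 *	#include <string.h>
 *
 *	static void *
 *	my_error_hook(void *root, void *arg)
 *	{
 *		syslog(LOG_ERR, "sched error: %s", strerror((int) (intptr_t) arg));
 *		return NULL;	// non-NULL would be treated as a raised scheduler error
 *	}
 *
 *	// installed after the root task has been created (field names as above):
 *	// r->root_hooks.hook_root.error = my_error_hook;
 */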
 1102: 
 1103: /*
 1104:  * sched_hook_condition() - Default CONDITION hook
 1105:  *
 1106:  * @root = root task
 1107:  * @arg = killState from schedRun()
 1108:  * return: NULL kill scheduler loop or !=NULL ok
 1109:  */
 1110: void *
 1111: sched_hook_condition(void *root, void *arg)
 1112: {
 1113: 	sched_root_task_t *r = root;
 1114: 
 1115: 	if (!r)
 1116: 		return NULL;
 1117: 
 1118: 	return (void*) (r->root_cond - *(intptr_t*) arg);
 1119: }
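
/*
 * The CONDITION hook keeps schedRun()'s loop alive for as long as root_cond
 * differs from the kill state handed in through arg: the subtraction yields a
 * non-NULL value while they differ and NULL once they match, which schedRun()
 * treats as "stop".  Illustrative call (values are assumptions, not the
 * library's actual states):
 *
 *	intptr_t kill = 0;			// r->root_cond != 0 here
 *	sched_hook_condition(r, &kill);		// != NULL -> keep looping
 *	kill = r->root_cond;
 *	sched_hook_condition(r, &kill);		// difference is 0 -> NULL -> loop ends
 */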
