File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / hooks.c
Revision 1.14.2.5: download - view: text, annotated - select for diffs - revision graph
Thu Aug 23 00:31:41 2012 UTC (12 years, 1 month ago) by misho
Branches: sched3_3
Diff to: branchpoint 1.14: preferred, unified
search solution for fast cancels

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.14.2.5 2012/08/23 00:31:41 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: /*
   51:  * sched_hook_init() - Default INIT hook
   52:  *
   53:  * @root = root task
   54:  * @arg = unused
 * return: NULL on success or (void*) -1 on error
   56:  */
   57: void *
   58: sched_hook_init(void *root, void *arg __unused)
   59: {
   60: 	sched_root_task_t *r = root;
   61: 
   62: 	if (!r)
   63: 		return (void*) -1;
   64: 
   65: 	r->root_kq = kqueue();
   66: 	if (r->root_kq == -1) {
   67: 		LOGERR;
   68: 		return (void*) -1;
   69: 	}
   70: 
   71: 	return NULL;
   72: }
   73: 
   74: /*
   75:  * sched_hook_fini() - Default FINI hook
   76:  *
   77:  * @root = root task
   78:  * @arg = unused
 * return: NULL on success or (void*) -1 on error
   80:  */
   81: void *
   82: sched_hook_fini(void *root, void *arg __unused)
   83: {
   84: 	sched_root_task_t *r = root;
   85: 
   86: 	if (!r)
   87: 		return (void*) -1;
   88: 
   89: 	if (r->root_kq > 2) {
   90: 		close(r->root_kq);
   91: 		r->root_kq = 0;
   92: 	}
   93: 
   94: 	return NULL;
   95: }
   96: 
   97: /*
   98:  * sched_hook_cancel() - Default CANCEL hook
   99:  *
  100:  * @task = current task
  101:  * @arg = unused
 * return: NULL on success or (void*) -1 on error
  103:  */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent chg[1];
	/* zero timeout: the kevent() below applies the change without blocking */
	struct timespec timeout = { 0, 0 };
	register int i = 0;
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* Build an EV_DELETE change record whose filter/ident match how the
	 * task was registered.  The NetBSD branches differ only in the udata
	 * cast (intptr_t vs void*), required by that platform's EV_SET(). */
	switch (TASK_TYPE(t)) {
		case taskREAD:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
			break;
		case taskWRITE:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
			break;
		case taskALARM:
			/* timers are identified by their TASK_DATA pointer value */
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
					0, 0, (void*) TASK_DATA(t));
#endif
			break;
		case taskNODE:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
			break;
		case taskPROC:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			break;
		case taskSIGNAL:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			/* TASK_VAL holds the aiocb; cancel the in-flight request,
			 * reap its status if the cancel took, then release it */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				free(acb);
				TASK_VAL(t) = 0;
			}
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			/* TASK_VAL holds an array of TASK_DATLEN aiocb pointers;
			 * cancel and free every member of the list-I/O batch */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					free(acbs[i]);
				}
				free(acbs);
				TASK_VAL(t) = 0;
			}
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			break;
#endif
		case taskTHREAD:
			/* NOTE(review): without HAVE_LIBPTHREAD this case falls
			 * through to default and returns NULL — confirm intended */
#ifdef HAVE_LIBPTHREAD
			/* retry pthread_cancel() up to MAX_TASK_MISS times;
			 * once it succeeds, queue a join for joinable threads */
			for (i = 0; i < MAX_TASK_MISS; i++)
				if (!pthread_cancel((pthread_t) TASK_VAL(t))) {
					/* joinable thread */
					if (TASK_FLAG(t) == PTHREAD_CREATE_JOINABLE)
						schedTask(TASK_ROOT(t), _sched_threadJoin, 
								TASK_ARG(t), TASK_VAL(t), 
								TASK_DATA(t), TASK_DATLEN(t));
					return NULL;
				}
			return (void*) -1;
#endif
		default:
			/* unknown/queue-only task types need no kqueue change */
			return NULL;
	}

	/* remove the registration from the kqueue; return value ignored
	 * (best-effort delete — the event may already be gone) */
	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
	return NULL;
}
  230: 
  231: #ifdef HAVE_LIBPTHREAD
  232: /*
  233:  * sched_hook_thread() - Default THREAD hook
  234:  *
  235:  * @task = current task
  236:  * @arg = pthread attributes
 * return: NULL on success or (void*) -1 on error
  238:  */
  239: void *
  240: sched_hook_thread(void *task, void *arg)
  241: {
  242: 	sched_task_t *t = task;
  243: 	pthread_t tid;
  244: 	sigset_t s, o;
  245: 
  246: 	if (!t || !TASK_ROOT(t))
  247: 		return (void*) -1;
  248: 
  249: 	sigfillset(&s);
  250: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  251: 	if (pthread_create(&tid, (pthread_attr_t*) arg, 
  252: 				(void *(*)(void*)) TASK_FUNC(t), t)) {
  253: 		LOGERR;
  254: 		pthread_sigmask(SIG_SETMASK, &o, NULL);
  255: 		return (void*) -1;
  256: 	}
  257: 
  258: 	if (!TASK_ISLOCKED(t))
  259: 		TASK_LOCK(t);
  260: 
  261: 	TASK_VAL(t) = (u_long) tid;
  262: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  263: 	return NULL;
  264: }
  265: #endif
  266: 
  267: /*
  268:  * sched_hook_read() - Default READ hook
  269:  *
  270:  * @task = current task
  271:  * @arg = unused
 * return: NULL on success or (void*) -1 on error
  273:  */
  274: void *
  275: sched_hook_read(void *task, void *arg __unused)
  276: {
  277: 	sched_task_t *t = task;
  278: 	struct kevent chg[1];
  279: 	struct timespec timeout = { 0, 0 };
  280: 
  281: 	if (!t || !TASK_ROOT(t))
  282: 		return (void*) -1;
  283: 
  284: #ifdef __NetBSD__
  285: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  286: #else
  287: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  288: #endif
  289: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  290: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  291: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  292: 		else
  293: 			LOGERR;
  294: 		return (void*) -1;
  295: 	}
  296: 
  297: 	return NULL;
  298: }
  299: 
  300: /*
  301:  * sched_hook_write() - Default WRITE hook
  302:  *
  303:  * @task = current task
  304:  * @arg = unused
 * return: NULL on success or (void*) -1 on error
  306:  */
  307: void *
  308: sched_hook_write(void *task, void *arg __unused)
  309: {
  310: 	sched_task_t *t = task;
  311: 	struct kevent chg[1];
  312: 	struct timespec timeout = { 0, 0 };
  313: 
  314: 	if (!t || !TASK_ROOT(t))
  315: 		return (void*) -1;
  316: 
  317: #ifdef __NetBSD__
  318: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  319: #else
  320: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  321: #endif
  322: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  323: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  324: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  325: 		else
  326: 			LOGERR;
  327: 		return (void*) -1;
  328: 	}
  329: 
  330: 	return NULL;
  331: }
  332: 
  333: /*
  334:  * sched_hook_alarm() - Default ALARM hook
  335:  *
  336:  * @task = current task
  337:  * @arg = unused
 * return: NULL on success or (void*) -1 on error
  339:  */
  340: void *
  341: sched_hook_alarm(void *task, void *arg __unused)
  342: {
  343: 	sched_task_t *t = task;
  344: 	struct kevent chg[1];
  345: 	struct timespec timeout = { 0, 0 };
  346: 
  347: 	if (!t || !TASK_ROOT(t))
  348: 		return (void*) -1;
  349: 
  350: #ifdef __NetBSD__
  351: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 
  352: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  353: 			(intptr_t) TASK_DATA(t));
  354: #else
  355: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 
  356: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  357: 			(void*) TASK_DATA(t));
  358: #endif
  359: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  360: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  361: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  362: 		else
  363: 			LOGERR;
  364: 		return (void*) -1;
  365: 	}
  366: 
  367: 	return NULL;
  368: }
  369: 
  370: /*
  371:  * sched_hook_node() - Default NODE hook
  372:  *
  373:  * @task = current task
  374:  * @arg = unused
 * return: NULL on success or (void*) -1 on error
  376:  */
  377: void *
  378: sched_hook_node(void *task, void *arg __unused)
  379: {
  380: 	sched_task_t *t = task;
  381: 	struct kevent chg[1];
  382: 	struct timespec timeout = { 0, 0 };
  383: 
  384: 	if (!t || !TASK_ROOT(t))
  385: 		return (void*) -1;
  386: 
  387: #ifdef __NetBSD__
  388: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  389: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  390: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  391: #else
  392: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  393: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  394: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  395: #endif
  396: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  397: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  398: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  399: 		else
  400: 			LOGERR;
  401: 		return (void*) -1;
  402: 	}
  403: 
  404: 	return NULL;
  405: }
  406: 
  407: /*
  408:  * sched_hook_proc() - Default PROC hook
  409:  *
  410:  * @task = current task
  411:  * @arg = unused
  412:  * return: <0 errors and 0 ok
  413:  */
  414: void *
  415: sched_hook_proc(void *task, void *arg __unused)
  416: {
  417: 	sched_task_t *t = task;
  418: 	struct kevent chg[1];
  419: 	struct timespec timeout = { 0, 0 };
  420: 
  421: 	if (!t || !TASK_ROOT(t))
  422: 		return (void*) -1;
  423: 
  424: #ifdef __NetBSD__
  425: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  426: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  427: #else
  428: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  429: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  430: #endif
  431: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  432: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  433: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  434: 		else
  435: 			LOGERR;
  436: 		return (void*) -1;
  437: 	}
  438: 
  439: 	return NULL;
  440: }
  441: 
  442: /*
  443:  * sched_hook_signal() - Default SIGNAL hook
  444:  *
  445:  * @task = current task
  446:  * @arg = unused
  447:  * return: <0 errors and 0 ok
  448:  */
  449: void *
  450: sched_hook_signal(void *task, void *arg __unused)
  451: {
  452: 	sched_task_t *t = task;
  453: 	struct kevent chg[1];
  454: 	struct timespec timeout = { 0, 0 };
  455: 
  456: 	if (!t || !TASK_ROOT(t))
  457: 		return (void*) -1;
  458: 
  459: #ifdef __NetBSD__
  460: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (intptr_t) TASK_VAL(t));
  461: #else
  462: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (void*) TASK_VAL(t));
  463: #endif
  464: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  465: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  466: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  467: 		else
  468: 			LOGERR;
  469: 		return (void*) -1;
  470: 	}
  471: 
  472: 	return NULL;
  473: }
  474: 
  475: /*
  476:  * sched_hook_user() - Default USER hook
  477:  *
  478:  * @task = current task
  479:  * @arg = unused
 * return: NULL on success or (void*) -1 on error
  481:  */
  482: #ifdef EVFILT_USER
  483: void *
  484: sched_hook_user(void *task, void *arg __unused)
  485: {
  486: 	sched_task_t *t = task;
  487: 	struct kevent chg[1];
  488: 	struct timespec timeout = { 0, 0 };
  489: 
  490: 	if (!t || !TASK_ROOT(t))
  491: 		return (void*) -1;
  492: 
  493: #ifdef __NetBSD__
  494: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  495: 			0, (intptr_t) TASK_VAL(t));
  496: #else
  497: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  498: 			0, (void*) TASK_VAL(t));
  499: #endif
  500: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  501: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  502: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  503: 		else
  504: 			LOGERR;
  505: 		return (void*) -1;
  506: 	}
  507: 
  508: 	return NULL;
  509: }
  510: #endif
  511: 
  512: /*
  513:  * sched_hook_fetch() - Default FETCH hook
  514:  *
  515:  * @root = root task
  516:  * @arg = unused
  517:  * return: NULL error or !=NULL fetched task
  518:  */
  519: void *
  520: sched_hook_fetch(void *root, void *arg __unused)
  521: {
  522: 	sched_root_task_t *r = root;
  523: 	sched_task_t *task, *tmp;
  524: 	struct timespec now, m, mtmp;
  525: 	struct timespec *timeout;
  526: 	struct kevent evt[1], res[KQ_EVENTS];
  527: 	register int i, flg;
  528: 	int en;
  529: #ifdef AIO_SUPPORT
  530: 	int len, fd;
  531: 	struct aiocb *acb;
  532: #ifdef EVFILT_LIO
  533: 	int l;
  534: 	register int j;
  535: 	off_t off;
  536: 	struct aiocb **acbs;
  537: 	struct iovec *iv;
  538: #endif	/* EVFILT_LIO */
  539: #endif	/* AIO_SUPPORT */
  540: 
  541: 	if (!r)
  542: 		return NULL;
  543: 
  544: 	/* get new task by queue priority */
  545: 	while ((task = TAILQ_FIRST(&r->root_event))) {
  546: #ifdef HAVE_LIBPTHREAD
  547: 		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
  548: #endif
  549: 		TAILQ_REMOVE(&r->root_event, task, task_node);
  550: #ifdef HAVE_LIBPTHREAD
  551: 		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
  552: #endif
  553: 		task->task_type = taskUNUSE;
  554: #ifdef HAVE_LIBPTHREAD
  555: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  556: #endif
  557: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  558: #ifdef HAVE_LIBPTHREAD
  559: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  560: #endif
  561: 		return task;
  562: 	}
  563: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
  564: #ifdef HAVE_LIBPTHREAD
  565: 		pthread_mutex_lock(&r->root_mtx[taskREADY]);
  566: #endif
  567: 		TAILQ_REMOVE(&r->root_ready, task, task_node);
  568: #ifdef HAVE_LIBPTHREAD
  569: 		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  570: #endif
  571: 		task->task_type = taskUNUSE;
  572: #ifdef HAVE_LIBPTHREAD
  573: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  574: #endif
  575: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  576: #ifdef HAVE_LIBPTHREAD
  577: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  578: #endif
  579: 		return task;
  580: 	}
  581: 
  582: #ifdef TIMER_WITHOUT_SORT
  583: 	clock_gettime(CLOCK_MONOTONIC, &now);
  584: 
  585: 	sched_timespecclear(&r->root_wait);
  586: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
  587: 		if (!sched_timespecisset(&r->root_wait))
  588: 			r->root_wait = TASK_TS(task);
  589: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
  590: 			r->root_wait = TASK_TS(task);
  591: 	}
  592: 
  593: 	if (TAILQ_FIRST(&r->root_timer)) {
  594: 		m = r->root_wait;
  595: 		sched_timespecsub(&m, &now, &mtmp);
  596: 		r->root_wait = mtmp;
  597: 	} else {
  598: 		/* set wait INFTIM */
  599: 		sched_timespecinf(&r->root_wait);
  600: 	}
  601: #else
  602: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
  603: 		clock_gettime(CLOCK_MONOTONIC, &now);
  604: 
  605: 		m = TASK_TS(task);
  606: 		sched_timespecsub(&m, &now, &mtmp);
  607: 		r->root_wait = mtmp;
  608: 	} else {
  609: 		/* set wait INFTIM */
  610: 		sched_timespecinf(&r->root_wait);
  611: 	}
  612: #endif
  613: 	/* if present member of task, set NOWAIT */
  614: 	if (TAILQ_FIRST(&r->root_task))
  615: 		sched_timespecclear(&r->root_wait);
  616: 
  617: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
  618: 		timeout = &r->root_wait;
  619: 	else if (sched_timespecisinf(&r->root_poll))
  620: 		timeout = NULL;
  621: 	else
  622: 		timeout = &r->root_poll;
  623: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
  624: 		if (r->root_hooks.hook_exec.exception) {
  625: 			if (r->root_hooks.hook_exec.exception(r, NULL))
  626: 				return NULL;
  627: 		} else if (errno != EINTR)
  628: 			LOGERR;
  629: 		return NULL;
  630: 	}
  631: 
  632: 	now.tv_sec = now.tv_nsec = 0;
  633: 	/* Go and catch the cat into pipes ... */
  634: 	for (i = 0; i < en; i++) {
  635: 		memcpy(evt, &res[i], sizeof evt);
  636: 		evt->flags = EV_DELETE;
  637: 		/* Put read/write task to ready queue */
  638: 		switch (res[i].filter) {
  639: 			case EVFILT_READ:
  640: 				flg = 0;
  641: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  642: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  643: 						continue;
  644: 					else {
  645: 						flg++;
  646: 						TASK_RET(task) = res[i].data;
  647: 						TASK_FLAG(task) = res[i].fflags;
  648: 					}
  649: 					/* remove read handle */
  650: #ifdef HAVE_LIBPTHREAD
  651: 					pthread_mutex_lock(&r->root_mtx[taskREAD]);
  652: #endif
  653: 					TAILQ_REMOVE(&r->root_read, task, task_node);
  654: #ifdef HAVE_LIBPTHREAD
  655: 					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
  656: #endif
  657: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  658:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  659: 							task->task_type = taskUNUSE;
  660: #ifdef HAVE_LIBPTHREAD
  661: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  662: #endif
  663: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  664: #ifdef HAVE_LIBPTHREAD
  665: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  666: #endif
  667: 						} else {
  668: 							task->task_type = taskREADY;
  669: #ifdef HAVE_LIBPTHREAD
  670: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  671: #endif
  672: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  673: #ifdef HAVE_LIBPTHREAD
  674: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  675: #endif
  676: 						}
  677: 					} else {
  678: 						task->task_type = taskREADY;
  679: #ifdef HAVE_LIBPTHREAD
  680: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  681: #endif
  682: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  683: #ifdef HAVE_LIBPTHREAD
  684: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  685: #endif
  686: 					}
  687: 				}
  688: 				/* if match at least 2, don't remove resouce of event */
  689: 				if (flg > 1)
  690: 					evt->flags ^= evt->flags;
  691: 				break;
  692: 			case EVFILT_WRITE:
  693: 				flg = 0;
  694: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  695: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  696: 						continue;
  697: 					else {
  698: 						flg++;
  699: 						TASK_RET(task) = res[i].data;
  700: 						TASK_FLAG(task) = res[i].fflags;
  701: 					}
  702: 					/* remove write handle */
  703: #ifdef HAVE_LIBPTHREAD
  704: 					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
  705: #endif
  706: 					TAILQ_REMOVE(&r->root_write, task, task_node);
  707: #ifdef HAVE_LIBPTHREAD
  708: 					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
  709: #endif
  710: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  711:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  712: 							task->task_type = taskUNUSE;
  713: #ifdef HAVE_LIBPTHREAD
  714: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  715: #endif
  716: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  717: #ifdef HAVE_LIBPTHREAD
  718: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  719: #endif
  720: 						} else {
  721: 							task->task_type = taskREADY;
  722: #ifdef HAVE_LIBPTHREAD
  723: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  724: #endif
  725: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  726: #ifdef HAVE_LIBPTHREAD
  727: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  728: #endif
  729: 						}
  730: 					} else {
  731: 						task->task_type = taskREADY;
  732: #ifdef HAVE_LIBPTHREAD
  733: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  734: #endif
  735: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  736: #ifdef HAVE_LIBPTHREAD
  737: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  738: #endif
  739: 					}
  740: 				}
  741: 				/* if match at least 2, don't remove resouce of event */
  742: 				if (flg > 1)
  743: 					evt->flags ^= evt->flags;
  744: 				break;
  745: 			case EVFILT_TIMER:
  746: 				flg = 0;
  747: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  748: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  749: 						continue;
  750: 					else {
  751: 						flg++;
  752: 						TASK_RET(task) = res[i].data;
  753: 						TASK_FLAG(task) = res[i].fflags;
  754: 					}
  755: 					/* remove alarm handle */
  756: #ifdef HAVE_LIBPTHREAD
  757: 					pthread_mutex_lock(&r->root_mtx[taskALARM]);
  758: #endif
  759: 					TAILQ_REMOVE(&r->root_alarm, task, task_node);
  760: #ifdef HAVE_LIBPTHREAD
  761: 					pthread_mutex_unlock(&r->root_mtx[taskALARM]);
  762: #endif
  763: 					task->task_type = taskREADY;
  764: #ifdef HAVE_LIBPTHREAD
  765: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  766: #endif
  767: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  768: #ifdef HAVE_LIBPTHREAD
  769: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  770: #endif
  771: 				}
  772: 				/* if match at least 2, don't remove resouce of event */
  773: 				if (flg > 1)
  774: 					evt->flags ^= evt->flags;
  775: 				break;
  776: 			case EVFILT_VNODE:
  777: 				flg = 0;
  778: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  779: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  780: 						continue;
  781: 					else {
  782: 						flg++;
  783: 						TASK_RET(task) = res[i].data;
  784: 						TASK_FLAG(task) = res[i].fflags;
  785: 					}
  786: 					/* remove node handle */
  787: #ifdef HAVE_LIBPTHREAD
  788: 					pthread_mutex_lock(&r->root_mtx[taskNODE]);
  789: #endif
  790: 					TAILQ_REMOVE(&r->root_node, task, task_node);
  791: #ifdef HAVE_LIBPTHREAD
  792: 					pthread_mutex_unlock(&r->root_mtx[taskNODE]);
  793: #endif
  794: 					task->task_type = taskREADY;
  795: #ifdef HAVE_LIBPTHREAD
  796: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  797: #endif
  798: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  799: #ifdef HAVE_LIBPTHREAD
  800: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  801: #endif
  802: 				}
  803: 				/* if match at least 2, don't remove resouce of event */
  804: 				if (flg > 1)
  805: 					evt->flags ^= evt->flags;
  806: 				break;
  807: 			case EVFILT_PROC:
  808: 				flg = 0;
  809: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  810: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  811: 						continue;
  812: 					else {
  813: 						flg++;
  814: 						TASK_RET(task) = res[i].data;
  815: 						TASK_FLAG(task) = res[i].fflags;
  816: 					}
  817: 					/* remove proc handle */
  818: #ifdef HAVE_LIBPTHREAD
  819: 					pthread_mutex_lock(&r->root_mtx[taskPROC]);
  820: #endif
  821: 					TAILQ_REMOVE(&r->root_proc, task, task_node);
  822: #ifdef HAVE_LIBPTHREAD
  823: 					pthread_mutex_unlock(&r->root_mtx[taskPROC]);
  824: #endif
  825: 					task->task_type = taskREADY;
  826: #ifdef HAVE_LIBPTHREAD
  827: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  828: #endif
  829: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  830: #ifdef HAVE_LIBPTHREAD
  831: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  832: #endif
  833: 				}
  834: 				/* if match at least 2, don't remove resouce of event */
  835: 				if (flg > 1)
  836: 					evt->flags ^= evt->flags;
  837: 				break;
  838: 			case EVFILT_SIGNAL:
  839: 				flg = 0;
  840: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  841: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  842: 						continue;
  843: 					else {
  844: 						flg++;
  845: 						TASK_RET(task) = res[i].data;
  846: 						TASK_FLAG(task) = res[i].fflags;
  847: 					}
  848: 					/* remove signal handle */
  849: #ifdef HAVE_LIBPTHREAD
  850: 					pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
  851: #endif
  852: 					TAILQ_REMOVE(&r->root_signal, task, task_node);
  853: #ifdef HAVE_LIBPTHREAD
  854: 					pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
  855: #endif
  856: 					task->task_type = taskREADY;
  857: #ifdef HAVE_LIBPTHREAD
  858: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  859: #endif
  860: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  861: #ifdef HAVE_LIBPTHREAD
  862: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  863: #endif
  864: 				}
  865: 				/* if match at least 2, don't remove resouce of event */
  866: 				if (flg > 1)
  867: 					evt->flags ^= evt->flags;
  868: 				break;
  869: #ifdef AIO_SUPPORT
  870: 			case EVFILT_AIO:
  871: 				flg = 0;
  872: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
  873: 					acb = (struct aiocb*) TASK_VAL(task);
  874: 					if (acb != ((struct aiocb*) res[i].udata))
  875: 						continue;
  876: 					else {
  877: 						flg++;
  878: 						TASK_RET(task) = res[i].data;
  879: 						TASK_FLAG(task) = res[i].fflags;
  880: 					}
  881: 					/* remove user handle */
  882: #ifdef HAVE_LIBPTHREAD
  883: 					pthread_mutex_lock(&r->root_mtx[taskAIO]);
  884: #endif
  885: 					TAILQ_REMOVE(&r->root_aio, task, task_node);
  886: #ifdef HAVE_LIBPTHREAD
  887: 					pthread_mutex_unlock(&r->root_mtx[taskAIO]);
  888: #endif
  889: 					task->task_type = taskREADY;
  890: #ifdef HAVE_LIBPTHREAD
  891: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  892: #endif
  893: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  894: #ifdef HAVE_LIBPTHREAD
  895: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  896: #endif
  897: 					fd = acb->aio_fildes;
  898: 					if ((len = aio_return(acb)) != -1) {
  899: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
  900: 							LOGERR;
  901: 					} else
  902: 						LOGERR;
  903: 					free(acb);
  904: 					TASK_DATLEN(task) = (u_long) len;
  905: 					TASK_FD(task) = fd;
  906: 				}
  907: 				/* if match at least 2, don't remove resouce of event */
  908: 				if (flg > 1)
  909: 					evt->flags ^= evt->flags;
  910: 				break;
  911: #ifdef EVFILT_LIO
  912: 			case EVFILT_LIO:
  913: 				flg = 0;
  914: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
  915: 					acbs = (struct aiocb**) TASK_VAL(task);
  916: 					if (acbs != ((struct aiocb**) res[i].udata))
  917: 						continue;
  918: 					else {
  919: 						flg++;
  920: 						TASK_RET(task) = res[i].data;
  921: 						TASK_FLAG(task) = res[i].fflags;
  922: 					}
  923: 					/* remove user handle */
  924: #ifdef HAVE_LIBPTHREAD
  925: 					pthread_mutex_lock(&r->root_mtx[taskLIO]);
  926: #endif
  927: 					TAILQ_REMOVE(&r->root_lio, task, task_node);
  928: #ifdef HAVE_LIBPTHREAD
  929: 					pthread_mutex_unlock(&r->root_mtx[taskLIO]);
  930: #endif
  931: 					task->task_type = taskREADY;
  932: #ifdef HAVE_LIBPTHREAD
  933: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  934: #endif
  935: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  936: #ifdef HAVE_LIBPTHREAD
  937: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  938: #endif
  939: 					iv = (struct iovec*) TASK_DATA(task);
  940: 					fd = acbs[0]->aio_fildes;
  941: 					off = acbs[0]->aio_offset;
  942: 					for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
  943: 						if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
  944: 							l = 0;
  945: 						else
  946: 							l = iv[i].iov_len;
  947: 						free(acbs[i]);
  948: 					}
  949: 					free(acbs);
  950: 					TASK_DATLEN(task) = (u_long) len;
  951: 					TASK_FD(task) = fd;
  952: 
  953: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
  954: 						LOGERR;
  955: 				}
  956: 				/* if matched at least 2, don't remove resource of event */
  957: 				if (flg > 1)
  958: 					evt->flags ^= evt->flags;
  959: 				break;
  960: #endif	/* EVFILT_LIO */
  961: #endif	/* AIO_SUPPORT */
  962: #ifdef EVFILT_USER
  963: 			case EVFILT_USER:
  964: 				flg = 0;
  965: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
  966: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  967: 						continue;
  968: 					else {
  969: 						flg++;
  970: 						TASK_RET(task) = res[i].data;
  971: 						TASK_FLAG(task) = res[i].fflags;
  972: 					}
  973: 					/* remove user handle */
  974: #ifdef HAVE_LIBPTHREAD
  975: 					pthread_mutex_lock(&r->root_mtx[taskUSER]);
  976: #endif
  977: 					TAILQ_REMOVE(&r->root_user, task, task_node);
  978: #ifdef HAVE_LIBPTHREAD
  979: 					pthread_mutex_unlock(&r->root_mtx[taskUSER]);
  980: #endif
  981: 					task->task_type = taskREADY;
  982: #ifdef HAVE_LIBPTHREAD
  983: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  984: #endif
  985: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  986: #ifdef HAVE_LIBPTHREAD
  987: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  988: #endif
  989: 				}
  990: 				/* if matched at least 2, don't remove resource of event */
  991: 				if (flg > 1)
  992: 					evt->flags ^= evt->flags;
  993: 				break;
  994: #endif	/* EVFILT_USER */
  995: 		}
  996: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
  997: 			if (r->root_hooks.hook_exec.exception) {
  998: 				if (r->root_hooks.hook_exec.exception(r, NULL))
  999: 					return NULL;
 1000: 			} else
 1001: 				LOGERR;
 1002: 		}
 1003: 	}
 1004: 
 1005: 	/* timer update & put in ready queue */
 1006: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1007: 
 1008: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1009: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
 1010: #ifdef HAVE_LIBPTHREAD
 1011: 			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
 1012: #endif
 1013: 			TAILQ_REMOVE(&r->root_timer, task, task_node);
 1014: #ifdef HAVE_LIBPTHREAD
 1015: 			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
 1016: #endif
 1017: 			task->task_type = taskREADY;
 1018: #ifdef HAVE_LIBPTHREAD
 1019: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1020: #endif
 1021: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1022: #ifdef HAVE_LIBPTHREAD
 1023: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1024: #endif
 1025: 		}
 1026: 
 1027: 	/* put regular task priority task to ready queue, 
 1028: 		if there is no ready task or reach max missing hit for regular task */
 1029: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1030: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1031: 			r->root_miss ^= r->root_miss;
 1032: 
 1033: #ifdef HAVE_LIBPTHREAD
 1034: 			pthread_mutex_lock(&r->root_mtx[taskTASK]);
 1035: #endif
 1036: 			TAILQ_REMOVE(&r->root_task, task, task_node);
 1037: #ifdef HAVE_LIBPTHREAD
 1038: 			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
 1039: #endif
 1040: 			task->task_type = taskREADY;
 1041: #ifdef HAVE_LIBPTHREAD
 1042: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1043: #endif
 1044: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1045: #ifdef HAVE_LIBPTHREAD
 1046: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1047: #endif
 1048: 		} else
 1049: 			r->root_miss++;
 1050: 	} else
 1051: 		r->root_miss ^= r->root_miss;
 1052: 
 1053: 	/* OK, lets get ready task !!! */
 1054: 	task = TAILQ_FIRST(&r->root_ready);
 1055: 	if (!(task))
 1056: 		return NULL;
 1057: 
 1058: #ifdef HAVE_LIBPTHREAD
 1059: 	pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1060: #endif
 1061: 	TAILQ_REMOVE(&r->root_ready, task, task_node);
 1062: #ifdef HAVE_LIBPTHREAD
 1063: 	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1064: #endif
 1065: 	task->task_type = taskUNUSE;
 1066: #ifdef HAVE_LIBPTHREAD
 1067: 	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1068: #endif
 1069: 	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1070: #ifdef HAVE_LIBPTHREAD
 1071: 	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1072: #endif
 1073: 	return task;
 1074: }
 1075: 
 1076: /*
 1077:  * sched_hook_exception() - Default EXCEPTION hook
 1078:  *
 1079:  * @root = root task
 1080:  * @arg = custom handling: EV_EOF is tolerated (ignored); any other non-NULL value raises a scheduler error; NULL uses the default path (error hook or log errno)
 1081:  * return: NULL ok/ignored, (void*) -1 raise scheduler error, or the result of the user error hook
 1082:  */
 1083: void *
 1084: sched_hook_exception(void *root, void *arg)
 1085: {
 1086: 	sched_root_task_t *r = root;
 1087: 
 1088: 	if (!r)
 1089: 		return NULL;
 1090: 
 1091: 	/* custom exception handling ... */
 1092: 	if (arg) {
 1093: 		if (arg == (void*) EV_EOF)
 1094: 			return NULL;
 1095: 		return (void*) -1;	/* raise scheduler error!!! */
 1096: 	}
 1097: 
 1098: 	/* if error hook exists */
 1099: 	if (r->root_hooks.hook_root.error)
 1100: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1101: 
 1102: 	/* default case! */
 1103: 	LOGERR;
 1104: 	return NULL;
 1105: }
 1106: 
 1107: /*
 1108:  * sched_hook_condition() - Default CONDITION hook
 1109:  *
 1110:  * @root = root task
 1111:  * @arg = killState from schedRun()
 1112:  * return: NULL kill scheduler loop or !=NULL ok
 1113:  */
 1114: void *
 1115: sched_hook_condition(void *root, void *arg)
 1116: {
 1117: 	sched_root_task_t *r = root;
 1118: 
 1119: 	if (!r)
 1120: 		return NULL;
 1121: 
 1122: 	return (void*) (r->root_cond - *(intptr_t*) arg);
 1123: }

FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>