File: ELWIX - Embedded LightWeight unIX / libaitsched / src / hooks.c
Revision 1.17
Thu May 30 09:13:52 2013 UTC by misho
Branches: MAIN
CVS tags: sched3_6, SCHED3_5, HEAD
version 3.5

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.17 2013/05/30 09:13:52 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
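/*
 * Default hook implementations for the libaitsched scheduler root.  Tasks are
 * registered with and removed from the kqueue(2) descriptor kept in root_kq;
 * these routines apply unless the caller installs hooks of its own.
 */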
   50: /*
   51:  * sched_hook_init() - Default INIT hook
   52:  *
   53:  * @root = root task
   54:  * @arg = unused
   55:  * return: <0 errors and 0 ok
   56:  */
   57: void *
   58: sched_hook_init(void *root, void *arg __unused)
   59: {
   60: 	sched_root_task_t *r = root;
   61: 
   62: 	if (!r)
   63: 		return (void*) -1;
   64: 
   65: 	r->root_kq = kqueue();
   66: 	if (r->root_kq == -1) {
   67: 		LOGERR;
   68: 		return (void*) -1;
   69: 	}
   70: 
   71: 	return NULL;
   72: }
   73: 
   74: /*
   75:  * sched_hook_fini() - Default FINI hook
   76:  *
   77:  * @root = root task
   78:  * @arg = unused
   79:  * return: <0 errors and 0 ok
   80:  */
   81: void *
   82: sched_hook_fini(void *root, void *arg __unused)
   83: {
   84: 	sched_root_task_t *r = root;
   85: 
   86: 	if (!r)
   87: 		return (void*) -1;
   88: 
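	/* close the kqueue descriptor; the > 2 guard keeps fds 0-2 (stdin/stdout/stderr) untouched */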
   89: 	if (r->root_kq > 2) {
   90: 		close(r->root_kq);
   91: 		r->root_kq = 0;
   92: 	}
   93: 
   94: 	return NULL;
   95: }
   96: 
   97: /*
   98:  * sched_hook_cancel() - Default CANCEL hook
   99:  *
  100:  * @task = current task
  101:  * @arg = unused
  102:  * return: <0 errors and 0 ok
  103:  */
  104: void *
  105: sched_hook_cancel(void *task, void *arg __unused)
  106: {
  107: 	sched_task_t *t = task;
  108: 	struct kevent chg[1];
  109: 	struct timespec timeout = { 0, 0 };
  110: #ifdef AIO_SUPPORT
  111: 	struct aiocb *acb;
  112: #ifdef EVFILT_LIO
  113: 	register int i = 0;
  114: 	struct aiocb **acbs;
  115: #endif	/* EVFILT_LIO */
  116: #endif	/* AIO_SUPPORT */
  117: 
  118: 	if (!t || !TASK_ROOT(t))
  119: 		return (void*) -1;
  120: 
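	/* build an EV_DELETE change for the kqueue filter that matches the task type
	 * (thread tasks are cancelled directly below instead) */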
  121: 	switch (TASK_TYPE(t)) {
  122: 		case taskREAD:
  123: #ifdef __NetBSD__
  124: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  125: #else
  126: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  127: #endif
  128: 			break;
  129: 		case taskWRITE:
  130: #ifdef __NetBSD__
  131: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  132: #else
  133: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  134: #endif
  135: 			break;
  136: 		case taskALARM:
  137: #ifdef __NetBSD__
  138: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  139: 					0, 0, (intptr_t) TASK_DATA(t));
  140: #else
  141: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  142: 					0, 0, (void*) TASK_DATA(t));
  143: #endif
  144: 			break;
  145: 		case taskNODE:
  146: #ifdef __NetBSD__
  147: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  148: #else
  149: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  150: #endif
  151: 			break;
  152: 		case taskPROC:
  153: #ifdef __NetBSD__
  154: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  155: #else
  156: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  157: #endif
  158: 			break;
  159: 		case taskSIGNAL:
  160: #ifdef __NetBSD__
  161: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  162: #else
  163: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  164: #endif
  165: 			break;
  166: #ifdef AIO_SUPPORT
  167: 		case taskAIO:
  168: #ifdef __NetBSD__
  169: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  170: #else
  171: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  172: #endif
  173: 			acb = (struct aiocb*) TASK_VAL(t);
  174: 			if (acb) {
  175: 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
  176: 					aio_return(acb);
  177: 				free(acb);
  178: 				TASK_VAL(t) = 0;
  179: 			}
  180: 			break;
  181: #ifdef EVFILT_LIO
  182: 		case taskLIO:
  183: #ifdef __NetBSD__
  184: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  185: #else
  186: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  187: #endif
  188: 			acbs = (struct aiocb**) TASK_VAL(t);
  189: 			if (acbs) {
  190: 				for (i = 0; i < TASK_DATLEN(t); i++) {
  191: 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
  192: 						aio_return(acbs[i]);
  193: 					free(acbs[i]);
  194: 				}
  195: 				free(acbs);
  196: 				TASK_VAL(t) = 0;
  197: 			}
  198: 			break;
  199: #endif	/* EVFILT_LIO */
  200: #endif	/* AIO_SUPPORT */
  201: #ifdef EVFILT_USER
  202: 		case taskUSER:
  203: #ifdef __NetBSD__
  204: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  205: #else
  206: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  207: #endif
  208: 			break;
  209: #endif
  210: 		case taskTHREAD:
  211: #ifdef HAVE_LIBPTHREAD
  212: 			pthread_cancel((pthread_t) TASK_VAL(t));
  213: #endif
  214: 		default:
  215: 			return NULL;
  216: 	}
  217: 
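	/* submit the change list; with nevents == 0 and a zero timeout the call only updates the kqueue */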
  218: 	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
  219: 	return NULL;
  220: }
  221: 
  222: #ifdef HAVE_LIBPTHREAD
  223: /*
  224:  * sched_hook_thread() - Default THREAD hook
  225:  *
  226:  * @task = current task
  227:  * @arg = pthread attributes
  228:  * return: <0 errors and 0 ok
  229:  */
  230: void *
  231: sched_hook_thread(void *task, void *arg)
  232: {
  233: 	sched_task_t *t = task;
  234: 	pthread_t tid;
  235: 	sigset_t s, o;
  236: 
  237: 	if (!t || !TASK_ROOT(t))
  238: 		return (void*) -1;
  239: 
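	/* block every signal before pthread_create() so the new thread starts with a
	 * fully blocked mask; the caller's original mask is restored below */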
  240: 	sigfillset(&s);
  241: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  242: 	if ((errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  243: 				(void *(*)(void*)) _sched_threadWrapper, t))) {
  244: 		LOGERR;
  245: 		pthread_sigmask(SIG_SETMASK, &o, NULL);
  246: 		return (void*) -1;
  247: 	} else
  248: 		TASK_VAL(t) = (u_long) tid;
  249: 
  250: 	if (!TASK_ISLOCKED(t))
  251: 		TASK_LOCK(t);
  252: 
  253: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  254: 	return NULL;
  255: }
  256: #endif
  257: 
  258: /*
  259:  * sched_hook_read() - Default READ hook
  260:  *
  261:  * @task = current task
  262:  * @arg = unused
  263:  * return: <0 errors and 0 ok
  264:  */
  265: void *
  266: sched_hook_read(void *task, void *arg __unused)
  267: {
  268: 	sched_task_t *t = task;
  269: 	struct kevent chg[1];
  270: 	struct timespec timeout = { 0, 0 };
  271: 
  272: 	if (!t || !TASK_ROOT(t))
  273: 		return (void*) -1;
  274: 
  275: #ifdef __NetBSD__
  276: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  277: #else
  278: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  279: #endif
  280: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  281: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  282: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  283: 		else
  284: 			LOGERR;
  285: 		return (void*) -1;
  286: 	}
  287: 
  288: 	return NULL;
  289: }
  290: 
  291: /*
  292:  * sched_hook_write() - Default WRITE hook
  293:  *
  294:  * @task = current task
  295:  * @arg = unused
  296:  * return: <0 errors and 0 ok
  297:  */
  298: void *
  299: sched_hook_write(void *task, void *arg __unused)
  300: {
  301: 	sched_task_t *t = task;
  302: 	struct kevent chg[1];
  303: 	struct timespec timeout = { 0, 0 };
  304: 
  305: 	if (!t || !TASK_ROOT(t))
  306: 		return (void*) -1;
  307: 
  308: #ifdef __NetBSD__
  309: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  310: #else
  311: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  312: #endif
  313: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  314: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  315: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  316: 		else
  317: 			LOGERR;
  318: 		return (void*) -1;
  319: 	}
  320: 
  321: 	return NULL;
  322: }
  323: 
  324: /*
  325:  * sched_hook_alarm() - Default ALARM hook
  326:  *
  327:  * @task = current task
  328:  * @arg = unused
  329:  * return: <0 errors and 0 ok
  330:  */
  331: void *
  332: sched_hook_alarm(void *task, void *arg __unused)
  333: {
  334: 	sched_task_t *t = task;
  335: 	struct kevent chg[1];
  336: 	struct timespec timeout = { 0, 0 };
  337: 
  338: 	if (!t || !TASK_ROOT(t))
  339: 		return (void*) -1;
  340: 
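	/* EVFILT_TIMER takes its period in milliseconds, hence the conversion from the task timespec */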
  341: #ifdef __NetBSD__
  342: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 
  343: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  344: 			(intptr_t) TASK_DATA(t));
  345: #else
  346: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 
  347: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  348: 			(void*) TASK_DATA(t));
  349: #endif
  350: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  351: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  352: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  353: 		else
  354: 			LOGERR;
  355: 		return (void*) -1;
  356: 	}
  357: 
  358: 	return NULL;
  359: }
  360: 
  361: /*
  362:  * sched_hook_node() - Default NODE hook
  363:  *
  364:  * @task = current task
  365:  * @arg = unused
  366:  * return: <0 errors and 0 ok
  367:  */
  368: void *
  369: sched_hook_node(void *task, void *arg __unused)
  370: {
  371: 	sched_task_t *t = task;
  372: 	struct kevent chg[1];
  373: 	struct timespec timeout = { 0, 0 };
  374: 
  375: 	if (!t || !TASK_ROOT(t))
  376: 		return (void*) -1;
  377: 
  378: #ifdef __NetBSD__
  379: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  380: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  381: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  382: #else
  383: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  384: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  385: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  386: #endif
  387: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  388: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  389: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  390: 		else
  391: 			LOGERR;
  392: 		return (void*) -1;
  393: 	}
  394: 
  395: 	return NULL;
  396: }
  397: 
  398: /*
  399:  * sched_hook_proc() - Default PROC hook
  400:  *
  401:  * @task = current task
  402:  * @arg = unused
  403:  * return: <0 errors and 0 ok
  404:  */
  405: void *
  406: sched_hook_proc(void *task, void *arg __unused)
  407: {
  408: 	sched_task_t *t = task;
  409: 	struct kevent chg[1];
  410: 	struct timespec timeout = { 0, 0 };
  411: 
  412: 	if (!t || !TASK_ROOT(t))
  413: 		return (void*) -1;
  414: 
  415: #ifdef __NetBSD__
  416: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  417: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  418: #else
  419: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  420: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  421: #endif
  422: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  423: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  424: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  425: 		else
  426: 			LOGERR;
  427: 		return (void*) -1;
  428: 	}
  429: 
  430: 	return NULL;
  431: }
  432: 
  433: /*
  434:  * sched_hook_signal() - Default SIGNAL hook
  435:  *
  436:  * @task = current task
  437:  * @arg = unused
  438:  * return: <0 errors and 0 ok
  439:  */
  440: void *
  441: sched_hook_signal(void *task, void *arg __unused)
  442: {
  443: 	sched_task_t *t = task;
  444: 	struct kevent chg[1];
  445: 	struct timespec timeout = { 0, 0 };
  446: 
  447: 	if (!t || !TASK_ROOT(t))
  448: 		return (void*) -1;
  449: 
  450: #ifdef __NetBSD__
  451: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (intptr_t) TASK_VAL(t));
  452: #else
  453: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (void*) TASK_VAL(t));
  454: #endif
  455: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  456: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  457: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  458: 		else
  459: 			LOGERR;
  460: 		return (void*) -1;
  461: 	}
  462: 
  463: 	return NULL;
  464: }
  465: 
  466: /*
  467:  * sched_hook_user() - Default USER hook
  468:  *
  469:  * @task = current task
  470:  * @arg = unused
  471:  * return: <0 errors and 0 ok
  472:  */
  473: #ifdef EVFILT_USER
  474: void *
  475: sched_hook_user(void *task, void *arg __unused)
  476: {
  477: 	sched_task_t *t = task;
  478: 	struct kevent chg[1];
  479: 	struct timespec timeout = { 0, 0 };
  480: 
  481: 	if (!t || !TASK_ROOT(t))
  482: 		return (void*) -1;
  483: 
  484: #ifdef __NetBSD__
  485: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  486: 			0, (intptr_t) TASK_VAL(t));
  487: #else
  488: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  489: 			0, (void*) TASK_VAL(t));
  490: #endif
  491: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  492: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  493: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  494: 		else
  495: 			LOGERR;
  496: 		return (void*) -1;
  497: 	}
  498: 
  499: 	return NULL;
  500: }
  501: #endif
  502: 
  503: /*
  504:  * sched_hook_fetch() - Default FETCH hook
  505:  *
  506:  * @root = root task
  507:  * @arg = unused
  508:  * return: NULL error or !=NULL fetched task
  509:  */
  510: void *
  511: sched_hook_fetch(void *root, void *arg __unused)
  512: {
  513: 	sched_root_task_t *r = root;
  514: 	sched_task_t *task, *tmp;
  515: 	struct timespec now, m, mtmp;
  516: 	struct timespec *timeout;
  517: 	struct kevent evt[1], res[KQ_EVENTS];
  518: 	register int i, flg;
  519: 	int en;
  520: #ifdef AIO_SUPPORT
  521: 	int len, fd;
  522: 	struct aiocb *acb;
  523: #ifdef EVFILT_LIO
  524: 	int l;
  525: 	register int j;
  526: 	off_t off;
  527: 	struct aiocb **acbs;
  528: 	struct iovec *iv;
  529: #endif	/* EVFILT_LIO */
  530: #endif	/* AIO_SUPPORT */
  531: 
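	/*
	 * Fetch order: pending events first, then tasks already on the ready queue;
	 * otherwise wait in kevent() with a timeout derived from the timer queue,
	 * dispatch the returned events, expire due timers and pick the next ready task.
	 */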
  532: 	if (!r)
  533: 		return NULL;
  534: 
  535: 	/* get new task by queue priority */
  536: 	while ((task = TAILQ_FIRST(&r->root_event))) {
  537: #ifdef HAVE_LIBPTHREAD
  538: 		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
  539: #endif
  540: 		TAILQ_REMOVE(&r->root_event, task, task_node);
  541: #ifdef HAVE_LIBPTHREAD
  542: 		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
  543: #endif
  544: 		task->task_type = taskUNUSE;
  545: #ifdef HAVE_LIBPTHREAD
  546: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  547: #endif
  548: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  549: #ifdef HAVE_LIBPTHREAD
  550: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  551: #endif
  552: 		return task;
  553: 	}
  554: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
  555: #ifdef HAVE_LIBPTHREAD
  556: 		pthread_mutex_lock(&r->root_mtx[taskREADY]);
  557: #endif
  558: 		TAILQ_REMOVE(&r->root_ready, task, task_node);
  559: #ifdef HAVE_LIBPTHREAD
  560: 		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  561: #endif
  562: 		task->task_type = taskUNUSE;
  563: #ifdef HAVE_LIBPTHREAD
  564: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  565: #endif
  566: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  567: #ifdef HAVE_LIBPTHREAD
  568: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  569: #endif
  570: 		return task;
  571: 	}
  572: 
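	/* compute the kevent() timeout: the nearest timer deadline, infinite when no
	 * timer is armed, and zero (immediate poll) when a regular task is pending */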
  573: #ifdef TIMER_WITHOUT_SORT
  574: 	clock_gettime(CLOCK_MONOTONIC, &now);
  575: 
  576: 	sched_timespecclear(&r->root_wait);
  577: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
  578: 		if (!sched_timespecisset(&r->root_wait))
  579: 			r->root_wait = TASK_TS(task);
  580: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
  581: 			r->root_wait = TASK_TS(task);
  582: 	}
  583: 
  584: 	if (TAILQ_FIRST(&r->root_timer)) {
  585: 		m = r->root_wait;
  586: 		sched_timespecsub(&m, &now, &mtmp);
  587: 		r->root_wait = mtmp;
  588: 	} else {
  589: 		/* set wait INFTIM */
  590: 		sched_timespecinf(&r->root_wait);
  591: 	}
  592: #else
  593: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
  594: 		clock_gettime(CLOCK_MONOTONIC, &now);
  595: 
  596: 		m = TASK_TS(task);
  597: 		sched_timespecsub(&m, &now, &mtmp);
  598: 		r->root_wait = mtmp;
  599: 	} else {
  600: 		/* set wait INFTIM */
  601: 		sched_timespecinf(&r->root_wait);
  602: 	}
  603: #endif
  604: 	/* if a regular task is pending, do not wait (poll immediately) */
  605: 	if (TAILQ_FIRST(&r->root_task))
  606: 		sched_timespecclear(&r->root_wait);
  607: 
  608: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
  609: 		timeout = &r->root_wait;
  610: 	else if (sched_timespecisinf(&r->root_poll))
  611: 		timeout = NULL;
  612: 	else
  613: 		timeout = &r->root_poll;
  614: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
  615: 		if (r->root_hooks.hook_exec.exception) {
  616: 			if (r->root_hooks.hook_exec.exception(r, NULL))
  617: 				return NULL;
  618: 		} else if (errno != EINTR)
  619: 			LOGERR;
  620: 		return NULL;
  621: 	}
  622: 
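	/* `now' is zeroed below and reused as a non-blocking timeout for the EV_DELETE calls */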
  623: 	now.tv_sec = now.tv_nsec = 0;
  624: 	/* Go and catch the cat into pipes ... (dispatch each returned event to its owning task) */
  625: 	for (i = 0; i < en; i++) {
  626: 		memcpy(evt, &res[i], sizeof evt);
  627: 		evt->flags = EV_DELETE;
  628: 		/* Put read/write task to ready queue */
  629: 		switch (res[i].filter) {
  630: 			case EVFILT_READ:
  631: 				flg = 0;
  632: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  633: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  634: 						continue;
  635: 					else {
  636: 						flg++;
  637: 						TASK_RET(task) = res[i].data;
  638: 						TASK_FLAG(task) = res[i].fflags;
  639: 					}
  640: 					/* remove read handle */
  641: #ifdef HAVE_LIBPTHREAD
  642: 					pthread_mutex_lock(&r->root_mtx[taskREAD]);
  643: #endif
  644: 					TAILQ_REMOVE(&r->root_read, task, task_node);
  645: #ifdef HAVE_LIBPTHREAD
  646: 					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
  647: #endif
  648: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  649:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  650: 							task->task_type = taskUNUSE;
  651: #ifdef HAVE_LIBPTHREAD
  652: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  653: #endif
  654: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  655: #ifdef HAVE_LIBPTHREAD
  656: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  657: #endif
  658: 						} else {
  659: 							task->task_type = taskREADY;
  660: #ifdef HAVE_LIBPTHREAD
  661: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  662: #endif
  663: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  664: #ifdef HAVE_LIBPTHREAD
  665: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  666: #endif
  667: 						}
  668: 					} else {
  669: 						task->task_type = taskREADY;
  670: #ifdef HAVE_LIBPTHREAD
  671: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  672: #endif
  673: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  674: #ifdef HAVE_LIBPTHREAD
  675: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  676: #endif
  677: 					}
  678: 				}
  679: 				/* if at least two tasks matched, don't remove the event resource */
  680: 				if (flg > 1)
  681: 					evt->flags ^= evt->flags;
  682: 				break;
  683: 			case EVFILT_WRITE:
  684: 				flg = 0;
  685: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  686: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  687: 						continue;
  688: 					else {
  689: 						flg++;
  690: 						TASK_RET(task) = res[i].data;
  691: 						TASK_FLAG(task) = res[i].fflags;
  692: 					}
  693: 					/* remove write handle */
  694: #ifdef HAVE_LIBPTHREAD
  695: 					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
  696: #endif
  697: 					TAILQ_REMOVE(&r->root_write, task, task_node);
  698: #ifdef HAVE_LIBPTHREAD
  699: 					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
  700: #endif
  701: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  702:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  703: 							task->task_type = taskUNUSE;
  704: #ifdef HAVE_LIBPTHREAD
  705: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  706: #endif
  707: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  708: #ifdef HAVE_LIBPTHREAD
  709: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  710: #endif
  711: 						} else {
  712: 							task->task_type = taskREADY;
  713: #ifdef HAVE_LIBPTHREAD
  714: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  715: #endif
  716: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  717: #ifdef HAVE_LIBPTHREAD
  718: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  719: #endif
  720: 						}
  721: 					} else {
  722: 						task->task_type = taskREADY;
  723: #ifdef HAVE_LIBPTHREAD
  724: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  725: #endif
  726: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  727: #ifdef HAVE_LIBPTHREAD
  728: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  729: #endif
  730: 					}
  731: 				}
  732: 				/* if at least two tasks matched, don't remove the event resource */
  733: 				if (flg > 1)
  734: 					evt->flags ^= evt->flags;
  735: 				break;
  736: 			case EVFILT_TIMER:
  737: 				flg = 0;
  738: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  739: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  740: 						continue;
  741: 					else {
  742: 						flg++;
  743: 						TASK_RET(task) = res[i].data;
  744: 						TASK_FLAG(task) = res[i].fflags;
  745: 					}
  746: 					/* remove alarm handle */
  747: #ifdef HAVE_LIBPTHREAD
  748: 					pthread_mutex_lock(&r->root_mtx[taskALARM]);
  749: #endif
  750: 					TAILQ_REMOVE(&r->root_alarm, task, task_node);
  751: #ifdef HAVE_LIBPTHREAD
  752: 					pthread_mutex_unlock(&r->root_mtx[taskALARM]);
  753: #endif
  754: 					task->task_type = taskREADY;
  755: #ifdef HAVE_LIBPTHREAD
  756: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  757: #endif
  758: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  759: #ifdef HAVE_LIBPTHREAD
  760: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  761: #endif
  762: 				}
  763: 				/* if at least two tasks matched, don't remove the event resource */
  764: 				if (flg > 1)
  765: 					evt->flags ^= evt->flags;
  766: 				break;
  767: 			case EVFILT_VNODE:
  768: 				flg = 0;
  769: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  770: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  771: 						continue;
  772: 					else {
  773: 						flg++;
  774: 						TASK_RET(task) = res[i].data;
  775: 						TASK_FLAG(task) = res[i].fflags;
  776: 					}
  777: 					/* remove node handle */
  778: #ifdef HAVE_LIBPTHREAD
  779: 					pthread_mutex_lock(&r->root_mtx[taskNODE]);
  780: #endif
  781: 					TAILQ_REMOVE(&r->root_node, task, task_node);
  782: #ifdef HAVE_LIBPTHREAD
  783: 					pthread_mutex_unlock(&r->root_mtx[taskNODE]);
  784: #endif
  785: 					task->task_type = taskREADY;
  786: #ifdef HAVE_LIBPTHREAD
  787: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  788: #endif
  789: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  790: #ifdef HAVE_LIBPTHREAD
  791: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  792: #endif
  793: 				}
  794: 				/* if at least two tasks matched, don't remove the event resource */
  795: 				if (flg > 1)
  796: 					evt->flags ^= evt->flags;
  797: 				break;
  798: 			case EVFILT_PROC:
  799: 				flg = 0;
  800: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  801: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  802: 						continue;
  803: 					else {
  804: 						flg++;
  805: 						TASK_RET(task) = res[i].data;
  806: 						TASK_FLAG(task) = res[i].fflags;
  807: 					}
  808: 					/* remove proc handle */
  809: #ifdef HAVE_LIBPTHREAD
  810: 					pthread_mutex_lock(&r->root_mtx[taskPROC]);
  811: #endif
  812: 					TAILQ_REMOVE(&r->root_proc, task, task_node);
  813: #ifdef HAVE_LIBPTHREAD
  814: 					pthread_mutex_unlock(&r->root_mtx[taskPROC]);
  815: #endif
  816: 					task->task_type = taskREADY;
  817: #ifdef HAVE_LIBPTHREAD
  818: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  819: #endif
  820: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  821: #ifdef HAVE_LIBPTHREAD
  822: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  823: #endif
  824: 				}
  825: 				/* if at least two tasks matched, don't remove the event resource */
  826: 				if (flg > 1)
  827: 					evt->flags ^= evt->flags;
  828: 				break;
  829: 			case EVFILT_SIGNAL:
  830: 				flg = 0;
  831: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  832: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  833: 						continue;
  834: 					else {
  835: 						flg++;
  836: 						TASK_RET(task) = res[i].data;
  837: 						TASK_FLAG(task) = res[i].fflags;
  838: 					}
  839: 					/* remove signal handle */
  840: #ifdef HAVE_LIBPTHREAD
  841: 					pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
  842: #endif
  843: 					TAILQ_REMOVE(&r->root_signal, task, task_node);
  844: #ifdef HAVE_LIBPTHREAD
  845: 					pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
  846: #endif
  847: 					task->task_type = taskREADY;
  848: #ifdef HAVE_LIBPTHREAD
  849: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  850: #endif
  851: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  852: #ifdef HAVE_LIBPTHREAD
  853: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  854: #endif
  855: 				}
  856: 				/* if at least two tasks matched, don't remove the event resource */
  857: 				if (flg > 1)
  858: 					evt->flags ^= evt->flags;
  859: 				break;
  860: #ifdef AIO_SUPPORT
  861: 			case EVFILT_AIO:
  862: 				flg = 0;
  863: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
  864: 					acb = (struct aiocb*) TASK_VAL(task);
  865: 					if (acb != ((struct aiocb*) res[i].udata))
  866: 						continue;
  867: 					else {
  868: 						flg++;
  869: 						TASK_RET(task) = res[i].data;
  870: 						TASK_FLAG(task) = res[i].fflags;
  871: 					}
  872: 					/* remove user handle */
  873: #ifdef HAVE_LIBPTHREAD
  874: 					pthread_mutex_lock(&r->root_mtx[taskAIO]);
  875: #endif
  876: 					TAILQ_REMOVE(&r->root_aio, task, task_node);
  877: #ifdef HAVE_LIBPTHREAD
  878: 					pthread_mutex_unlock(&r->root_mtx[taskAIO]);
  879: #endif
  880: 					task->task_type = taskREADY;
  881: #ifdef HAVE_LIBPTHREAD
  882: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  883: #endif
  884: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  885: #ifdef HAVE_LIBPTHREAD
  886: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  887: #endif
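					/* collect the AIO result, advance the file offset past the
					 * completed transfer and hand fd/length back to the task */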
  888: 					fd = acb->aio_fildes;
  889: 					if ((len = aio_return(acb)) != -1) {
  890: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
  891: 							LOGERR;
  892: 					} else
  893: 						LOGERR;
  894: 					free(acb);
  895: 					TASK_DATLEN(task) = (u_long) len;
  896: 					TASK_FD(task) = fd;
  897: 				}
  898: 				/* if at least two tasks matched, don't remove the event resource */
  899: 				if (flg > 1)
  900: 					evt->flags ^= evt->flags;
  901: 				break;
  902: #ifdef EVFILT_LIO
  903: 			case EVFILT_LIO:
  904: 				flg = 0;
  905: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
  906: 					acbs = (struct aiocb**) TASK_VAL(task);
  907: 					if (acbs != ((struct aiocb**) res[i].udata))
  908: 						continue;
  909: 					else {
  910: 						flg++;
  911: 						TASK_RET(task) = res[i].data;
  912: 						TASK_FLAG(task) = res[i].fflags;
  913: 					}
  914: 					/* remove user handle */
  915: #ifdef HAVE_LIBPTHREAD
  916: 					pthread_mutex_lock(&r->root_mtx[taskLIO]);
  917: #endif
  918: 					TAILQ_REMOVE(&r->root_lio, task, task_node);
  919: #ifdef HAVE_LIBPTHREAD
  920: 					pthread_mutex_unlock(&r->root_mtx[taskLIO]);
  921: #endif
  922: 					task->task_type = taskREADY;
  923: #ifdef HAVE_LIBPTHREAD
  924: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  925: #endif
  926: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  927: #ifdef HAVE_LIBPTHREAD
  928: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  929: #endif
  930: 					iv = (struct iovec*) TASK_DATA(task);
  931: 					fd = acbs[0]->aio_fildes;
  932: 					off = acbs[0]->aio_offset;
  933: 					for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
  934: 						if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
  935: 							l = 0;
  936: 						else
  937: 							l = iv[j].iov_len;
  938: 						free(acbs[j]);
  939: 					}
  940: 					free(acbs);
  941: 					TASK_DATLEN(task) = (u_long) len;
  942: 					TASK_FD(task) = fd;
  943: 
  944: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
  945: 						LOGERR;
  946: 				}
  947: 				/* if at least two tasks matched, don't remove the event resource */
  948: 				if (flg > 1)
  949: 					evt->flags ^= evt->flags;
  950: 				break;
  951: #endif	/* EVFILT_LIO */
  952: #endif	/* AIO_SUPPORT */
  953: #ifdef EVFILT_USER
  954: 			case EVFILT_USER:
  955: 				flg = 0;
  956: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
  957: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  958: 						continue;
  959: 					else {
  960: 						flg++;
  961: 						TASK_RET(task) = res[i].data;
  962: 						TASK_FLAG(task) = res[i].fflags;
  963: 					}
  964: 					/* remove user handle */
  965: #ifdef HAVE_LIBPTHREAD
  966: 					pthread_mutex_lock(&r->root_mtx[taskUSER]);
  967: #endif
  968: 					TAILQ_REMOVE(&r->root_user, task, task_node);
  969: #ifdef HAVE_LIBPTHREAD
  970: 					pthread_mutex_unlock(&r->root_mtx[taskUSER]);
  971: #endif
  972: 					task->task_type = taskREADY;
  973: #ifdef HAVE_LIBPTHREAD
  974: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  975: #endif
  976: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  977: #ifdef HAVE_LIBPTHREAD
  978: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  979: #endif
  980: 				}
  981: 				/* if at least two tasks matched, don't remove the event resource */
  982: 				if (flg > 1)
  983: 					evt->flags ^= evt->flags;
  984: 				break;
  985: #endif	/* EVFILT_USER */
  986: 		}
  987: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
  988: 			if (r->root_hooks.hook_exec.exception) {
  989: 				if (r->root_hooks.hook_exec.exception(r, NULL))
  990: 					return NULL;
  991: 			} else
  992: 				LOGERR;
  993: 		}
  994: 	}
  995: 
  996: 	/* timer update & put in ready queue */
  997: 	clock_gettime(CLOCK_MONOTONIC, &now);
  998: 
  999: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1000: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
 1001: #ifdef HAVE_LIBPTHREAD
 1002: 			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
 1003: #endif
 1004: 			TAILQ_REMOVE(&r->root_timer, task, task_node);
 1005: #ifdef HAVE_LIBPTHREAD
 1006: 			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
 1007: #endif
 1008: 			task->task_type = taskREADY;
 1009: #ifdef HAVE_LIBPTHREAD
 1010: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1011: #endif
 1012: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1013: #ifdef HAVE_LIBPTHREAD
 1014: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1015: #endif
 1016: 		}
 1017: 
 1018: 	/* move the regular-priority task to the ready queue when there is
 1019: 		no ready task or the miss counter has reached the task's limit */
 1020: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1021: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1022: 			r->root_miss ^= r->root_miss;
 1023: 
 1024: #ifdef HAVE_LIBPTHREAD
 1025: 			pthread_mutex_lock(&r->root_mtx[taskTASK]);
 1026: #endif
 1027: 			TAILQ_REMOVE(&r->root_task, task, task_node);
 1028: #ifdef HAVE_LIBPTHREAD
 1029: 			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
 1030: #endif
 1031: 			task->task_type = taskREADY;
 1032: #ifdef HAVE_LIBPTHREAD
 1033: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1034: #endif
 1035: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1036: #ifdef HAVE_LIBPTHREAD
 1037: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1038: #endif
 1039: 		} else
 1040: 			r->root_miss++;
 1041: 	} else
 1042: 		r->root_miss ^= r->root_miss;
 1043: 
 1044: 	/* OK, let's get a ready task! */
 1045: 	task = TAILQ_FIRST(&r->root_ready);
 1046: 	if (!(task))
 1047: 		return NULL;
 1048: 
 1049: #ifdef HAVE_LIBPTHREAD
 1050: 	pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1051: #endif
 1052: 	TAILQ_REMOVE(&r->root_ready, task, task_node);
 1053: #ifdef HAVE_LIBPTHREAD
 1054: 	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1055: #endif
 1056: 	task->task_type = taskUNUSE;
 1057: #ifdef HAVE_LIBPTHREAD
 1058: 	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1059: #endif
 1060: 	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1061: #ifdef HAVE_LIBPTHREAD
 1062: 	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1063: #endif
 1064: 	return task;
 1065: }
 1066: 
 1067: /*
 1068:  * sched_hook_exception() - Default EXCEPTION hook
 1069:  *
 1070:  * @root = root task
 1071:  * @arg = custom handling: EV_EOF is ignored, any other non-NULL value raises a scheduler error; NULL (default) logs errno
 1072:  * return: <0 errors and 0 ok
 1073:  */
 1074: void *
 1075: sched_hook_exception(void *root, void *arg)
 1076: {
 1077: 	sched_root_task_t *r = root;
 1078: 
 1079: 	if (!r)
 1080: 		return NULL;
 1081: 
 1082: 	/* custom exception handling ... */
 1083: 	if (arg) {
 1084: 		if (arg == (void*) EV_EOF)
 1085: 			return NULL;
 1086: 		return (void*) -1;	/* raise scheduler error!!! */
 1087: 	}
 1088: 
 1089: 	/* if error hook exists */
 1090: 	if (r->root_hooks.hook_root.error)
 1091: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1092: 
 1093: 	/* default case! */
 1094: 	LOGERR;
 1095: 	return NULL;
 1096: }
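/*
 * Usage sketch: overriding the default exception hook on a scheduler root.
 * The member path root_hooks.hook_exec.exception is the one used throughout
 * this file; my_exception() and the fprintf() logging are illustrative
 * assumptions (they would also need <stdio.h>, <string.h> and <errno.h>).
 *
 *	static void *
 *	my_exception(void *root, void *arg)
 *	{
 *		if (arg == (void*) EV_EOF)
 *			return NULL;		// EOF is not fatal, mirror the default hook
 *		if (arg)
 *			return (void*) -1;	// any other flag raises a scheduler error
 *		fprintf(stderr, "sched: %s\n", strerror(errno));
 *		return NULL;
 *	}
 *
 *	// given a sched_root_task_t *root:
 *	root->root_hooks.hook_exec.exception = my_exception;
 */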
 1097: 
 1098: /*
 1099:  * sched_hook_condition() - Default CONDITION hook
 1100:  *
 1101:  * @root = root task
 1102:  * @arg = killState from schedRun()
 1103:  * return: NULL kill scheduler loop or !=NULL ok
 1104:  */
 1105: void *
 1106: sched_hook_condition(void *root, void *arg)
 1107: {
 1108: 	sched_root_task_t *r = root;
 1109: 
 1110: 	if (!r)
 1111: 		return NULL;
 1112: 
 1113: 	return (void*) (r->root_cond - *(intptr_t*) arg);
 1114: }
