File: [ELWIX - Embedded LightWeight unIX] / libaitsched / src / hooks.c
Revision 1.13.2.2
Tue Aug 21 11:45:35 2012 UTC (12 years, 1 month ago) by misho
Branches: sched3_2
Diff to: branchpoint 1.13
fix macro taskExit
change return field of kevent
added the ability for read/write tasks to report how many bytes are waiting in the buffer
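
With this change the default READ/WRITE hooks hand kevent's data field back to the task, so a callback can learn how many bytes are waiting in the buffer before it reads. A minimal sketch of such a callback follows; it is not part of this file, the "aitsched.h" header name and the callback shape are assumptions about the library's public API, while TASK_RET()/TASK_FD() are the same macros used in the code below.

#include <unistd.h>
#include "aitsched.h"	/* assumed name of the libaitsched public header */

static void *
read_ready(sched_task_t *task)
{
	char buf[4096];
	/* bytes waiting on the descriptor, as reported by kevent's data field */
	size_t pending = (size_t) TASK_RET(task);

	if (pending > sizeof buf)
		pending = sizeof buf;
	if (pending > 0)
		(void) read(TASK_FD(task), buf, pending);

	return NULL;
}

TASK_FLAG() is filled the same way from kevent's fflags by the fetch hook below.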

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.13.2.2 2012/08/21 11:45:35 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: /*
   51:  * sched_hook_init() - Default INIT hook
   52:  *
   53:  * @root = root task
   54:  * @arg = unused
   55:  * return: <0 errors and 0 ok
   56:  */
   57: void *
   58: sched_hook_init(void *root, void *arg __unused)
   59: {
   60: 	sched_root_task_t *r = root;
   61: 
   62: 	if (!r)
   63: 		return (void*) -1;
   64: 
   65: 	r->root_kq = kqueue();
   66: 	if (r->root_kq == -1) {
   67: 		LOGERR;
   68: 		return (void*) -1;
   69: 	}
   70: 
   71: 	return NULL;
   72: }
   73: 
   74: /*
   75:  * sched_hook_fini() - Default FINI hook
   76:  *
   77:  * @root = root task
   78:  * @arg = unused
   79:  * return: <0 errors and 0 ok
   80:  */
   81: void *
   82: sched_hook_fini(void *root, void *arg __unused)
   83: {
   84: 	sched_root_task_t *r = root;
   85: 
   86: 	if (!r)
   87: 		return (void*) -1;
   88: 
   89: 	if (r->root_kq > 2) {
   90: 		close(r->root_kq);
   91: 		r->root_kq = 0;
   92: 	}
   93: 
   94: 	return NULL;
   95: }
   96: 
   97: /*
   98:  * sched_hook_cancel() - Default CANCEL hook
   99:  *
  100:  * @task = current task
  101:  * @arg = unused
  102:  * return: <0 errors and 0 ok
  103:  */
  104: void *
  105: sched_hook_cancel(void *task, void *arg __unused)
  106: {
  107: 	sched_task_t *t = task;
  108: 	struct kevent chg[1];
  109: 	struct timespec timeout = { 0, 0 };
  110: #ifdef AIO_SUPPORT
  111: 	struct aiocb *acb;
  112: #ifdef EVFILT_LIO
  113: 	struct aiocb **acbs;
  114: 	register int i;
  115: #endif	/* EVFILT_LIO */
  116: #endif	/* AIO_SUPPORT */
  117: 
  118: 	if (!t || !TASK_ROOT(t))
  119: 		return (void*) -1;
  120: 
  121: 	switch (TASK_TYPE(t)) {
  122: 		case taskREAD:
  123: #ifdef __NetBSD__
  124: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  125: #else
  126: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  127: #endif
  128: 			break;
  129: 		case taskWRITE:
  130: #ifdef __NetBSD__
  131: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  132: #else
  133: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  134: #endif
  135: 			break;
  136: 		case taskALARM:
  137: #ifdef __NetBSD__
  138: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  139: 					0, 0, (intptr_t) TASK_DATA(t));
  140: #else
  141: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  142: 					0, 0, (void*) TASK_DATA(t));
  143: #endif
  144: 			break;
  145: 		case taskNODE:
  146: #ifdef __NetBSD__
  147: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  148: #else
  149: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  150: #endif
  151: 			break;
  152: 		case taskPROC:
  153: #ifdef __NetBSD__
  154: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  155: #else
  156: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  157: #endif
  158: 			break;
  159: 		case taskSIGNAL:
  160: #ifdef __NetBSD__
  161: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  162: #else
  163: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  164: #endif
  165: 			break;
  166: #ifdef AIO_SUPPORT
  167: 		case taskAIO:
  168: #ifdef __NetBSD__
  169: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  170: #else
  171: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  172: #endif
  173: 			acb = (struct aiocb*) TASK_VAL(t);
  174: 			if (acb) {
  175: 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
  176: 					aio_return(acb);
  177: 				free(acb);
  178: 				TASK_VAL(t) = 0;
  179: 			}
  180: 			break;
  181: #ifdef EVFILT_LIO
  182: 		case taskLIO:
  183: #ifdef __NetBSD__
  184: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  185: #else
  186: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  187: #endif
  188: 			acbs = (struct aiocb**) TASK_VAL(t);
  189: 			if (acbs) {
  190: 				for (i = 0; i < TASK_DATLEN(t); i++) {
  191: 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
  192: 						aio_return(acbs[i]);
  193: 					free(acbs[i]);
  194: 				}
  195: 				free(acbs);
  196: 				TASK_VAL(t) = 0;
  197: 			}
  198: 			break;
  199: #endif	/* EVFILT_LIO */
  200: #endif	/* AIO_SUPPORT */
  201: #ifdef EVFILT_USER
  202: 		case taskUSER:
  203: #ifdef __NetBSD__
  204: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  205: #else
  206: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  207: #endif
  208: 			break;
  209: #endif
  210: 		case taskTHREAD:
  211: #ifdef HAVE_LIBPTHREAD
  212: 			pthread_cancel((pthread_t) TASK_VAL(t));
  213: #endif
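			/* FALLTHROUGH */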
  214: 		default:
  215: 			return NULL;
  216: 	}
  217: 
  218: 	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
  219: 	return NULL;
  220: }
  221: 
  222: /*
  223:  * sched_hook_read() - Default READ hook
  224:  *
  225:  * @task = current task
  226:  * @arg = unused
  227:  * return: <0 errors and 0 ok
  228:  */
  229: void *
  230: sched_hook_read(void *task, void *arg __unused)
  231: {
  232: 	sched_task_t *t = task;
  233: 	struct kevent chg[1];
  234: 	struct timespec timeout = { 0, 0 };
  235: 
  236: 	if (!t || !TASK_ROOT(t))
  237: 		return (void*) -1;
  238: 
  239: #ifdef __NetBSD__
  240: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  241: #else
  242: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  243: #endif
  244: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  245: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  246: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  247: 		else
  248: 			LOGERR;
  249: 		return (void*) -1;
  250: 	}
  251: 
  252: 	return NULL;
  253: }
  254: 
  255: /*
  256:  * sched_hook_write() - Default WRITE hook
  257:  *
  258:  * @task = current task
  259:  * @arg = unused
  260:  * return: <0 errors and 0 ok
  261:  */
  262: void *
  263: sched_hook_write(void *task, void *arg __unused)
  264: {
  265: 	sched_task_t *t = task;
  266: 	struct kevent chg[1];
  267: 	struct timespec timeout = { 0, 0 };
  268: 
  269: 	if (!t || !TASK_ROOT(t))
  270: 		return (void*) -1;
  271: 
  272: #ifdef __NetBSD__
  273: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  274: #else
  275: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  276: #endif
  277: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  278: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  279: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  280: 		else
  281: 			LOGERR;
  282: 		return (void*) -1;
  283: 	}
  284: 
  285: 	return NULL;
  286: }
  287: 
  288: /*
  289:  * sched_hook_alarm() - Default ALARM hook
  290:  *
  291:  * @task = current task
  292:  * @arg = unused
  293:  * return: <0 errors and 0 ok
  294:  */
  295: void *
  296: sched_hook_alarm(void *task, void *arg __unused)
  297: {
  298: 	sched_task_t *t = task;
  299: 	struct kevent chg[1];
  300: 	struct timespec timeout = { 0, 0 };
  301: 
  302: 	if (!t || !TASK_ROOT(t))
  303: 		return (void*) -1;
  304: 
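	/* EVFILT_TIMER's period (the data argument below) is expressed in milliseconds */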
  305: #ifdef __NetBSD__
  306: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 
  307: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  308: 			(intptr_t) TASK_DATA(t));
  309: #else
  310: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 
  311: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  312: 			(void*) TASK_DATA(t));
  313: #endif
  314: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  315: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  316: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  317: 		else
  318: 			LOGERR;
  319: 		return (void*) -1;
  320: 	}
  321: 
  322: 	return NULL;
  323: }
  324: 
  325: /*
  326:  * sched_hook_node() - Default NODE hook
  327:  *
  328:  * @task = current task
  329:  * @arg = unused
  330:  * return: <0 errors and 0 ok
  331:  */
  332: void *
  333: sched_hook_node(void *task, void *arg __unused)
  334: {
  335: 	sched_task_t *t = task;
  336: 	struct kevent chg[1];
  337: 	struct timespec timeout = { 0, 0 };
  338: 
  339: 	if (!t || !TASK_ROOT(t))
  340: 		return (void*) -1;
  341: 
  342: #ifdef __NetBSD__
  343: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  344: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  345: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  346: #else
  347: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  348: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  349: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  350: #endif
  351: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  352: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  353: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  354: 		else
  355: 			LOGERR;
  356: 		return (void*) -1;
  357: 	}
  358: 
  359: 	return NULL;
  360: }
  361: 
  362: /*
  363:  * sched_hook_proc() - Default PROC hook
  364:  *
  365:  * @task = current task
  366:  * @arg = unused
  367:  * return: <0 errors and 0 ok
  368:  */
  369: void *
  370: sched_hook_proc(void *task, void *arg __unused)
  371: {
  372: 	sched_task_t *t = task;
  373: 	struct kevent chg[1];
  374: 	struct timespec timeout = { 0, 0 };
  375: 
  376: 	if (!t || !TASK_ROOT(t))
  377: 		return (void*) -1;
  378: 
  379: #ifdef __NetBSD__
  380: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  381: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  382: #else
  383: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  384: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  385: #endif
  386: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  387: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  388: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  389: 		else
  390: 			LOGERR;
  391: 		return (void*) -1;
  392: 	}
  393: 
  394: 	return NULL;
  395: }
  396: 
  397: /*
  398:  * sched_hook_signal() - Default SIGNAL hook
  399:  *
  400:  * @task = current task
  401:  * @arg = unused
  402:  * return: <0 errors and 0 ok
  403:  */
  404: void *
  405: sched_hook_signal(void *task, void *arg __unused)
  406: {
  407: 	sched_task_t *t = task;
  408: 	struct kevent chg[1];
  409: 	struct timespec timeout = { 0, 0 };
  410: 
  411: 	if (!t || !TASK_ROOT(t))
  412: 		return (void*) -1;
  413: 
  414: #ifdef __NetBSD__
  415: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (intptr_t) TASK_VAL(t));
  416: #else
  417: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (void*) TASK_VAL(t));
  418: #endif
  419: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  420: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  421: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  422: 		else
  423: 			LOGERR;
  424: 		return (void*) -1;
  425: 	}
  426: 
  427: 	return NULL;
  428: }
  429: 
  430: /*
  431:  * sched_hook_user() - Default USER hook
  432:  *
  433:  * @task = current task
  434:  * @arg = unused
  435:  * return: <0 errors and 0 ok
  436:  */
  437: #ifdef EVFILT_USER
  438: void *
  439: sched_hook_user(void *task, void *arg __unused)
  440: {
  441: 	sched_task_t *t = task;
  442: 	struct kevent chg[1];
  443: 	struct timespec timeout = { 0, 0 };
  444: 
  445: 	if (!t || !TASK_ROOT(t))
  446: 		return (void*) -1;
  447: 
  448: #ifdef __NetBSD__
  449: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  450: 			0, (intptr_t) TASK_VAL(t));
  451: #else
  452: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  453: 			0, (void*) TASK_VAL(t));
  454: #endif
  455: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  456: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  457: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  458: 		else
  459: 			LOGERR;
  460: 		return (void*) -1;
  461: 	}
  462: 
  463: 	return NULL;
  464: }
  465: #endif
  466: 
  467: /*
  468:  * sched_hook_fetch() - Default FETCH hook
  469:  *
  470:  * @root = root task
  471:  * @arg = unused
  472:  * return: NULL error or !=NULL fetched task
  473:  */
  474: void *
  475: sched_hook_fetch(void *root, void *arg __unused)
  476: {
  477: 	sched_root_task_t *r = root;
  478: 	sched_task_t *task, *tmp;
  479: 	struct timespec now, m, mtmp;
  480: 	struct timespec *timeout;
  481: 	struct kevent evt[1], res[KQ_EVENTS];
  482: 	register int i, flg;
  483: 	int en;
  484: #ifdef AIO_SUPPORT
  485: 	int len, fd;
  486: 	struct aiocb *acb;
  487: #ifdef EVFILT_LIO
  488: 	int l;
  489: 	register int j;
  490: 	off_t off;
  491: 	struct aiocb **acbs;
  492: 	struct iovec *iv;
  493: #endif	/* EVFILT_LIO */
  494: #endif	/* AIO_SUPPORT */
  495: 
  496: 	if (!r)
  497: 		return NULL;
  498: 
  499: 	/* get new task by queue priority */
  500: 	while ((task = TAILQ_FIRST(&r->root_event))) {
  501: #ifdef HAVE_LIBPTHREAD
  502: 		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
  503: #endif
  504: 		TAILQ_REMOVE(&r->root_event, task, task_node);
  505: #ifdef HAVE_LIBPTHREAD
  506: 		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
  507: #endif
  508: 		task->task_type = taskUNUSE;
  509: #ifdef HAVE_LIBPTHREAD
  510: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  511: #endif
  512: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  513: #ifdef HAVE_LIBPTHREAD
  514: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  515: #endif
  516: 		return task;
  517: 	}
  518: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
  519: #ifdef HAVE_LIBPTHREAD
  520: 		pthread_mutex_lock(&r->root_mtx[taskREADY]);
  521: #endif
  522: 		TAILQ_REMOVE(&r->root_ready, task, task_node);
  523: #ifdef HAVE_LIBPTHREAD
  524: 		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  525: #endif
  526: 		task->task_type = taskUNUSE;
  527: #ifdef HAVE_LIBPTHREAD
  528: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  529: #endif
  530: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  531: #ifdef HAVE_LIBPTHREAD
  532: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  533: #endif
  534: 		return task;
  535: 	}
  536: 
  537: #ifdef TIMER_WITHOUT_SORT
  538: 	clock_gettime(CLOCK_MONOTONIC, &now);
  539: 
  540: 	sched_timespecclear(&r->root_wait);
  541: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
  542: 		if (!sched_timespecisset(&r->root_wait))
  543: 			r->root_wait = TASK_TS(task);
  544: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
  545: 			r->root_wait = TASK_TS(task);
  546: 	}
  547: 
  548: 	if (TAILQ_FIRST(&r->root_timer)) {
  549: 		m = r->root_wait;
  550: 		sched_timespecsub(&m, &now, &mtmp);
  551: 		r->root_wait = mtmp;
  552: 	} else {
  553: 		/* set wait INFTIM */
  554: 		sched_timespecinf(&r->root_wait);
  555: 	}
  556: #else
  557: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
  558: 		clock_gettime(CLOCK_MONOTONIC, &now);
  559: 
  560: 		m = TASK_TS(task);
  561: 		sched_timespecsub(&m, &now, &mtmp);
  562: 		r->root_wait = mtmp;
  563: 	} else {
  564: 		/* set wait INFTIM */
  565: 		sched_timespecinf(&r->root_wait);
  566: 	}
  567: #endif
  568: 	/* if a regular task is queued, set NOWAIT (zero timeout) */
  569: 	if (TAILQ_FIRST(&r->root_task))
  570: 		sched_timespecclear(&r->root_wait);
  571: 
  572: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
  573: 		timeout = &r->root_wait;
  574: 	else if (sched_timespecisinf(&r->root_poll))
  575: 		timeout = NULL;
  576: 	else
  577: 		timeout = &r->root_poll;
  578: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
  579: 		if (r->root_hooks.hook_exec.exception) {
  580: 			if (r->root_hooks.hook_exec.exception(r, NULL))
  581: 				return NULL;
  582: 		} else if (errno != EINTR)
  583: 			LOGERR;
  584: 		return NULL;
  585: 	}
  586: 
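	/* now is reused here as a zero timeout for the per-event kevent() cleanup calls below */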
  587: 	now.tv_sec = now.tv_nsec = 0;
  588: 	/* Go and catch the cat into pipes ... */
  589: 	for (i = 0; i < en; i++) {
  590: 		memcpy(evt, &res[i], sizeof evt);
  591: 		evt->flags = EV_DELETE;
  592: 		/* Put read/write task to ready queue */
  593: 		switch (res[i].filter) {
  594: 			case EVFILT_READ:
  595: 				flg = 0;
  596: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  597: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  598: 						continue;
  599: 					else {
  600: 						flg++;
  601: 						TASK_RET(task) = res[i].data;
  602: 						TASK_FLAG(task) = res[i].fflags;
  603: 					}
  604: 					/* remove read handle */
  605: #ifdef HAVE_LIBPTHREAD
  606: 					pthread_mutex_lock(&r->root_mtx[taskREAD]);
  607: #endif
  608: 					TAILQ_REMOVE(&r->root_read, task, task_node);
  609: #ifdef HAVE_LIBPTHREAD
  610: 					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
  611: #endif
  612: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  613:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  614: 							task->task_type = taskUNUSE;
  615: #ifdef HAVE_LIBPTHREAD
  616: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  617: #endif
  618: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  619: #ifdef HAVE_LIBPTHREAD
  620: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  621: #endif
  622: 						} else {
  623: 							task->task_type = taskREADY;
  624: #ifdef HAVE_LIBPTHREAD
  625: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  626: #endif
  627: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  628: #ifdef HAVE_LIBPTHREAD
  629: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  630: #endif
  631: 						}
  632: 					} else {
  633: 						task->task_type = taskREADY;
  634: #ifdef HAVE_LIBPTHREAD
  635: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  636: #endif
  637: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  638: #ifdef HAVE_LIBPTHREAD
  639: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  640: #endif
  641: 					}
  642: 				}
  643: 				/* if matched at least twice, don't remove the event resource */
  644: 				if (flg > 1)
  645: 					evt->flags ^= evt->flags;
  646: 				break;
  647: 			case EVFILT_WRITE:
  648: 				flg = 0;
  649: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  650: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  651: 						continue;
  652: 					else {
  653: 						flg++;
  654: 						TASK_RET(task) = res[i].data;
  655: 						TASK_FLAG(task) = res[i].fflags;
  656: 					}
  657: 					/* remove write handle */
  658: #ifdef HAVE_LIBPTHREAD
  659: 					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
  660: #endif
  661: 					TAILQ_REMOVE(&r->root_write, task, task_node);
  662: #ifdef HAVE_LIBPTHREAD
  663: 					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
  664: #endif
  665: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  666:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  667: 							task->task_type = taskUNUSE;
  668: #ifdef HAVE_LIBPTHREAD
  669: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  670: #endif
  671: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  672: #ifdef HAVE_LIBPTHREAD
  673: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  674: #endif
  675: 						} else {
  676: 							task->task_type = taskREADY;
  677: #ifdef HAVE_LIBPTHREAD
  678: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  679: #endif
  680: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  681: #ifdef HAVE_LIBPTHREAD
  682: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  683: #endif
  684: 						}
  685: 					} else {
  686: 						task->task_type = taskREADY;
  687: #ifdef HAVE_LIBPTHREAD
  688: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  689: #endif
  690: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  691: #ifdef HAVE_LIBPTHREAD
  692: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  693: #endif
  694: 					}
  695: 				}
  696: 				/* if matched at least twice, don't remove the event resource */
  697: 				if (flg > 1)
  698: 					evt->flags ^= evt->flags;
  699: 				break;
  700: 			case EVFILT_TIMER:
  701: 				flg = 0;
  702: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  703: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  704: 						continue;
  705: 					else {
  706: 						flg++;
  707: 						TASK_RET(task) = res[i].data;
  708: 						TASK_FLAG(task) = res[i].fflags;
  709: 					}
  710: 					/* remove alarm handle */
  711: #ifdef HAVE_LIBPTHREAD
  712: 					pthread_mutex_lock(&r->root_mtx[taskALARM]);
  713: #endif
  714: 					TAILQ_REMOVE(&r->root_alarm, task, task_node);
  715: #ifdef HAVE_LIBPTHREAD
  716: 					pthread_mutex_unlock(&r->root_mtx[taskALARM]);
  717: #endif
  718: 					task->task_type = taskREADY;
  719: #ifdef HAVE_LIBPTHREAD
  720: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  721: #endif
  722: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  723: #ifdef HAVE_LIBPTHREAD
  724: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  725: #endif
  726: 				}
  727: 				/* if matched at least twice, don't remove the event resource */
  728: 				if (flg > 1)
  729: 					evt->flags ^= evt->flags;
  730: 				break;
  731: 			case EVFILT_VNODE:
  732: 				flg = 0;
  733: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  734: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  735: 						continue;
  736: 					else {
  737: 						flg++;
  738: 						TASK_RET(task) = res[i].data;
  739: 						TASK_FLAG(task) = res[i].fflags;
  740: 					}
  741: 					/* remove node handle */
  742: #ifdef HAVE_LIBPTHREAD
  743: 					pthread_mutex_lock(&r->root_mtx[taskNODE]);
  744: #endif
  745: 					TAILQ_REMOVE(&r->root_node, task, task_node);
  746: #ifdef HAVE_LIBPTHREAD
  747: 					pthread_mutex_unlock(&r->root_mtx[taskNODE]);
  748: #endif
  749: 					task->task_type = taskREADY;
  750: #ifdef HAVE_LIBPTHREAD
  751: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  752: #endif
  753: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  754: #ifdef HAVE_LIBPTHREAD
  755: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  756: #endif
  757: 				}
  758: 				/* if matched at least twice, don't remove the event resource */
  759: 				if (flg > 1)
  760: 					evt->flags ^= evt->flags;
  761: 				break;
  762: 			case EVFILT_PROC:
  763: 				flg = 0;
  764: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  765: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  766: 						continue;
  767: 					else {
  768: 						flg++;
  769: 						TASK_RET(task) = res[i].data;
  770: 						TASK_FLAG(task) = res[i].fflags;
  771: 					}
  772: 					/* remove proc handle */
  773: #ifdef HAVE_LIBPTHREAD
  774: 					pthread_mutex_lock(&r->root_mtx[taskPROC]);
  775: #endif
  776: 					TAILQ_REMOVE(&r->root_proc, task, task_node);
  777: #ifdef HAVE_LIBPTHREAD
  778: 					pthread_mutex_unlock(&r->root_mtx[taskPROC]);
  779: #endif
  780: 					task->task_type = taskREADY;
  781: #ifdef HAVE_LIBPTHREAD
  782: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  783: #endif
  784: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  785: #ifdef HAVE_LIBPTHREAD
  786: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  787: #endif
  788: 				}
  789: 				/* if matched at least twice, don't remove the event resource */
  790: 				if (flg > 1)
  791: 					evt->flags ^= evt->flags;
  792: 				break;
  793: 			case EVFILT_SIGNAL:
  794: 				flg = 0;
  795: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  796: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  797: 						continue;
  798: 					else {
  799: 						flg++;
  800: 						TASK_RET(task) = res[i].data;
  801: 						TASK_FLAG(task) = res[i].fflags;
  802: 					}
  803: 					/* remove signal handle */
  804: #ifdef HAVE_LIBPTHREAD
  805: 					pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
  806: #endif
  807: 					TAILQ_REMOVE(&r->root_signal, task, task_node);
  808: #ifdef HAVE_LIBPTHREAD
  809: 					pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
  810: #endif
  811: 					task->task_type = taskREADY;
  812: #ifdef HAVE_LIBPTHREAD
  813: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  814: #endif
  815: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  816: #ifdef HAVE_LIBPTHREAD
  817: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  818: #endif
  819: 				}
  820: 				/* if matched at least twice, don't remove the event resource */
  821: 				if (flg > 1)
  822: 					evt->flags ^= evt->flags;
  823: 				break;
  824: #ifdef AIO_SUPPORT
  825: 			case EVFILT_AIO:
  826: 				flg = 0;
  827: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
  828: 					acb = (struct aiocb*) TASK_VAL(task);
  829: 					if (acb != ((struct aiocb*) res[i].udata))
  830: 						continue;
  831: 					else {
  832: 						flg++;
  833: 						TASK_RET(task) = res[i].data;
  834: 						TASK_FLAG(task) = res[i].fflags;
  835: 					}
  836: 					/* remove aio handle */
  837: #ifdef HAVE_LIBPTHREAD
  838: 					pthread_mutex_lock(&r->root_mtx[taskAIO]);
  839: #endif
  840: 					TAILQ_REMOVE(&r->root_aio, task, task_node);
  841: #ifdef HAVE_LIBPTHREAD
  842: 					pthread_mutex_unlock(&r->root_mtx[taskAIO]);
  843: #endif
  844: 					task->task_type = taskREADY;
  845: #ifdef HAVE_LIBPTHREAD
  846: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  847: #endif
  848: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  849: #ifdef HAVE_LIBPTHREAD
  850: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  851: #endif
  852: 					fd = acb->aio_fildes;
  853: 					if ((len = aio_return(acb)) != -1) {
  854: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
  855: 							LOGERR;
  856: 					} else
  857: 						LOGERR;
  858: 					free(acb);
  859: 					TASK_DATLEN(task) = (u_long) len;
  860: 					TASK_FD(task) = fd;
  861: 				}
  862: 				/* if matched at least twice, don't remove the event resource */
  863: 				if (flg > 1)
  864: 					evt->flags ^= evt->flags;
  865: 				break;
  866: #ifdef EVFILT_LIO
  867: 			case EVFILT_LIO:
  868: 				flg = 0;
  869: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
  870: 					acbs = (struct aiocb**) TASK_VAL(task);
  871: 					if (acbs != ((struct aiocb**) res[i].udata))
  872: 						continue;
  873: 					else {
  874: 						flg++;
  875: 						TASK_RET(task) = res[i].data;
  876: 						TASK_FLAG(task) = res[i].fflags;
  877: 					}
  878: 					/* remove lio handle */
  879: #ifdef HAVE_LIBPTHREAD
  880: 					pthread_mutex_lock(&r->root_mtx[taskLIO]);
  881: #endif
  882: 					TAILQ_REMOVE(&r->root_lio, task, task_node);
  883: #ifdef HAVE_LIBPTHREAD
  884: 					pthread_mutex_unlock(&r->root_mtx[taskLIO]);
  885: #endif
  886: 					task->task_type = taskREADY;
  887: #ifdef HAVE_LIBPTHREAD
  888: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  889: #endif
  890: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  891: #ifdef HAVE_LIBPTHREAD
  892: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  893: #endif
  894: 					iv = (struct iovec*) TASK_DATA(task);
  895: 					fd = acbs[0]->aio_fildes;
  896: 					off = acbs[0]->aio_offset;
  897: 					for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
  898: 						if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
  899: 							l = 0;
  900: 						else
  901: 							l = iv[j].iov_len;
  902: 						free(acbs[j]);
  903: 					}
  904: 					free(acbs);
  905: 					TASK_DATLEN(task) = (u_long) len;
  906: 					TASK_FD(task) = fd;
  907: 
  908: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
  909: 						LOGERR;
  910: 				}
  911: 				/* if matched at least twice, don't remove the event resource */
  912: 				if (flg > 1)
  913: 					evt->flags ^= evt->flags;
  914: 				break;
  915: #endif	/* EVFILT_LIO */
  916: #endif	/* AIO_SUPPORT */
  917: #ifdef EVFILT_USER
  918: 			case EVFILT_USER:
  919: 				flg = 0;
  920: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
  921: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  922: 						continue;
  923: 					else {
  924: 						flg++;
  925: 						TASK_RET(task) = res[i].data;
  926: 						TASK_FLAG(task) = res[i].fflags;
  927: 					}
  928: 					/* remove user handle */
  929: #ifdef HAVE_LIBPTHREAD
  930: 					pthread_mutex_lock(&r->root_mtx[taskUSER]);
  931: #endif
  932: 					TAILQ_REMOVE(&r->root_user, task, task_node);
  933: #ifdef HAVE_LIBPTHREAD
  934: 					pthread_mutex_unlock(&r->root_mtx[taskUSER]);
  935: #endif
  936: 					task->task_type = taskREADY;
  937: #ifdef HAVE_LIBPTHREAD
  938: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  939: #endif
  940: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  941: #ifdef HAVE_LIBPTHREAD
  942: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  943: #endif
  944: 				}
  945: 				/* if matched at least twice, don't remove the event resource */
  946: 				if (flg > 1)
  947: 					evt->flags ^= evt->flags;
  948: 				break;
  949: #endif	/* EVFILT_USER */
  950: 		}
  951: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
  952: 			if (r->root_hooks.hook_exec.exception) {
  953: 				if (r->root_hooks.hook_exec.exception(r, NULL))
  954: 					return NULL;
  955: 			} else
  956: 				LOGERR;
  957: 		}
  958: 	}
  959: 
  960: 	/* timer update & put in ready queue */
  961: 	clock_gettime(CLOCK_MONOTONIC, &now);
  962: 
  963: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
  964: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
  965: #ifdef HAVE_LIBPTHREAD
  966: 			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
  967: #endif
  968: 			TAILQ_REMOVE(&r->root_timer, task, task_node);
  969: #ifdef HAVE_LIBPTHREAD
  970: 			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
  971: #endif
  972: 			task->task_type = taskREADY;
  973: #ifdef HAVE_LIBPTHREAD
  974: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
  975: #endif
  976: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  977: #ifdef HAVE_LIBPTHREAD
  978: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  979: #endif
  980: 		}
  981: 
  982: 	/* put the regular-priority task on the ready queue
  983: 		if there is no ready task or the regular task has reached its maximum miss count */
  984: 	if ((task = TAILQ_FIRST(&r->root_task))) {
  985: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
  986: 			r->root_miss ^= r->root_miss;
  987: 
  988: #ifdef HAVE_LIBPTHREAD
  989: 			pthread_mutex_lock(&r->root_mtx[taskTASK]);
  990: #endif
  991: 			TAILQ_REMOVE(&r->root_task, task, task_node);
  992: #ifdef HAVE_LIBPTHREAD
  993: 			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
  994: #endif
  995: 			task->task_type = taskREADY;
  996: #ifdef HAVE_LIBPTHREAD
  997: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
  998: #endif
  999: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1000: #ifdef HAVE_LIBPTHREAD
 1001: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1002: #endif
 1003: 		} else
 1004: 			r->root_miss++;
 1005: 	} else
 1006: 		r->root_miss ^= r->root_miss;
 1007: 
 1008: 	/* OK, let's get a ready task! */
 1009: 	task = TAILQ_FIRST(&r->root_ready);
 1010: 	if (!(task))
 1011: 		return NULL;
 1012: 
 1013: #ifdef HAVE_LIBPTHREAD
 1014: 	pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1015: #endif
 1016: 	TAILQ_REMOVE(&r->root_ready, task, task_node);
 1017: #ifdef HAVE_LIBPTHREAD
 1018: 	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1019: #endif
 1020: 	task->task_type = taskUNUSE;
 1021: #ifdef HAVE_LIBPTHREAD
 1022: 	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1023: #endif
 1024: 	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1025: #ifdef HAVE_LIBPTHREAD
 1026: 	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1027: #endif
 1028: 	return task;
 1029: }
 1030: 
 1031: /*
 1032:  * sched_hook_exception() - Default EXCEPTION hook
 1033:  *
 1034:  * @root = root task
 1035:  * @arg = custom handling: EV_EOF is ignored, any other non-NULL value raises a scheduler error; NULL logs errno
 1036:  * return: <0 errors and 0 ok
 1037:  */
 1038: void *
 1039: sched_hook_exception(void *root, void *arg)
 1040: {
 1041: 	sched_root_task_t *r = root;
 1042: 
 1043: 	if (!r)
 1044: 		return NULL;
 1045: 
 1046: 	/* custom exception handling ... */
 1047: 	if (arg) {
 1048: 		if (arg == (void*) EV_EOF)
 1049: 			return NULL;
 1050: 		return (void*) -1;	/* raise scheduler error!!! */
 1051: 	}
 1052: 
 1053: 	/* if error hook exists */
 1054: 	if (r->root_hooks.hook_root.error)
 1055: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1056: 
 1057: 	/* default case! */
 1058: 	LOGERR;
 1059: 	return NULL;
 1060: }
 1061: 
 1062: /*
 1063:  * sched_hook_condition() - Default CONDITION hook
 1064:  *
 1065:  * @root = root task
 1066:  * @arg = killState from schedRun()
 1067:  * return: NULL kill scheduler loop or !=NULL ok
 1068:  */
 1069: void *
 1070: sched_hook_condition(void *root, void *arg)
 1071: {
 1072: 	sched_root_task_t *r = root;
 1073: 
 1074: 	if (!r)
 1075: 		return NULL;
 1076: 
 1077: 	return (void*) (r->root_cond - *(intptr_t*) arg);
 1078: }
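
For reference, a replacement EXCEPTION hook only has to honour the contract documented above: ignore EV_EOF, treat any other non-NULL argument as a scheduler error, and log errno when called with NULL. A hedged sketch follows; it is not part of this file and the "aitsched.h" header name is an assumption, but root_hooks.hook_exec.exception is the same pointer the hooks above already consult.

#include <sys/event.h>
#include <errno.h>
#include <string.h>
#include <syslog.h>
#include "aitsched.h"	/* assumed name of the libaitsched public header */

static void *
my_exception_hook(void *root, void *arg)
{
	sched_root_task_t *r = root;

	if (!r)
		return NULL;
	if (arg == (void*) EV_EOF)	/* peer closed the descriptor: not fatal here */
		return NULL;
	if (arg)			/* any other argument raises a scheduler error */
		return (void*) -1;

	syslog(LOG_ERR, "sched: %s", strerror(errno));	/* default case: log errno */
	return NULL;
}

Installing it on an initialised scheduler root, e.g.
	root->root_hooks.hook_exec.exception = my_exception_hook;
replaces sched_hook_exception() for that root.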
