File: [ELWIX - Embedded LightWeight unIX] / libaitsched / src / hooks.c
Revision 1.18.4.3
Thu Aug 15 18:14:54 2013 UTC (10 years, 10 months ago) by misho
Branches: sched3_8

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.18.4.3 2013/08/15 18:14:54 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: /*
   51:  * sched_hook_init() - Default INIT hook
   52:  *
   53:  * @root = root task
   54:  * @arg = unused
   55:  * return: <0 errors and 0 ok
   56:  */
   57: void *
   58: sched_hook_init(void *root, void *arg __unused)
   59: {
   60: 	sched_root_task_t *r = root;
   61: 
   62: 	if (!r)
   63: 		return (void*) -1;
   64: 
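       	/* create the kqueue descriptor shared by all the kevent-based hooks below */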
   65: 	r->root_kq = kqueue();
   66: 	if (r->root_kq == -1) {
   67: 		LOGERR;
   68: 		return (void*) -1;
   69: 	}
   70: 
   71: 	return NULL;
   72: }
   73: 
   74: /*
   75:  * sched_hook_fini() - Default FINI hook
   76:  *
   77:  * @root = root task
   78:  * @arg = unused
   79:  * return: <0 errors and 0 ok
   80:  */
   81: void *
   82: sched_hook_fini(void *root, void *arg __unused)
   83: {
   84: 	sched_root_task_t *r = root;
   85: 
   86: 	if (!r)
   87: 		return (void*) -1;
   88: 
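       	/* close the kqueue descriptor only if it lies above the stdio range (0-2) */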
   89: 	if (r->root_kq > 2) {
   90: 		close(r->root_kq);
   91: 		r->root_kq = 0;
   92: 	}
   93: 
   94: 	return NULL;
   95: }
   96: 
   97: /*
   98:  * sched_hook_cancel() - Default CANCEL hook
   99:  *
  100:  * @task = current task
  101:  * @arg = unused
  102:  * return: <0 errors and 0 ok
  103:  */
  104: void *
  105: sched_hook_cancel(void *task, void *arg __unused)
  106: {
  107: 	sched_task_t *t = task;
  108: 	struct kevent chg[1];
  109: 	struct timespec timeout = { 0, 0 };
  110: #ifdef AIO_SUPPORT
  111: 	struct aiocb *acb;
  112: #ifdef EVFILT_LIO
  113: 	register int i = 0;
  114: 	struct aiocb **acbs;
  115: #endif	/* EVFILT_LIO */
  116: #endif	/* AIO_SUPPORT */
  117: 
  118: 	if (!t || !TASK_ROOT(t))
  119: 		return (void*) -1;
  120: 
  121: 	switch (TASK_TYPE(t)) {
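       		/* NetBSD declares kevent's udata as intptr_t while other BSDs use void*, hence the per-OS casts below */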
  122: 		case taskREAD:
  123: #ifdef __NetBSD__
  124: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  125: #else
  126: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  127: #endif
  128: 			break;
  129: 		case taskWRITE:
  130: #ifdef __NetBSD__
  131: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  132: #else
  133: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  134: #endif
  135: 			break;
  136: 		case taskALARM:
  137: #ifdef __NetBSD__
  138: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  139: 					0, 0, (intptr_t) TASK_DATA(t));
  140: #else
  141: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  142: 					0, 0, (void*) TASK_DATA(t));
  143: #endif
  144: 			break;
  145: 		case taskNODE:
  146: #ifdef __NetBSD__
  147: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  148: #else
  149: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  150: #endif
  151: 			break;
  152: 		case taskPROC:
  153: #ifdef __NetBSD__
  154: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  155: #else
  156: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  157: #endif
  158: 			break;
  159: 		case taskSIGNAL:
  160: #ifdef __NetBSD__
  161: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  162: #else
  163: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  164: #endif
  165: 			/* restore signal */
  166: 			signal(TASK_VAL(t), SIG_DFL);
  167: 			break;
  168: #ifdef AIO_SUPPORT
  169: 		case taskAIO:
  170: #ifdef __NetBSD__
  171: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  172: #else
  173: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  174: #endif
  175: 			acb = (struct aiocb*) TASK_VAL(t);
  176: 			if (acb) {
  177: 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
  178: 					aio_return(acb);
  179: 				free(acb);
  180: 				TASK_VAL(t) = 0;
  181: 			}
  182: 			break;
  183: #ifdef EVFILT_LIO
  184: 		case taskLIO:
  185: #ifdef __NetBSD__
  186: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  187: #else
  188: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  189: #endif
  190: 			acbs = (struct aiocb**) TASK_VAL(t);
  191: 			if (acbs) {
  192: 				for (i = 0; i < TASK_DATLEN(t); i++) {
  193: 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
  194: 						aio_return(acbs[i]);
  195: 					free(acbs[i]);
  196: 				}
  197: 				free(acbs);
  198: 				TASK_VAL(t) = 0;
  199: 			}
  200: 			break;
  201: #endif	/* EVFILT_LIO */
  202: #endif	/* AIO_SUPPORT */
  203: #ifdef EVFILT_USER
  204: 		case taskUSER:
  205: #ifdef __NetBSD__
  206: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  207: #else
  208: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  209: #endif
  210: 			break;
  211: #endif
  212: 		case taskTHREAD:
  213: #ifdef HAVE_LIBPTHREAD
  214: 			pthread_cancel((pthread_t) TASK_VAL(t));
  215: #endif
  216: 			return NULL;
  217: 		case taskRTC:
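       			/* tear down the POSIX timer and the helper signal task created by sched_hook_rtc() */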
  218: 			timer_delete(*(timer_t*) TASK_DATLEN(t));
  219: 			schedCancel((sched_task_t*) TASK_RET(t));
  220: 			free((void*) TASK_DATLEN(t));
  221: 			return NULL;
  222: 		default:
  223: 			return NULL;
  224: 	}
  225: 
  226: 	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
  227: 	return NULL;
  228: }
  229: 
  230: #ifdef HAVE_LIBPTHREAD
  231: /*
  232:  * sched_hook_thread() - Default THREAD hook
  233:  *
  234:  * @task = current task
  235:  * @arg = pthread attributes
  236:  * return: <0 errors and 0 ok
  237:  */
  238: void *
  239: sched_hook_thread(void *task, void *arg)
  240: {
  241: 	sched_task_t *t = task;
  242: 	pthread_t tid;
  243: 	sigset_t s, o;
  244: 
  245: 	if (!t || !TASK_ROOT(t))
  246: 		return (void*) -1;
  247: 
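       	/* block all signals around pthread_create() so the new thread starts with them masked; the old mask is restored below */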
  248: 	sigfillset(&s);
  249: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  250: 	if ((errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  251: 				(void *(*)(void*)) _sched_threadWrapper, t))) {
  252: 		LOGERR;
  253: 		pthread_sigmask(SIG_SETMASK, &o, NULL);
  254: 		return (void*) -1;
  255: 	} else
  256: 		TASK_VAL(t) = (u_long) tid;
  257: 
  258: 	if (!TASK_ISLOCKED(t))
  259: 		TASK_LOCK(t);
  260: 
  261: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  262: 	return NULL;
  263: }
  264: #endif
  265: 
  266: /*
  267:  * sched_hook_read() - Default READ hook
  268:  *
  269:  * @task = current task
  270:  * @arg = unused
  271:  * return: <0 errors and 0 ok
  272:  */
  273: void *
  274: sched_hook_read(void *task, void *arg __unused)
  275: {
  276: 	sched_task_t *t = task;
  277: 	struct kevent chg[1];
  278: 	struct timespec timeout = { 0, 0 };
  279: 
  280: 	if (!t || !TASK_ROOT(t))
  281: 		return (void*) -1;
  282: 
  283: #ifdef __NetBSD__
  284: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  285: #else
  286: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  287: #endif
  288: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  289: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  290: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  291: 		else
  292: 			LOGERR;
  293: 		return (void*) -1;
  294: 	}
  295: 
  296: 	return NULL;
  297: }
  298: 
  299: /*
  300:  * sched_hook_write() - Default WRITE hook
  301:  *
  302:  * @task = current task
  303:  * @arg = unused
  304:  * return: <0 errors and 0 ok
  305:  */
  306: void *
  307: sched_hook_write(void *task, void *arg __unused)
  308: {
  309: 	sched_task_t *t = task;
  310: 	struct kevent chg[1];
  311: 	struct timespec timeout = { 0, 0 };
  312: 
  313: 	if (!t || !TASK_ROOT(t))
  314: 		return (void*) -1;
  315: 
  316: #ifdef __NetBSD__
  317: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  318: #else
  319: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  320: #endif
  321: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  322: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  323: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  324: 		else
  325: 			LOGERR;
  326: 		return (void*) -1;
  327: 	}
  328: 
  329: 	return NULL;
  330: }
  331: 
  332: /*
  333:  * sched_hook_alarm() - Default ALARM hook
  334:  *
  335:  * @task = current task
  336:  * @arg = unused
  337:  * return: <0 errors and 0 ok
  338:  */
  339: void *
  340: sched_hook_alarm(void *task, void *arg __unused)
  341: {
  342: 	sched_task_t *t = task;
  343: 	struct kevent chg[1];
  344: 	struct timespec timeout = { 0, 0 };
  345: 
  346: 	if (!t || !TASK_ROOT(t))
  347: 		return (void*) -1;
  348: 
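       	/* EVFILT_TIMER expects its period in milliseconds, so convert the task's timespec */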
  349: #ifdef __NetBSD__
  350: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  351: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  352: 			(intptr_t) TASK_DATA(t));
  353: #else
  354: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  355: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  356: 			(void*) TASK_DATA(t));
  357: #endif
  358: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  359: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  360: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  361: 		else
  362: 			LOGERR;
  363: 		return (void*) -1;
  364: 	}
  365: 
  366: 	return NULL;
  367: }
  368: 
  369: /*
  370:  * sched_hook_node() - Default NODE hook
  371:  *
  372:  * @task = current task
  373:  * @arg = unused
  374:  * return: <0 errors and 0 ok
  375:  */
  376: void *
  377: sched_hook_node(void *task, void *arg __unused)
  378: {
  379: 	sched_task_t *t = task;
  380: 	struct kevent chg[1];
  381: 	struct timespec timeout = { 0, 0 };
  382: 
  383: 	if (!t || !TASK_ROOT(t))
  384: 		return (void*) -1;
  385: 
  386: #ifdef __NetBSD__
  387: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  388: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  389: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  390: #else
  391: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  392: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  393: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  394: #endif
  395: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  396: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  397: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  398: 		else
  399: 			LOGERR;
  400: 		return (void*) -1;
  401: 	}
  402: 
  403: 	return NULL;
  404: }
  405: 
  406: /*
  407:  * sched_hook_proc() - Default PROC hook
  408:  *
  409:  * @task = current task
  410:  * @arg = unused
  411:  * return: <0 errors and 0 ok
  412:  */
  413: void *
  414: sched_hook_proc(void *task, void *arg __unused)
  415: {
  416: 	sched_task_t *t = task;
  417: 	struct kevent chg[1];
  418: 	struct timespec timeout = { 0, 0 };
  419: 
  420: 	if (!t || !TASK_ROOT(t))
  421: 		return (void*) -1;
  422: 
  423: #ifdef __NetBSD__
  424: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  425: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  426: #else
  427: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  428: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  429: #endif
  430: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  431: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  432: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  433: 		else
  434: 			LOGERR;
  435: 		return (void*) -1;
  436: 	}
  437: 
  438: 	return NULL;
  439: }
  440: 
  441: /*
  442:  * sched_hook_signal() - Default SIGNAL hook
  443:  *
  444:  * @task = current task
  445:  * @arg = unused
  446:  * return: <0 errors and 0 ok
  447:  */
  448: void *
  449: sched_hook_signal(void *task, void *arg __unused)
  450: {
  451: 	sched_task_t *t = task;
  452: 	struct kevent chg[1];
  453: 	struct timespec timeout = { 0, 0 };
  454: 
  455: 	if (!t || !TASK_ROOT(t))
  456: 		return (void*) -1;
  457: 
   458: 	/* ignore the signal so its default action never fires; kqueue still reports it via EVFILT_SIGNAL */
  459: 	signal(TASK_VAL(t), SIG_IGN);
  460: 
  461: #ifdef __NetBSD__
  462: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
  463: #else
  464: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
  465: #endif
  466: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  467: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  468: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  469: 		else
  470: 			LOGERR;
  471: 		return (void*) -1;
  472: 	}
  473: 
  474: 	return NULL;
  475: }
  476: 
  477: /*
  478:  * sched_hook_user() - Default USER hook
  479:  *
  480:  * @task = current task
  481:  * @arg = unused
  482:  * return: <0 errors and 0 ok
  483:  */
  484: #ifdef EVFILT_USER
  485: void *
  486: sched_hook_user(void *task, void *arg __unused)
  487: {
  488: 	sched_task_t *t = task;
  489: 	struct kevent chg[1];
  490: 	struct timespec timeout = { 0, 0 };
  491: 
  492: 	if (!t || !TASK_ROOT(t))
  493: 		return (void*) -1;
  494: 
  495: #ifdef __NetBSD__
  496: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  497: 			0, (intptr_t) TASK_VAL(t));
  498: #else
  499: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  500: 			0, (void*) TASK_VAL(t));
  501: #endif
  502: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  503: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  504: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  505: 		else
  506: 			LOGERR;
  507: 		return (void*) -1;
  508: 	}
  509: 
  510: 	return NULL;
  511: }
  512: #endif
  513: 
  514: /*
  515:  * sched_hook_fetch() - Default FETCH hook
  516:  *
  517:  * @root = root task
  518:  * @arg = unused
  519:  * return: NULL error or !=NULL fetched task
  520:  */
  521: void *
  522: sched_hook_fetch(void *root, void *arg __unused)
  523: {
  524: 	sched_root_task_t *r = root;
  525: 	sched_task_t *task, *tmp;
  526: 	struct timespec now, m, mtmp;
  527: 	struct timespec *timeout;
  528: 	struct kevent evt[1], res[KQ_EVENTS];
  529: 	register int i, flg;
  530: 	int en;
  531: #ifdef AIO_SUPPORT
  532: 	int len, fd;
  533: 	struct aiocb *acb;
  534: #ifdef EVFILT_LIO
  535: 	int l;
  536: 	register int j;
  537: 	off_t off;
  538: 	struct aiocb **acbs;
  539: 	struct iovec *iv;
  540: #endif	/* EVFILT_LIO */
  541: #endif	/* AIO_SUPPORT */
  542: 
  543: 	if (!r)
  544: 		return NULL;
  545: 
  546: 	/* get new task by queue priority */
  547: 	while ((task = TAILQ_FIRST(&r->root_event))) {
  548: #ifdef HAVE_LIBPTHREAD
  549: 		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
  550: #endif
  551: 		TAILQ_REMOVE(&r->root_event, task, task_node);
  552: #ifdef HAVE_LIBPTHREAD
  553: 		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
  554: #endif
  555: 		task->task_type = taskUNUSE;
  556: #ifdef HAVE_LIBPTHREAD
  557: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  558: #endif
  559: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  560: #ifdef HAVE_LIBPTHREAD
  561: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  562: #endif
  563: 		return task;
  564: 	}
  565: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
  566: #ifdef HAVE_LIBPTHREAD
  567: 		pthread_mutex_lock(&r->root_mtx[taskREADY]);
  568: #endif
  569: 		TAILQ_REMOVE(&r->root_ready, task, task_node);
  570: #ifdef HAVE_LIBPTHREAD
  571: 		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  572: #endif
  573: 		task->task_type = taskUNUSE;
  574: #ifdef HAVE_LIBPTHREAD
  575: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  576: #endif
  577: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  578: #ifdef HAVE_LIBPTHREAD
  579: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  580: #endif
  581: 		return task;
  582: 	}
  583: 
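       	/* work out how long kevent() may sleep: until the nearest timer deadline, or forever if none */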
  584: #ifdef TIMER_WITHOUT_SORT
  585: 	clock_gettime(CLOCK_MONOTONIC, &now);
  586: 
  587: 	sched_timespecclear(&r->root_wait);
  588: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
  589: 		if (!sched_timespecisset(&r->root_wait))
  590: 			r->root_wait = TASK_TS(task);
  591: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
  592: 			r->root_wait = TASK_TS(task);
  593: 	}
  594: 
  595: 	if (TAILQ_FIRST(&r->root_timer)) {
  596: 		m = r->root_wait;
  597: 		sched_timespecsub(&m, &now, &mtmp);
  598: 		r->root_wait = mtmp;
  599: 	} else {
  600: 		/* set wait INFTIM */
  601: 		sched_timespecinf(&r->root_wait);
  602: 	}
  603: #else
  604: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
  605: 		clock_gettime(CLOCK_MONOTONIC, &now);
  606: 
  607: 		m = TASK_TS(task);
  608: 		sched_timespecsub(&m, &now, &mtmp);
  609: 		r->root_wait = mtmp;
  610: 	} else {
  611: 		/* set wait INFTIM */
  612: 		sched_timespecinf(&r->root_wait);
  613: 	}
  614: #endif
   615: 	/* if a regular task is pending, don't wait at all (NOWAIT) */
  616: 	if (TAILQ_FIRST(&r->root_task))
  617: 		sched_timespecclear(&r->root_wait);
  618: 
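       	/* pick the kevent() timeout: the computed wait if finite, otherwise the poll interval, or block if both are infinite */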
  619: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
  620: 		timeout = &r->root_wait;
  621: 	else if (sched_timespecisinf(&r->root_poll))
  622: 		timeout = NULL;
  623: 	else
  624: 		timeout = &r->root_poll;
  625: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
  626: 		if (r->root_hooks.hook_exec.exception) {
  627: 			if (r->root_hooks.hook_exec.exception(r, NULL))
  628: 				return NULL;
  629: 		} else if (errno != EINTR)
  630: 			LOGERR;
  631: 		return NULL;
  632: 	}
  633: 
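       	/* 'now' doubles as a zero timeout for the per-event EV_DELETE kevent() calls below */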
  634: 	now.tv_sec = now.tv_nsec = 0;
  635: 	/* Go and catch the cat into pipes ... */
  636: 	for (i = 0; i < en; i++) {
  637: 		memcpy(evt, &res[i], sizeof evt);
  638: 		evt->flags = EV_DELETE;
  639: 		/* Put read/write task to ready queue */
  640: 		switch (res[i].filter) {
  641: 			case EVFILT_READ:
  642: 				flg = 0;
  643: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  644: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  645: 						continue;
  646: 					else {
  647: 						flg++;
  648: 						TASK_RET(task) = res[i].data;
  649: 						TASK_FLAG(task) = res[i].fflags;
  650: 					}
  651: 					/* remove read handle */
  652: #ifdef HAVE_LIBPTHREAD
  653: 					pthread_mutex_lock(&r->root_mtx[taskREAD]);
  654: #endif
  655: 					TAILQ_REMOVE(&r->root_read, task, task_node);
  656: #ifdef HAVE_LIBPTHREAD
  657: 					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
  658: #endif
  659: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  660:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  661: 							task->task_type = taskUNUSE;
  662: #ifdef HAVE_LIBPTHREAD
  663: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  664: #endif
  665: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  666: #ifdef HAVE_LIBPTHREAD
  667: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  668: #endif
  669: 						} else {
  670: 							task->task_type = taskREADY;
  671: #ifdef HAVE_LIBPTHREAD
  672: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  673: #endif
  674: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  675: #ifdef HAVE_LIBPTHREAD
  676: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  677: #endif
  678: 						}
  679: 					} else {
  680: 						task->task_type = taskREADY;
  681: #ifdef HAVE_LIBPTHREAD
  682: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  683: #endif
  684: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  685: #ifdef HAVE_LIBPTHREAD
  686: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  687: #endif
  688: 					}
  689: 				}
   690: 				/* if at least two tasks matched, don't remove the event resource */
  691: 				if (flg > 1)
  692: 					evt->flags ^= evt->flags;
  693: 				break;
  694: 			case EVFILT_WRITE:
  695: 				flg = 0;
  696: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  697: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  698: 						continue;
  699: 					else {
  700: 						flg++;
  701: 						TASK_RET(task) = res[i].data;
  702: 						TASK_FLAG(task) = res[i].fflags;
  703: 					}
  704: 					/* remove write handle */
  705: #ifdef HAVE_LIBPTHREAD
  706: 					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
  707: #endif
  708: 					TAILQ_REMOVE(&r->root_write, task, task_node);
  709: #ifdef HAVE_LIBPTHREAD
  710: 					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
  711: #endif
  712: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  713:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  714: 							task->task_type = taskUNUSE;
  715: #ifdef HAVE_LIBPTHREAD
  716: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  717: #endif
  718: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  719: #ifdef HAVE_LIBPTHREAD
  720: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  721: #endif
  722: 						} else {
  723: 							task->task_type = taskREADY;
  724: #ifdef HAVE_LIBPTHREAD
  725: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  726: #endif
  727: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  728: #ifdef HAVE_LIBPTHREAD
  729: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  730: #endif
  731: 						}
  732: 					} else {
  733: 						task->task_type = taskREADY;
  734: #ifdef HAVE_LIBPTHREAD
  735: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  736: #endif
  737: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  738: #ifdef HAVE_LIBPTHREAD
  739: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  740: #endif
  741: 					}
  742: 				}
   743: 				/* if at least two tasks matched, don't remove the event resource */
  744: 				if (flg > 1)
  745: 					evt->flags ^= evt->flags;
  746: 				break;
  747: 			case EVFILT_TIMER:
  748: 				flg = 0;
  749: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  750: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  751: 						continue;
  752: 					else {
  753: 						flg++;
  754: 						TASK_RET(task) = res[i].data;
  755: 						TASK_FLAG(task) = res[i].fflags;
  756: 					}
  757: 					/* remove alarm handle */
  758: #ifdef HAVE_LIBPTHREAD
  759: 					pthread_mutex_lock(&r->root_mtx[taskALARM]);
  760: #endif
  761: 					TAILQ_REMOVE(&r->root_alarm, task, task_node);
  762: #ifdef HAVE_LIBPTHREAD
  763: 					pthread_mutex_unlock(&r->root_mtx[taskALARM]);
  764: #endif
  765: 					task->task_type = taskREADY;
  766: #ifdef HAVE_LIBPTHREAD
  767: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  768: #endif
  769: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  770: #ifdef HAVE_LIBPTHREAD
  771: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  772: #endif
  773: 				}
   774: 				/* if at least two tasks matched, don't remove the event resource */
  775: 				if (flg > 1)
  776: 					evt->flags ^= evt->flags;
  777: 				break;
  778: 			case EVFILT_VNODE:
  779: 				flg = 0;
  780: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  781: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  782: 						continue;
  783: 					else {
  784: 						flg++;
  785: 						TASK_RET(task) = res[i].data;
  786: 						TASK_FLAG(task) = res[i].fflags;
  787: 					}
  788: 					/* remove node handle */
  789: #ifdef HAVE_LIBPTHREAD
  790: 					pthread_mutex_lock(&r->root_mtx[taskNODE]);
  791: #endif
  792: 					TAILQ_REMOVE(&r->root_node, task, task_node);
  793: #ifdef HAVE_LIBPTHREAD
  794: 					pthread_mutex_unlock(&r->root_mtx[taskNODE]);
  795: #endif
  796: 					task->task_type = taskREADY;
  797: #ifdef HAVE_LIBPTHREAD
  798: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  799: #endif
  800: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  801: #ifdef HAVE_LIBPTHREAD
  802: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  803: #endif
  804: 				}
   805: 				/* if at least two tasks matched, don't remove the event resource */
  806: 				if (flg > 1)
  807: 					evt->flags ^= evt->flags;
  808: 				break;
  809: 			case EVFILT_PROC:
  810: 				flg = 0;
  811: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  812: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  813: 						continue;
  814: 					else {
  815: 						flg++;
  816: 						TASK_RET(task) = res[i].data;
  817: 						TASK_FLAG(task) = res[i].fflags;
  818: 					}
  819: 					/* remove proc handle */
  820: #ifdef HAVE_LIBPTHREAD
  821: 					pthread_mutex_lock(&r->root_mtx[taskPROC]);
  822: #endif
  823: 					TAILQ_REMOVE(&r->root_proc, task, task_node);
  824: #ifdef HAVE_LIBPTHREAD
  825: 					pthread_mutex_unlock(&r->root_mtx[taskPROC]);
  826: #endif
  827: 					task->task_type = taskREADY;
  828: #ifdef HAVE_LIBPTHREAD
  829: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  830: #endif
  831: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  832: #ifdef HAVE_LIBPTHREAD
  833: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  834: #endif
  835: 				}
   836: 				/* if at least two tasks matched, don't remove the event resource */
  837: 				if (flg > 1)
  838: 					evt->flags ^= evt->flags;
  839: 				break;
  840: 			case EVFILT_SIGNAL:
  841: 				flg = 0;
  842: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  843: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  844: 						continue;
  845: 					else {
  846: 						flg++;
  847: 						TASK_RET(task) = res[i].data;
  848: 						TASK_FLAG(task) = res[i].fflags;
  849: 					}
  850: 					/* remove signal handle */
  851: #ifdef HAVE_LIBPTHREAD
  852: 					pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
  853: #endif
  854: 					TAILQ_REMOVE(&r->root_signal, task, task_node);
  855: #ifdef HAVE_LIBPTHREAD
  856: 					pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
  857: #endif
  858: 					task->task_type = taskREADY;
  859: #ifdef HAVE_LIBPTHREAD
  860: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  861: #endif
  862: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  863: #ifdef HAVE_LIBPTHREAD
  864: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  865: #endif
  866: 				}
   867: 				/* if at least two tasks matched, don't remove the event resource */
  868: 				if (flg > 1)
  869: 					evt->flags ^= evt->flags;
  870: 				break;
  871: #ifdef AIO_SUPPORT
  872: 			case EVFILT_AIO:
  873: 				flg = 0;
  874: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
  875: 					acb = (struct aiocb*) TASK_VAL(task);
  876: 					if (acb != ((struct aiocb*) res[i].udata))
  877: 						continue;
  878: 					else {
  879: 						flg++;
  880: 						TASK_RET(task) = res[i].data;
  881: 						TASK_FLAG(task) = res[i].fflags;
  882: 					}
  883: 					/* remove user handle */
  884: #ifdef HAVE_LIBPTHREAD
  885: 					pthread_mutex_lock(&r->root_mtx[taskAIO]);
  886: #endif
  887: 					TAILQ_REMOVE(&r->root_aio, task, task_node);
  888: #ifdef HAVE_LIBPTHREAD
  889: 					pthread_mutex_unlock(&r->root_mtx[taskAIO]);
  890: #endif
  891: 					task->task_type = taskREADY;
  892: #ifdef HAVE_LIBPTHREAD
  893: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  894: #endif
  895: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  896: #ifdef HAVE_LIBPTHREAD
  897: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  898: #endif
  899: 					fd = acb->aio_fildes;
  900: 					if ((len = aio_return(acb)) != -1) {
  901: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
  902: 							LOGERR;
  903: 					} else
  904: 						LOGERR;
  905: 					free(acb);
  906: 					TASK_DATLEN(task) = (u_long) len;
  907: 					TASK_FD(task) = fd;
  908: 				}
   909: 				/* if at least two tasks matched, don't remove the event resource */
  910: 				if (flg > 1)
  911: 					evt->flags ^= evt->flags;
  912: 				break;
  913: #ifdef EVFILT_LIO
  914: 			case EVFILT_LIO:
  915: 				flg = 0;
  916: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
  917: 					acbs = (struct aiocb**) TASK_VAL(task);
  918: 					if (acbs != ((struct aiocb**) res[i].udata))
  919: 						continue;
  920: 					else {
  921: 						flg++;
  922: 						TASK_RET(task) = res[i].data;
  923: 						TASK_FLAG(task) = res[i].fflags;
  924: 					}
  925: 					/* remove user handle */
  926: #ifdef HAVE_LIBPTHREAD
  927: 					pthread_mutex_lock(&r->root_mtx[taskLIO]);
  928: #endif
  929: 					TAILQ_REMOVE(&r->root_lio, task, task_node);
  930: #ifdef HAVE_LIBPTHREAD
  931: 					pthread_mutex_unlock(&r->root_mtx[taskLIO]);
  932: #endif
  933: 					task->task_type = taskREADY;
  934: #ifdef HAVE_LIBPTHREAD
  935: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  936: #endif
  937: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  938: #ifdef HAVE_LIBPTHREAD
  939: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  940: #endif
  941: 					iv = (struct iovec*) TASK_DATA(task);
  942: 					fd = acbs[0]->aio_fildes;
  943: 					off = acbs[0]->aio_offset;
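       					/* gather each request's result; failed requests contribute zero length */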
   944: 					for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
   945: 						if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
   946: 							l = 0;
   947: 						else
   948: 							l = iv[j].iov_len;
   949: 						free(acbs[j]);
   950: 					}
  951: 					free(acbs);
  952: 					TASK_DATLEN(task) = (u_long) len;
  953: 					TASK_FD(task) = fd;
  954: 
  955: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
  956: 						LOGERR;
  957: 				}
   958: 				/* if at least two tasks matched, don't remove the event resource */
  959: 				if (flg > 1)
  960: 					evt->flags ^= evt->flags;
  961: 				break;
  962: #endif	/* EVFILT_LIO */
  963: #endif	/* AIO_SUPPORT */
  964: #ifdef EVFILT_USER
  965: 			case EVFILT_USER:
  966: 				flg = 0;
  967: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
  968: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  969: 						continue;
  970: 					else {
  971: 						flg++;
  972: 						TASK_RET(task) = res[i].data;
  973: 						TASK_FLAG(task) = res[i].fflags;
  974: 					}
  975: 					/* remove user handle */
  976: #ifdef HAVE_LIBPTHREAD
  977: 					pthread_mutex_lock(&r->root_mtx[taskUSER]);
  978: #endif
  979: 					TAILQ_REMOVE(&r->root_user, task, task_node);
  980: #ifdef HAVE_LIBPTHREAD
  981: 					pthread_mutex_unlock(&r->root_mtx[taskUSER]);
  982: #endif
  983: 					task->task_type = taskREADY;
  984: #ifdef HAVE_LIBPTHREAD
  985: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  986: #endif
  987: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  988: #ifdef HAVE_LIBPTHREAD
  989: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  990: #endif
  991: 				}
   992: 				/* if at least two tasks matched, don't remove the event resource */
  993: 				if (flg > 1)
  994: 					evt->flags ^= evt->flags;
  995: 				break;
  996: #endif	/* EVFILT_USER */
  997: 		}
  998: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
  999: 			if (r->root_hooks.hook_exec.exception) {
 1000: 				if (r->root_hooks.hook_exec.exception(r, NULL))
 1001: 					return NULL;
 1002: 			} else
 1003: 				LOGERR;
 1004: 		}
 1005: 	}
 1006: 
 1007: 	/* timer update & put in ready queue */
 1008: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1009: 
 1010: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1011: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
 1012: #ifdef HAVE_LIBPTHREAD
 1013: 			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
 1014: #endif
 1015: 			TAILQ_REMOVE(&r->root_timer, task, task_node);
 1016: #ifdef HAVE_LIBPTHREAD
 1017: 			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
 1018: #endif
 1019: 			task->task_type = taskREADY;
 1020: #ifdef HAVE_LIBPTHREAD
 1021: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1022: #endif
 1023: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1024: #ifdef HAVE_LIBPTHREAD
 1025: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1026: #endif
 1027: 		}
 1028: 
  1029: 	/* put the regular-priority task on the ready queue
  1030: 	   if there is no ready task or it has reached its max miss count */
 1031: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1032: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1033: 			r->root_miss ^= r->root_miss;
 1034: 
 1035: #ifdef HAVE_LIBPTHREAD
 1036: 			pthread_mutex_lock(&r->root_mtx[taskTASK]);
 1037: #endif
 1038: 			TAILQ_REMOVE(&r->root_task, task, task_node);
 1039: #ifdef HAVE_LIBPTHREAD
 1040: 			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
 1041: #endif
 1042: 			task->task_type = taskREADY;
 1043: #ifdef HAVE_LIBPTHREAD
 1044: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1045: #endif
 1046: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1047: #ifdef HAVE_LIBPTHREAD
 1048: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1049: #endif
 1050: 		} else
 1051: 			r->root_miss++;
 1052: 	} else
 1053: 		r->root_miss ^= r->root_miss;
 1054: 
  1055: 	/* OK, let's grab a ready task! */
 1056: 	task = TAILQ_FIRST(&r->root_ready);
 1057: 	if (!(task))
 1058: 		return NULL;
 1059: 
 1060: #ifdef HAVE_LIBPTHREAD
 1061: 	pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1062: #endif
 1063: 	TAILQ_REMOVE(&r->root_ready, task, task_node);
 1064: #ifdef HAVE_LIBPTHREAD
 1065: 	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1066: #endif
 1067: 	task->task_type = taskUNUSE;
 1068: #ifdef HAVE_LIBPTHREAD
 1069: 	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1070: #endif
 1071: 	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1072: #ifdef HAVE_LIBPTHREAD
 1073: 	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1074: #endif
 1075: 	return task;
 1076: }
 1077: 
 1078: /*
 1079:  * sched_hook_exception() - Default EXCEPTION hook
 1080:  *
 1081:  * @root = root task
  1082:  * @arg = custom handling: arg == EV_EOF or another value; default: arg == NULL logs errno
 1083:  * return: <0 errors and 0 ok
 1084:  */
 1085: void *
 1086: sched_hook_exception(void *root, void *arg)
 1087: {
 1088: 	sched_root_task_t *r = root;
 1089: 
 1090: 	if (!r)
 1091: 		return NULL;
 1092: 
 1093: 	/* custom exception handling ... */
 1094: 	if (arg) {
 1095: 		if (arg == (void*) EV_EOF)
 1096: 			return NULL;
 1097: 		return (void*) -1;	/* raise scheduler error!!! */
 1098: 	}
 1099: 
 1100: 	/* if error hook exists */
 1101: 	if (r->root_hooks.hook_root.error)
 1102: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1103: 
 1104: 	/* default case! */
 1105: 	LOGERR;
 1106: 	return NULL;
 1107: }
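
/*
 * Example sketch (not part of the original revision): a custom error hook can
 * be installed on an initialised root task so that sched_hook_exception()
 * reports through it instead of LOGERR.  Only the hook signature and the
 * root_hooks.hook_root.error field used above are assumed; my_error_hook is a
 * hypothetical name, and <stdio.h>/<string.h> are assumed available.
 *
 *	static void *
 *	my_error_hook(void *root __unused, void *arg)
 *	{
 *		// arg carries errno, as passed by sched_hook_exception()
 *		fprintf(stderr, "libaitsched error: %s\n",
 *				strerror((int) (intptr_t) arg));
 *		return NULL;
 *	}
 *
 *	...
 *	r->root_hooks.hook_root.error = my_error_hook;
 */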
 1108: 
 1109: /*
 1110:  * sched_hook_condition() - Default CONDITION hook
 1111:  *
 1112:  * @root = root task
 1113:  * @arg = killState from schedRun()
 1114:  * return: NULL kill scheduler loop or !=NULL ok
 1115:  */
 1116: void *
 1117: sched_hook_condition(void *root, void *arg)
 1118: {
 1119: 	sched_root_task_t *r = root;
 1120: 
 1121: 	if (!r)
 1122: 		return NULL;
 1123: 
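       	/* zero (NULL) when root_cond equals the kill state from schedRun(), which stops the scheduler loop */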
 1124: 	return (void*) (r->root_cond - *(intptr_t*) arg);
 1125: }
 1126: 
 1127: /*
 1128:  * sched_hook_rtc() - Default RTC hook
 1129:  *
 1130:  * @task = current task
 1131:  * @arg = unused
 1132:  * return: <0 errors and 0 ok
 1133:  */
 1134: void *
 1135: sched_hook_rtc(void *task, void *arg __unused)
 1136: {
 1137: 	sched_task_t *sigt = NULL, *t = task;
 1138: 	struct itimerspec its;
 1139: 	struct sigevent evt;
 1140: 	timer_t *tmr = NULL;
 1141: 
 1142: 	if (!t || !TASK_ROOT(t))
 1143: 		return (void*) -1;
 1144: 
 1145: 	tmr = malloc(sizeof(timer_t));
 1146: 	if (!tmr) {
 1147: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1148: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1149: 		else
 1150: 			LOGERR;
 1151: 		return (void*) -1;
 1152: 	}
 1153: 
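       	/* arm a POSIX timer that fires a real-time signal; the signal task registered below runs the callback */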
 1154: 	memset(&evt, 0, sizeof evt);
 1155: 	evt.sigev_notify = SIGEV_SIGNAL;
 1156: 	evt.sigev_signo = (int) TASK_DATA(t) + SIGRTMIN;
 1157: 	evt.sigev_value.sival_ptr = tmr;
 1158: 
 1159: 	if (timer_create((clockid_t) TASK_DATA(t), &evt, evt.sigev_value.sival_ptr) == -1) {
 1160: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1161: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1162: 		else
 1163: 			LOGERR;
 1164: 		if (tmr)
 1165: 			free(tmr);
 1166: 		return (void*) -1;
 1167: 	} else
 1168: 		TASK_DATLEN(t) = (size_t) tmr;
 1169: 
 1170: 	if (!(sigt = schedSignal(TASK_ROOT(t), TASK_FUNC(t), TASK_ARG(t), evt.sigev_signo, 
 1171: 				TASK_DATA(t), TASK_DATLEN(t)))) {
 1172: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1173: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1174: 		else
 1175: 			LOGERR;
 1176: 		timer_delete(*tmr);
 1177: 		free(tmr);
 1178: 		return (void*) -1;
 1179: 	} else
 1180: 		TASK_RET(t) = (uintptr_t) sigt;
 1181: 
 1182: 	memset(&its, 0, sizeof its);
 1183: 	its.it_value.tv_sec = t->task_val.ts.tv_sec;
 1184: 	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
 1185: 
 1186: 	if (timer_settime(*tmr, 0, &its, NULL) == -1) {
 1187: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1188: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1189: 		else
 1190: 			LOGERR;
 1191: 		schedCancel(sigt);
 1192: 		timer_delete(*tmr);
 1193: 		free(tmr);
 1194: 		return (void*) -1;
 1195: 	}
 1196: 
 1197: 	return NULL;
 1198: }
