File: libaitsched/src/hooks.c (ELWIX - Embedded LightWeight unIX)
Revision 1.24.4.6
Tue Jan 28 12:14:20 2014 UTC (10 years, 5 months ago) by misho
Branches: sched4_7
stop signal if kq disabled

    1: /*************************************************************************
    2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
    3: *  by Michael Pounov <misho@openbsd-bg.org>
    4: *
    5: * $Author: misho $
    6: * $Id: hooks.c,v 1.24.4.6 2014/01/28 12:14:20 misho Exp $
    7: *
    8: **************************************************************************
    9: The ELWIX and AITNET software is distributed under the following
   10: terms:
   11: 
   12: All of the documentation and software included in the ELWIX and AITNET
   13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
   14: 
   15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
   16: 	by Michael Pounov <misho@elwix.org>.  All rights reserved.
   17: 
   18: Redistribution and use in source and binary forms, with or without
   19: modification, are permitted provided that the following conditions
   20: are met:
   21: 1. Redistributions of source code must retain the above copyright
   22:    notice, this list of conditions and the following disclaimer.
   23: 2. Redistributions in binary form must reproduce the above copyright
   24:    notice, this list of conditions and the following disclaimer in the
   25:    documentation and/or other materials provided with the distribution.
   26: 3. All advertising materials mentioning features or use of this software
   27:    must display the following acknowledgement:
   28: This product includes software developed by Michael Pounov <misho@elwix.org>
   29: ELWIX - Embedded LightWeight unIX and its contributors.
   30: 4. Neither the name of AITNET nor the names of its contributors
   31:    may be used to endorse or promote products derived from this software
   32:    without specific prior written permission.
   33: 
   34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
   35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37: ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44: SUCH DAMAGE.
   45: */
   46: #include "global.h"
   47: #include "hooks.h"
   48: 
   49: 
   50: /*
   51:  * sched_hook_init() - Default INIT hook
   52:  *
   53:  * @root = root task
   54:  * @arg = unused
   55:  * return: <0 errors and 0 ok
   56:  */
   57: void *
   58: sched_hook_init(void *root, void *arg __unused)
   59: {
   60: 	sched_root_task_t *r = root;
   61: 
   62: 	if (!r)
   63: 		return (void*) -1;
   64: 
   65: #ifndef KQ_DISABLE
   66: 	r->root_kq = kqueue();
   67: 	if (r->root_kq == -1) {
   68: 		LOGERR;
   69: 		return (void*) -1;
   70: 	}
   71: #else
   72: 	r->root_kq ^= r->root_kq;
   73: 	FD_ZERO(&r->root_fds[0]);
   74: 	FD_ZERO(&r->root_fds[1]);
   75: #endif
   76: 
   77: 	return NULL;
   78: }
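/*
 * Illustrative sketch (not part of this revision): all of the default hooks
 * in this file share one calling convention -- they take opaque pointers and
 * return NULL on success or (void*) -1 on failure.  With KQ_DISABLE the
 * INIT/FINI pair falls back to select(2) bookkeeping: root_kq is reused as
 * "highest descriptor + 1" and root_fds[0]/root_fds[1] hold the read/write
 * sets.  The direct calls below only demonstrate the convention; in normal
 * use the library installs these hooks into root_hooks itself.
 */
#if 0	/* example only */
static void
example_init_fini(void)
{
	sched_root_task_t sr;

	memset(&sr, 0, sizeof sr);
	if (sched_hook_init(&sr, NULL) == (void*) -1)
		return;				/* kqueue() failed (or the root was NULL) */

	/* ... register tasks and drive the FETCH hook here ... */

	sched_hook_fini(&sr, NULL);		/* closes the kqueue / clears the fd sets */
}
#endif	/* example only */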
   79: 
   80: /*
   81:  * sched_hook_fini() - Default FINI hook
   82:  *
   83:  * @root = root task
   84:  * @arg = unused
   85:  * return: <0 errors and 0 ok
   86:  */
   87: void *
   88: sched_hook_fini(void *root, void *arg __unused)
   89: {
   90: 	sched_root_task_t *r = root;
   91: 
   92: 	if (!r)
   93: 		return (void*) -1;
   94: 
   95: #ifndef KQ_DISABLE
   96: 	if (r->root_kq > 2) {
   97: 		close(r->root_kq);
   98: 		r->root_kq = 0;
   99: 	}
  100: #else
  101: 	FD_ZERO(&r->root_fds[1]);
  102: 	FD_ZERO(&r->root_fds[0]);
  103: 	r->root_kq ^= r->root_kq;
  104: #endif
  105: 
  106: 	return NULL;
  107: }
  108: 
  109: /*
  110:  * sched_hook_cancel() - Default CANCEL hook
  111:  *
  112:  * @task = current task
  113:  * @arg = unused
  114:  * return: <0 errors and 0 ok
  115:  */
  116: void *
  117: sched_hook_cancel(void *task, void *arg __unused)
  118: {
  119: 	sched_task_t *t = task;
  120: #ifndef KQ_DISABLE
  121: 	struct kevent chg[1];
  122: 	struct timespec timeout = { 0, 0 };
  123: #else
  124: 	sched_root_task_t *r = NULL;
  125: 	register int i;
  126: #endif
  127: #ifdef AIO_SUPPORT
  128: 	struct aiocb *acb;
  129: #ifdef EVFILT_LIO
  130: 	register int i = 0;
  131: 	struct aiocb **acbs;
  132: #endif	/* EVFILT_LIO */
  133: #endif	/* AIO_SUPPORT */
  134: 
  135: 	if (!t || !TASK_ROOT(t))
  136: 		return (void*) -1;
  137: #ifdef KQ_DISABLE
  138: 	r = TASK_ROOT(t);
  139: #endif
  140: 
  141: 	switch (TASK_TYPE(t)) {
  142: 		case taskREAD:
  143: #ifndef KQ_DISABLE
  144: #ifdef __NetBSD__
  145: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  146: #else
  147: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  148: #endif
  149: #else
  150: 			FD_CLR(TASK_FD(t), &r->root_fds[0]);
  151: 
  152: 			/* optimize select */
  153: 			for (i = r->root_kq - 1; i > 2; i--)
  154: 				if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
  155: 					break;
  156: 			if (i > 2)
  157: 				r->root_kq = i + 1;
  158: #endif
  159: 			break;
  160: 		case taskWRITE:
  161: #ifndef KQ_DISABLE
  162: #ifdef __NetBSD__
  163: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  164: #else
  165: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  166: #endif
  167: #else
  168: 			FD_CLR(TASK_FD(t), &r->root_fds[1]);
  169: 
  170: 			/* optimize select */
  171: 			for (i = r->root_kq - 1; i > 2; i--)
  172: 				if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
  173: 					break;
  174: 			if (i > 2)
  175: 				r->root_kq = i + 1;
  176: #endif
  177: 			break;
  178: 		case taskALARM:
  179: #ifndef KQ_DISABLE
  180: #ifdef __NetBSD__
  181: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  182: 					0, 0, (intptr_t) TASK_DATA(t));
  183: #else
  184: 			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE, 
  185: 					0, 0, (void*) TASK_DATA(t));
  186: #endif
  187: #endif
  188: 			break;
  189: 		case taskNODE:
  190: #ifndef KQ_DISABLE
  191: #ifdef __NetBSD__
  192: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
  193: #else
  194: 			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
  195: #endif
  196: #endif
  197: 			break;
  198: 		case taskPROC:
  199: #ifndef KQ_DISABLE
  200: #ifdef __NetBSD__
  201: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  202: #else
  203: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  204: #endif
  205: #endif
  206: 			break;
  207: 		case taskSIGNAL:
  208: #ifndef KQ_DISABLE
  209: #ifdef __NetBSD__
  210: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  211: #else
  212: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  213: #endif
  214: 			/* restore signal */
  215: 			signal(TASK_VAL(t), SIG_DFL);
  216: #endif
  217: 			break;
  218: #ifdef AIO_SUPPORT
  219: 		case taskAIO:
  220: #ifndef KQ_DISABLE
  221: #ifdef __NetBSD__
  222: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  223: #else
  224: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  225: #endif
  226: 			acb = (struct aiocb*) TASK_VAL(t);
  227: 			if (acb) {
  228: 				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
  229: 					aio_return(acb);
  230: 				free(acb);
  231: 				TASK_VAL(t) = 0;
  232: 			}
  233: #endif
  234: 			break;
  235: #ifdef EVFILT_LIO
  236: 		case taskLIO:
  237: #ifndef KQ_DISABLE
  238: #ifdef __NetBSD__
  239: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  240: #else
  241: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  242: #endif
  243: 			acbs = (struct aiocb**) TASK_VAL(t);
  244: 			if (acbs) {
  245: 				for (i = 0; i < TASK_DATLEN(t); i++) {
  246: 					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
  247: 						aio_return(acbs[i]);
  248: 					free(acbs[i]);
  249: 				}
  250: 				free(acbs);
  251: 				TASK_VAL(t) = 0;
  252: 			}
  253: #endif
  254: 			break;
  255: #endif	/* EVFILT_LIO */
  256: #endif	/* AIO_SUPPORT */
  257: #ifdef EVFILT_USER
  258: 		case taskUSER:
  259: #ifndef KQ_DISABLE
  260: #ifdef __NetBSD__
  261: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
  262: #else
  263: 			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  264: #endif
  265: #endif
  266: 			break;
  267: #endif	/* EVFILT_USER */
  268: 		case taskTHREAD:
  269: #ifdef HAVE_LIBPTHREAD
  270: 			pthread_cancel((pthread_t) TASK_VAL(t));
  271: #endif
  272: 			return NULL;
  273: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
  274: 		case taskRTC:
  275: 			timer_delete((timer_t) TASK_FLAG(t));
  276: 			schedCancel((sched_task_t*) TASK_RET(t));
  277: 			return NULL;
  278: #endif	/* HAVE_TIMER_CREATE */
  279: 		default:
  280: 			return NULL;
  281: 	}
  282: 
  283: #ifndef KQ_DISABLE
  284: 	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
  285: #endif
  286: 	return NULL;
  287: }
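/*
 * Summary of the CANCEL hook above: cancellation mirrors registration -- each
 * task type issues an EV_DELETE for the same ident/filter pair it was added
 * with, and types that own extra resources release them here as well: AIO and
 * LIO tasks cancel and free their aiocb buffers, SIGNAL tasks restore the
 * default disposition that the SIGNAL hook had switched to SIG_IGN, THREAD
 * tasks are cancelled with pthread_cancel(), and RTC tasks delete their POSIX
 * timer and cancel the chained SIGNAL task.
 */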
  288: 
  289: #ifdef HAVE_LIBPTHREAD
  290: /*
  291:  * sched_hook_thread() - Default THREAD hook
  292:  *
  293:  * @task = current task
  294:  * @arg = pthread attributes
  295:  * return: <0 errors and 0 ok
  296:  */
  297: void *
  298: sched_hook_thread(void *task, void *arg)
  299: {
  300: 	sched_task_t *t = task;
  301: 	pthread_t tid;
  302: 	sigset_t s, o;
  303: 
  304: 	if (!t || !TASK_ROOT(t))
  305: 		return (void*) -1;
  306: 
  307: 	sigfillset(&s);
  308: 	pthread_sigmask(SIG_BLOCK, &s, &o);
  309: 	if ((errno = pthread_create(&tid, (pthread_attr_t*) arg, 
  310: 				(void *(*)(void*)) _sched_threadWrapper, t))) {
  311: 		LOGERR;
  312: 		pthread_sigmask(SIG_SETMASK, &o, NULL);
  313: 		return (void*) -1;
  314: 	} else
  315: 		TASK_VAL(t) = (u_long) tid;
  316: 
  317: 	if (!TASK_ISLOCKED(t))
  318: 		TASK_LOCK(t);
  319: 
  320: 	pthread_sigmask(SIG_SETMASK, &o, NULL);
  321: 	return NULL;
  322: }
  323: #endif
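/*
 * Illustrative sketch (not part of this revision): the THREAD hook blocks
 * every signal around pthread_create() so the new worker starts with a fully
 * blocked, inherited signal mask and asynchronous signals keep being handled
 * by the scheduler thread.  The same pattern in isolation (needs <pthread.h>
 * and <signal.h>):
 */
#if 0	/* example only */
static int
spawn_with_signals_blocked(pthread_t *tid, void *(*fn)(void *), void *arg)
{
	sigset_t all, old;
	int error;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &old);		/* the child inherits this mask */
	error = pthread_create(tid, NULL, fn, arg);
	pthread_sigmask(SIG_SETMASK, &old, NULL);	/* restore the caller's mask */
	return error;
}
#endif	/* example only */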
  324: 
  325: /*
  326:  * sched_hook_read() - Default READ hook
  327:  *
  328:  * @task = current task
  329:  * @arg = unused
  330:  * return: <0 errors and 0 ok
  331:  */
  332: void *
  333: sched_hook_read(void *task, void *arg __unused)
  334: {
  335: 	sched_task_t *t = task;
  336: #ifndef KQ_DISABLE
  337: 	struct kevent chg[1];
  338: 	struct timespec timeout = { 0, 0 };
  339: #else
  340: 	sched_root_task_t *r = NULL;
  341: #endif
  342: 
  343: 	if (!t || !TASK_ROOT(t))
  344: 		return (void*) -1;
  345: #ifdef KQ_DISABLE
  346: 	r = TASK_ROOT(t);
  347: #endif
  348: 
  349: #ifndef KQ_DISABLE
  350: #ifdef __NetBSD__
  351: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  352: #else
  353: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  354: #endif
  355: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  356: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  357: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  358: 		else
  359: 			LOGERR;
  360: 		return (void*) -1;
  361: 	}
  362: #else
  363: 	FD_SET(TASK_FD(t), &r->root_fds[0]);
  364: 	if (TASK_FD(t) >= r->root_kq)
  365: 		r->root_kq = TASK_FD(t) + 1;
  366: #endif
  367: 
  368: 	return NULL;
  369: }
  370: 
  371: /*
  372:  * sched_hook_write() - Default WRITE hook
  373:  *
  374:  * @task = current task
  375:  * @arg = unused
  376:  * return: <0 errors and 0 ok
  377:  */
  378: void *
  379: sched_hook_write(void *task, void *arg __unused)
  380: {
  381: 	sched_task_t *t = task;
  382: #ifndef KQ_DISABLE
  383: 	struct kevent chg[1];
  384: 	struct timespec timeout = { 0, 0 };
  385: #else
  386: 	sched_root_task_t *r = NULL;
  387: #endif
  388: 
  389: 	if (!t || !TASK_ROOT(t))
  390: 		return (void*) -1;
  391: #ifdef KQ_DISABLE
  392: 	r = TASK_ROOT(t);
  393: #endif
  394: 
  395: #ifndef KQ_DISABLE
  396: #ifdef __NetBSD__
  397: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
  398: #else
  399: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
  400: #endif
  401: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  402: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  403: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  404: 		else
  405: 			LOGERR;
  406: 		return (void*) -1;
  407: 	}
  408: #else
  409: 	FD_SET(TASK_FD(t), &r->root_fds[1]);
  410: 	if (TASK_FD(t) >= r->root_kq)
  411: 		r->root_kq = TASK_FD(t) + 1;
  412: #endif
  413: 
  414: 	return NULL;
  415: }
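/*
 * Illustrative sketch (not part of this revision): the READ and WRITE hooks
 * both register interest with a one-element change list and a zeroed
 * timespec, so kevent(2) applies the change without waiting for events.  The
 * same registration pattern as a stand-alone, hypothetical helper (needs
 * <sys/types.h>, <sys/event.h> and <sys/time.h>):
 */
#if 0	/* example only */
static int
kq_watch(int kq, int fd, short filter)
{
	struct kevent chg;
	struct timespec ts = { 0, 0 };

	EV_SET(&chg, fd, filter, EV_ADD | EV_CLEAR, 0, 0, 0);
	return kevent(kq, &chg, 1, NULL, 0, &ts);	/* apply the change, do not wait */
}

/* usage: kq_watch(kq, fd, EVFILT_READ) or kq_watch(kq, fd, EVFILT_WRITE) */
#endif	/* example only */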
  416: 
  417: /*
  418:  * sched_hook_alarm() - Default ALARM hook
  419:  *
  420:  * @task = current task
  421:  * @arg = unused
  422:  * return: <0 errors and 0 ok
  423:  */
  424: void *
  425: sched_hook_alarm(void *task, void *arg __unused)
  426: {
  427: #ifndef KQ_DISABLE
  428: 	sched_task_t *t = task;
  429: 	struct kevent chg[1];
  430: 	struct timespec timeout = { 0, 0 };
  431: 
  432: 	if (!t || !TASK_ROOT(t))
  433: 		return (void*) -1;
  434: 
  435: #ifdef __NetBSD__
  436: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  437: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  438: 			(intptr_t) TASK_DATA(t));
  439: #else
  440: 	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0, 
  441: 			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000, 
  442: 			(void*) TASK_DATA(t));
  443: #endif
  444: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  445: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  446: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  447: 		else
  448: 			LOGERR;
  449: 		return (void*) -1;
  450: 	}
  451: 
  452: #endif
  453: 	return NULL;
  454: }
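/*
 * Note on the interval conversion above: EVFILT_TIMER's data field defaults
 * to milliseconds, hence tv_sec * 1000 + tv_nsec / 1000000.  For example, a
 * task interval of { .tv_sec = 2, .tv_nsec = 500000000 } is registered as
 * 2 * 1000 + 500000000 / 1000000 = 2500 ms.
 */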
  455: 
  456: /*
  457:  * sched_hook_node() - Default NODE hook
  458:  *
  459:  * @task = current task
  460:  * @arg = unused
  461:  * return: <0 errors and 0 ok
  462:  */
  463: void *
  464: sched_hook_node(void *task, void *arg __unused)
  465: {
  466: #ifndef KQ_DISABLE
  467: 	sched_task_t *t = task;
  468: 	struct kevent chg[1];
  469: 	struct timespec timeout = { 0, 0 };
  470: 
  471: 	if (!t || !TASK_ROOT(t))
  472: 		return (void*) -1;
  473: 
  474: #ifdef __NetBSD__
  475: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  476: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  477: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
  478: #else
  479: 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR, 
  480: 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | 
  481: 			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
  482: #endif
  483: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  484: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  485: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  486: 		else
  487: 			LOGERR;
  488: 		return (void*) -1;
  489: 	}
  490: 
  491: #endif
  492: 	return NULL;
  493: }
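/*
 * Illustrative sketch (not part of this revision): when a vnode event fires,
 * the FETCH hook below stores the triggered fflags in TASK_FLAG() and the
 * event data in TASK_RET(), so a NODE task callback can test them:
 */
#if 0	/* example only */
	if (TASK_FLAG(task) & NOTE_WRITE)
		/* the watched file was written to */ ;
	if (TASK_FLAG(task) & (NOTE_DELETE | NOTE_RENAME | NOTE_REVOKE))
		/* the watched path went away; re-open and re-register it */ ;
#endif	/* example only */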
  494: 
  495: /*
  496:  * sched_hook_proc() - Default PROC hook
  497:  *
  498:  * @task = current task
  499:  * @arg = unused
  500:  * return: <0 errors and 0 ok
  501:  */
  502: void *
  503: sched_hook_proc(void *task, void *arg __unused)
  504: {
  505: #ifndef KQ_DISABLE
  506: 	sched_task_t *t = task;
  507: 	struct kevent chg[1];
  508: 	struct timespec timeout = { 0, 0 };
  509: 
  510: 	if (!t || !TASK_ROOT(t))
  511: 		return (void*) -1;
  512: 
  513: #ifdef __NetBSD__
  514: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  515: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
  516: #else
  517: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR, 
  518: 			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
  519: #endif
  520: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  521: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  522: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  523: 		else
  524: 			LOGERR;
  525: 		return (void*) -1;
  526: 	}
  527: 
  528: #endif
  529: 	return NULL;
  530: }
  531: 
  532: /*
  533:  * sched_hook_signal() - Default SIGNAL hook
  534:  *
  535:  * @task = current task
  536:  * @arg = unused
  537:  * return: <0 errors and 0 ok
  538:  */
  539: void *
  540: sched_hook_signal(void *task, void *arg __unused)
  541: {
  542: #ifndef KQ_DISABLE
  543: 	sched_task_t *t = task;
  544: 	struct kevent chg[1];
  545: 	struct timespec timeout = { 0, 0 };
  546: 
  547: 	if (!t || !TASK_ROOT(t))
  548: 		return (void*) -1;
  549: 
  550: 	/* ignore signal */
  551: 	signal(TASK_VAL(t), SIG_IGN);
  552: 
  553: #ifdef __NetBSD__
  554: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
  555: #else
  556: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
  557: #endif
  558: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  559: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  560: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  561: 		else
  562: 			LOGERR;
  563: 		return (void*) -1;
  564: 	}
  565: #else
  566: #if 0
  567: 	sched_task_t *t = task;
  568: 	struct sigaction sa;
  569: 
  570: 	memset(&sa, 0, sizeof sa);
  571: 	sigemptyset(&sa.sa_mask);
  572: 	sa.sa_handler = _sched_sigHandler;
  573: 	sa.sa_flags = SA_RESETHAND | SA_RESTART;
  574: 
  575: 	if (sigaction(TASK_VAL(t), &sa, NULL) == -1) {
  576: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  577: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  578: 		else
  579: 			LOGERR;
  580: 		return (void*) -1;
  581: 	}
  582: #endif	/* 0 */
  583: #endif
  584: 	return NULL;
  585: }
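/*
 * Note on the SIGNAL hook above: kqueue's EVFILT_SIGNAL records signal
 * delivery even when the disposition is SIG_IGN, so ignoring the signal
 * first suppresses the default action (e.g. termination) while the filter
 * still observes every occurrence; the event's data field carries how many
 * deliveries were coalesced since the last fetch, which the FETCH hook
 * copies into TASK_RET().
 */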
  586: 
  587: /*
  588:  * sched_hook_user() - Default USER hook
  589:  *
  590:  * @task = current task
  591:  * @arg = unused
  592:  * return: <0 errors and 0 ok
  593:  */
  594: #ifdef EVFILT_USER
  595: void *
  596: sched_hook_user(void *task, void *arg __unused)
  597: {
  598: #ifndef KQ_DISABLE
  599: 	sched_task_t *t = task;
  600: 	struct kevent chg[1];
  601: 	struct timespec timeout = { 0, 0 };
  602: 
  603: 	if (!t || !TASK_ROOT(t))
  604: 		return (void*) -1;
  605: 
  606: #ifdef __NetBSD__
  607: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  608: 			0, (intptr_t) TASK_VAL(t));
  609: #else
  610: 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t), 
  611: 			0, (void*) TASK_VAL(t));
  612: #endif
  613: 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
  614: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
  615: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
  616: 		else
  617: 			LOGERR;
  618: 		return (void*) -1;
  619: 	}
  620: 
  621: #endif
  622: 	return NULL;
  623: }
  624: #endif
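/*
 * Illustrative sketch (not part of this revision): the USER hook only
 * registers the EVFILT_USER event; it still has to be fired by a separate
 * kevent(2) change carrying NOTE_TRIGGER, roughly as in the hypothetical
 * helper below (the library's own trigger path is not part of this file).
 */
#if 0	/* example only */
static int
kq_user_trigger(int kq, uintptr_t ident)
{
	struct kevent chg;
	struct timespec ts = { 0, 0 };

	EV_SET(&chg, ident, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
	return kevent(kq, &chg, 1, NULL, 0, &ts);
}
#endif	/* example only */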
  625: 
  626: /*
  627:  * sched_hook_fetch() - Default FETCH hook
  628:  *
  629:  * @root = root task
  630:  * @arg = unused
  631:  * return: NULL error or !=NULL fetched task
  632:  */
  633: void *
  634: sched_hook_fetch(void *root, void *arg __unused)
  635: {
  636: 	sched_root_task_t *r = root;
  637: 	sched_task_t *task, *tmp;
  638: 	struct timespec now, m, mtmp;
  639: #ifndef KQ_DISABLE
  640: 	struct kevent evt[1], res[KQ_EVENTS];
  641: 	struct timespec *timeout;
  642: #else
  643: 	struct timeval *timeout, tv; fd_set rfd, wfd, xfd;
  644: #endif
  645: 	register int i, flg;
  646: 	int en;
  647: #ifdef AIO_SUPPORT
  648: 	int len, fd;
  649: 	struct aiocb *acb;
  650: #ifdef EVFILT_LIO
  651: 	int l;
  652: 	register int j;
  653: 	off_t off;
  654: 	struct aiocb **acbs;
  655: 	struct iovec *iv;
  656: #endif	/* EVFILT_LIO */
  657: #endif	/* AIO_SUPPORT */
  658: 
  659: 	if (!r)
  660: 		return NULL;
  661: 
  662: 	/* get new task by queue priority */
  663: 	while ((task = TAILQ_FIRST(&r->root_event))) {
  664: #ifdef HAVE_LIBPTHREAD
  665: 		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
  666: #endif
  667: 		TAILQ_REMOVE(&r->root_event, task, task_node);
  668: #ifdef HAVE_LIBPTHREAD
  669: 		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
  670: #endif
  671: 		task->task_type = taskUNUSE;
  672: #ifdef HAVE_LIBPTHREAD
  673: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  674: #endif
  675: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  676: #ifdef HAVE_LIBPTHREAD
  677: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  678: #endif
  679: 		return task;
  680: 	}
  681: 	while ((task = TAILQ_FIRST(&r->root_ready))) {
  682: #ifdef HAVE_LIBPTHREAD
  683: 		pthread_mutex_lock(&r->root_mtx[taskREADY]);
  684: #endif
  685: 		TAILQ_REMOVE(&r->root_ready, task, task_node);
  686: #ifdef HAVE_LIBPTHREAD
  687: 		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  688: #endif
  689: 		task->task_type = taskUNUSE;
  690: #ifdef HAVE_LIBPTHREAD
  691: 		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  692: #endif
  693: 		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  694: #ifdef HAVE_LIBPTHREAD
  695: 		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  696: #endif
  697: 		return task;
  698: 	}
  699: 
  700: #ifdef TIMER_WITHOUT_SORT
  701: 	clock_gettime(CLOCK_MONOTONIC, &now);
  702: 
  703: 	sched_timespecclear(&r->root_wait);
  704: 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
  705: 		if (!sched_timespecisset(&r->root_wait))
  706: 			r->root_wait = TASK_TS(task);
  707: 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
  708: 			r->root_wait = TASK_TS(task);
  709: 	}
  710: 
  711: 	if (TAILQ_FIRST(&r->root_timer)) {
  712: 		m = r->root_wait;
  713: 		sched_timespecsub(&m, &now, &mtmp);
  714: 		r->root_wait = mtmp;
  715: 	} else {
  716: 		/* set wait INFTIM */
  717: 		sched_timespecinf(&r->root_wait);
  718: 	}
  719: #else	/* ! TIMER_WITHOUT_SORT */
  720: 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
  721: 		clock_gettime(CLOCK_MONOTONIC, &now);
  722: 
  723: 		m = TASK_TS(task);
  724: 		sched_timespecsub(&m, &now, &mtmp);
  725: 		r->root_wait = mtmp;
  726: 	} else {
  727: 		/* set wait INFTIM */
  728: 		sched_timespecinf(&r->root_wait);
  729: 	}
  730: #endif	/* TIMER_WITHOUT_SORT */
  731: 	/* if a regular task is pending, set NOWAIT (poll without blocking) */
  732: 	if (TAILQ_FIRST(&r->root_task))
  733: 		sched_timespecclear(&r->root_wait);
  734: 
  735: 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
  736: #ifndef KQ_DISABLE
  737: 		timeout = &r->root_wait;
  738: #else
  739: 		sched_timespec2val(&r->root_wait, &tv);
  740: 		timeout = &tv;
  741: #endif	/* KQ_DISABLE */
  742: 	} else if (sched_timespecisinf(&r->root_poll))
  743: 		timeout = NULL;
  744: 	else {
  745: #ifndef KQ_DISABLE
  746: 		timeout = &r->root_poll;
  747: #else
  748: 		sched_timespec2val(&r->root_poll, &tv);
  749: 		timeout = &tv;
  750: #endif	/* KQ_DISABLE */
  751: 	}
  752: 
  753: #ifndef KQ_DISABLE
  754: 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
  755: #else
  756: 	rfd = xfd = r->root_fds[0]; wfd = r->root_fds[1];	/* select() overwrites its fd_set args */
  757: 	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
  758: #endif	/* KQ_DISABLE */
  759: 		if (r->root_hooks.hook_exec.exception) {
  760: 			if (r->root_hooks.hook_exec.exception(r, NULL))
  761: 				return NULL;
  762: 		} else if (errno != EINTR)
  763: 			LOGERR;
  764: 		goto skip_event;
  765: 	}
  766: 
  767: 	/* kevent dispatcher */
  768: 	now.tv_sec = now.tv_nsec = 0;
  769: 	/* Go and catch the cat into pipes ... */
  770: #ifndef KQ_DISABLE
  771: 	for (i = 0; i < en; i++) {
  772: 		memcpy(evt, &res[i], sizeof evt);
  773: 		evt->flags = EV_DELETE;
  774: 		/* Put read/write task to ready queue */
  775: 		switch (res[i].filter) {
  776: 			case EVFILT_READ:
  777: 				flg = 0;
  778: 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
  779: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  780: 						continue;
  781: 					else {
  782: 						flg++;
  783: 						TASK_RET(task) = res[i].data;
  784: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  785: 					}
  786: 					/* remove read handle */
  787: #ifdef HAVE_LIBPTHREAD
  788: 					pthread_mutex_lock(&r->root_mtx[taskREAD]);
  789: #endif
  790: 					TAILQ_REMOVE(&r->root_read, task, task_node);
  791: #ifdef HAVE_LIBPTHREAD
  792: 					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
  793: #endif
  794: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  795:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  796: 							task->task_type = taskUNUSE;
  797: #ifdef HAVE_LIBPTHREAD
  798: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  799: #endif
  800: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  801: #ifdef HAVE_LIBPTHREAD
  802: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  803: #endif
  804: 						} else {
  805: 							task->task_type = taskREADY;
  806: #ifdef HAVE_LIBPTHREAD
  807: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  808: #endif
  809: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  810: #ifdef HAVE_LIBPTHREAD
  811: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  812: #endif
  813: 						}
  814: 					} else {
  815: 						task->task_type = taskREADY;
  816: #ifdef HAVE_LIBPTHREAD
  817: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  818: #endif
  819: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  820: #ifdef HAVE_LIBPTHREAD
  821: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  822: #endif
  823: 					}
  824: 				}
  825: 				/* if at least 2 matched, don't remove the event resource */
  826: 				if (flg > 1)
  827: 					evt->flags ^= evt->flags;
  828: 				break;
  829: 			case EVFILT_WRITE:
  830: 				flg = 0;
  831: 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
  832: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  833: 						continue;
  834: 					else {
  835: 						flg++;
  836: 						TASK_RET(task) = res[i].data;
  837: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  838: 					}
  839: 					/* remove write handle */
  840: #ifdef HAVE_LIBPTHREAD
  841: 					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
  842: #endif
  843: 					TAILQ_REMOVE(&r->root_write, task, task_node);
  844: #ifdef HAVE_LIBPTHREAD
  845: 					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
  846: #endif
  847: 					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
  848:  						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
  849: 							task->task_type = taskUNUSE;
  850: #ifdef HAVE_LIBPTHREAD
  851: 							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
  852: #endif
  853: 							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
  854: #ifdef HAVE_LIBPTHREAD
  855: 							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
  856: #endif
  857: 						} else {
  858: 							task->task_type = taskREADY;
  859: #ifdef HAVE_LIBPTHREAD
  860: 							pthread_mutex_lock(&r->root_mtx[taskREADY]);
  861: #endif
  862: 							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  863: #ifdef HAVE_LIBPTHREAD
  864: 							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  865: #endif
  866: 						}
  867: 					} else {
  868: 						task->task_type = taskREADY;
  869: #ifdef HAVE_LIBPTHREAD
  870: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
  871: #endif
  872: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  873: #ifdef HAVE_LIBPTHREAD
  874: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  875: #endif
  876: 					}
  877: 				}
  878: 				/* if at least 2 matched, don't remove the event resource */
  879: 				if (flg > 1)
  880: 					evt->flags ^= evt->flags;
  881: 				break;
  882: 			case EVFILT_TIMER:
  883: 				flg = 0;
  884: 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
  885: 					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
  886: 						continue;
  887: 					else {
  888: 						flg++;
  889: 						TASK_RET(task) = res[i].data;
  890: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  891: 					}
  892: 					/* remove alarm handle */
  893: #ifdef HAVE_LIBPTHREAD
  894: 					pthread_mutex_lock(&r->root_mtx[taskALARM]);
  895: #endif
  896: 					TAILQ_REMOVE(&r->root_alarm, task, task_node);
  897: #ifdef HAVE_LIBPTHREAD
  898: 					pthread_mutex_unlock(&r->root_mtx[taskALARM]);
  899: #endif
  900: 					task->task_type = taskREADY;
  901: #ifdef HAVE_LIBPTHREAD
  902: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  903: #endif
  904: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  905: #ifdef HAVE_LIBPTHREAD
  906: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  907: #endif
  908: 				}
  909: 				/* if at least 2 matched, don't remove the event resource */
  910: 				if (flg > 1)
  911: 					evt->flags ^= evt->flags;
  912: 				break;
  913: 			case EVFILT_VNODE:
  914: 				flg = 0;
  915: 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
  916: 					if (TASK_FD(task) != ((intptr_t) res[i].udata))
  917: 						continue;
  918: 					else {
  919: 						flg++;
  920: 						TASK_RET(task) = res[i].data;
  921: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  922: 					}
  923: 					/* remove node handle */
  924: #ifdef HAVE_LIBPTHREAD
  925: 					pthread_mutex_lock(&r->root_mtx[taskNODE]);
  926: #endif
  927: 					TAILQ_REMOVE(&r->root_node, task, task_node);
  928: #ifdef HAVE_LIBPTHREAD
  929: 					pthread_mutex_unlock(&r->root_mtx[taskNODE]);
  930: #endif
  931: 					task->task_type = taskREADY;
  932: #ifdef HAVE_LIBPTHREAD
  933: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  934: #endif
  935: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  936: #ifdef HAVE_LIBPTHREAD
  937: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  938: #endif
  939: 				}
  940: 				/* if at least 2 matched, don't remove the event resource */
  941: 				if (flg > 1)
  942: 					evt->flags ^= evt->flags;
  943: 				break;
  944: 			case EVFILT_PROC:
  945: 				flg = 0;
  946: 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
  947: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  948: 						continue;
  949: 					else {
  950: 						flg++;
  951: 						TASK_RET(task) = res[i].data;
  952: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  953: 					}
  954: 					/* remove proc handle */
  955: #ifdef HAVE_LIBPTHREAD
  956: 					pthread_mutex_lock(&r->root_mtx[taskPROC]);
  957: #endif
  958: 					TAILQ_REMOVE(&r->root_proc, task, task_node);
  959: #ifdef HAVE_LIBPTHREAD
  960: 					pthread_mutex_unlock(&r->root_mtx[taskPROC]);
  961: #endif
  962: 					task->task_type = taskREADY;
  963: #ifdef HAVE_LIBPTHREAD
  964: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  965: #endif
  966: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  967: #ifdef HAVE_LIBPTHREAD
  968: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  969: #endif
  970: 				}
  971: 				/* if at least 2 matched, don't remove the event resource */
  972: 				if (flg > 1)
  973: 					evt->flags ^= evt->flags;
  974: 				break;
  975: 			case EVFILT_SIGNAL:
  976: 				flg = 0;
  977: 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
  978: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
  979: 						continue;
  980: 					else {
  981: 						flg++;
  982: 						TASK_RET(task) = res[i].data;
  983: 						TASK_FLAG(task) = (u_long) res[i].fflags;
  984: 					}
  985: 					/* remove signal handle */
  986: #ifdef HAVE_LIBPTHREAD
  987: 					pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
  988: #endif
  989: 					TAILQ_REMOVE(&r->root_signal, task, task_node);
  990: #ifdef HAVE_LIBPTHREAD
  991: 					pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
  992: #endif
  993: 					task->task_type = taskREADY;
  994: #ifdef HAVE_LIBPTHREAD
  995: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
  996: #endif
  997: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
  998: #ifdef HAVE_LIBPTHREAD
  999: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1000: #endif
 1001: 				}
 1002: 				/* if at least 2 matched, don't remove the event resource */
 1003: 				if (flg > 1)
 1004: 					evt->flags ^= evt->flags;
 1005: 				break;
 1006: #ifdef AIO_SUPPORT
 1007: 			case EVFILT_AIO:
 1008: 				flg = 0;
 1009: 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
 1010: 					acb = (struct aiocb*) TASK_VAL(task);
 1011: 					if (acb != ((struct aiocb*) res[i].udata))
 1012: 						continue;
 1013: 					else {
 1014: 						flg++;
 1015: 						TASK_RET(task) = res[i].data;
 1016: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1017: 					}
 1018: 					/* remove user handle */
 1019: #ifdef HAVE_LIBPTHREAD
 1020: 					pthread_mutex_lock(&r->root_mtx[taskAIO]);
 1021: #endif
 1022: 					TAILQ_REMOVE(&r->root_aio, task, task_node);
 1023: #ifdef HAVE_LIBPTHREAD
 1024: 					pthread_mutex_unlock(&r->root_mtx[taskAIO]);
 1025: #endif
 1026: 					task->task_type = taskREADY;
 1027: #ifdef HAVE_LIBPTHREAD
 1028: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1029: #endif
 1030: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1031: #ifdef HAVE_LIBPTHREAD
 1032: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1033: #endif
 1034: 					fd = acb->aio_fildes;
 1035: 					if ((len = aio_return(acb)) != -1) {
 1036: 						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
 1037: 							LOGERR;
 1038: 					} else
 1039: 						LOGERR;
 1040: 					free(acb);
 1041: 					TASK_DATLEN(task) = (u_long) len;
 1042: 					TASK_FD(task) = fd;
 1043: 				}
 1044: 				/* if at least 2 matched, don't remove the event resource */
 1045: 				if (flg > 1)
 1046: 					evt->flags ^= evt->flags;
 1047: 				break;
 1048: #ifdef EVFILT_LIO
 1049: 			case EVFILT_LIO:
 1050: 				flg = 0;
 1051: 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 1052: 					acbs = (struct aiocb**) TASK_VAL(task);
 1053: 					if (acbs != ((struct aiocb**) res[i].udata))
 1054: 						continue;
 1055: 					else {
 1056: 						flg++;
 1057: 						TASK_RET(task) = res[i].data;
 1058: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1059: 					}
 1060: 					/* remove user handle */
 1061: #ifdef HAVE_LIBPTHREAD
 1062: 					pthread_mutex_lock(&r->root_mtx[taskLIO]);
 1063: #endif
 1064: 					TAILQ_REMOVE(&r->root_lio, task, task_node);
 1065: #ifdef HAVE_LIBPTHREAD
 1066: 					pthread_mutex_unlock(&r->root_mtx[taskLIO]);
 1067: #endif
 1068: 					task->task_type = taskREADY;
 1069: #ifdef HAVE_LIBPTHREAD
 1070: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1071: #endif
 1072: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1073: #ifdef HAVE_LIBPTHREAD
 1074: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1075: #endif
 1076: 					iv = (struct iovec*) TASK_DATA(task);
 1077: 					fd = acbs[0]->aio_fildes;
 1078: 					off = acbs[0]->aio_offset;
 1079: 					for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
 1080: 						if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
 1081: 							l = 0;
 1082: 						else
 1083: 							l = iv[j].iov_len;
 1084: 						free(acbs[j]);
 1085: 					}
 1086: 					free(acbs);
 1087: 					TASK_DATLEN(task) = (u_long) len;
 1088: 					TASK_FD(task) = fd;
 1089: 
 1090: 					if (lseek(fd, off + len, SEEK_CUR) == -1)
 1091: 						LOGERR;
 1092: 				}
 1093: 				/* if at least 2 matched, don't remove the event resource */
 1094: 				if (flg > 1)
 1095: 					evt->flags ^= evt->flags;
 1096: 				break;
 1097: #endif	/* EVFILT_LIO */
 1098: #endif	/* AIO_SUPPORT */
 1099: #ifdef EVFILT_USER
 1100: 			case EVFILT_USER:
 1101: 				flg = 0;
 1102: 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 1103: 					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
 1104: 						continue;
 1105: 					else {
 1106: 						flg++;
 1107: 						TASK_RET(task) = res[i].data;
 1108: 						TASK_FLAG(task) = (u_long) res[i].fflags;
 1109: 					}
 1110: 					/* remove user handle */
 1111: #ifdef HAVE_LIBPTHREAD
 1112: 					pthread_mutex_lock(&r->root_mtx[taskUSER]);
 1113: #endif
 1114: 					TAILQ_REMOVE(&r->root_user, task, task_node);
 1115: #ifdef HAVE_LIBPTHREAD
 1116: 					pthread_mutex_unlock(&r->root_mtx[taskUSER]);
 1117: #endif
 1118: 					task->task_type = taskREADY;
 1119: #ifdef HAVE_LIBPTHREAD
 1120: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1121: #endif
 1122: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1123: #ifdef HAVE_LIBPTHREAD
 1124: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1125: #endif
 1126: 				}
 1127: 				/* if at least 2 matched, don't remove the event resource */
 1128: 				if (flg > 1)
 1129: 					evt->flags ^= evt->flags;
 1130: 				break;
 1131: #endif	/* EVFILT_USER */
 1132: 		}
 1133: 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 1134: 			if (r->root_hooks.hook_exec.exception) {
 1135: 				if (r->root_hooks.hook_exec.exception(r, NULL))
 1136: 					return NULL;
 1137: 			} else
 1138: 				LOGERR;
 1139: 		}
 1140: 	}
 1141: #else	/* end of kevent dispatcher */
 1142: 	for (i = 0; i < r->root_kq; i++) {
 1143: 		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
 1144: 			flg = 0;
 1145: 			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 1146: 				if (TASK_FD(task) != i)
 1147: 					continue;
 1148: 				else {
 1149: 					flg++;
 1150: 					TASK_RET(task) ^= TASK_RET(task);
 1151: 					TASK_FLAG(task) ^= TASK_FLAG(task);
 1152: 				}
 1153: 				/* remove read handle */
 1154: #ifdef HAVE_LIBPTHREAD
 1155: 				pthread_mutex_lock(&r->root_mtx[taskREAD]);
 1156: #endif
 1157: 				TAILQ_REMOVE(&r->root_read, task, task_node);
 1158: #ifdef HAVE_LIBPTHREAD
 1159: 				pthread_mutex_unlock(&r->root_mtx[taskREAD]);
 1160: #endif
 1161: 				if (r->root_hooks.hook_exec.exception) {
 1162:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1163: 						task->task_type = taskUNUSE;
 1164: #ifdef HAVE_LIBPTHREAD
 1165: 						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1166: #endif
 1167: 						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1168: #ifdef HAVE_LIBPTHREAD
 1169: 						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1170: #endif
 1171: 					} else {
 1172: 						task->task_type = taskREADY;
 1173: #ifdef HAVE_LIBPTHREAD
 1174: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1175: #endif
 1176: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1177: #ifdef HAVE_LIBPTHREAD
 1178: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1179: #endif
 1180: 					}
 1181: 				} else {
 1182: 					task->task_type = taskREADY;
 1183: #ifdef HAVE_LIBPTHREAD
 1184: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1185: #endif
 1186: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1187: #ifdef HAVE_LIBPTHREAD
 1188: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1189: #endif
 1190: 				}
 1191: 			}
 1192: 			/* if exactly 1 matched, remove the resource */
 1193: 			if (flg == 1)
 1194: 				FD_CLR(i, &r->root_fds[0]);
 1195: 		}
 1196: 
 1197: 		if (FD_ISSET(i, &wfd)) {
 1198: 			flg = 0;
 1199: 			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 1200: 				if (TASK_FD(task) != i)
 1201: 					continue;
 1202: 				else {
 1203: 					flg++;
 1204: 					TASK_RET(task) ^= TASK_RET(task);
 1205: 					TASK_FLAG(task) ^= TASK_FLAG(task);
 1206: 				}
 1207: 				/* remove write handle */
 1208: #ifdef HAVE_LIBPTHREAD
 1209: 				pthread_mutex_lock(&r->root_mtx[taskWRITE]);
 1210: #endif
 1211: 				TAILQ_REMOVE(&r->root_write, task, task_node);
 1212: #ifdef HAVE_LIBPTHREAD
 1213: 				pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
 1214: #endif
 1215: 				if (r->root_hooks.hook_exec.exception) {
 1216:  					if (r->root_hooks.hook_exec.exception(r, NULL)) {
 1217: 						task->task_type = taskUNUSE;
 1218: #ifdef HAVE_LIBPTHREAD
 1219: 						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1220: #endif
 1221: 						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1222: #ifdef HAVE_LIBPTHREAD
 1223: 						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1224: #endif
 1225: 					} else {
 1226: 						task->task_type = taskREADY;
 1227: #ifdef HAVE_LIBPTHREAD
 1228: 						pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1229: #endif
 1230: 						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1231: #ifdef HAVE_LIBPTHREAD
 1232: 						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1233: #endif
 1234: 					}
 1235: 				} else {
 1236: 					task->task_type = taskREADY;
 1237: #ifdef HAVE_LIBPTHREAD
 1238: 					pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1239: #endif
 1240: 					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1241: #ifdef HAVE_LIBPTHREAD
 1242: 					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1243: #endif
 1244: 				}
 1245: 			}
 1246: 			/* if exactly 1 matched, remove the resource */
 1247: 			if (flg == 1)
 1248: 				FD_CLR(i, &r->root_fds[1]);
 1249: 		}
 1250: 	}
 1251: 
 1252: 	/* optimize select */
 1253: 	for (i = r->root_kq - 1; i > 2; i--)
 1254: 		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
 1255: 			break;
 1256: 	if (i > 2)
 1257: 		r->root_kq = i + 1;
 1258: #endif	/* KQ_DISABLE */
 1259: 
 1260: skip_event:
 1261: 	/* timer update & put in ready queue */
 1262: 	clock_gettime(CLOCK_MONOTONIC, &now);
 1263: 
 1264: 	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
 1265: 		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
 1266: #ifdef HAVE_LIBPTHREAD
 1267: 			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
 1268: #endif
 1269: 			TAILQ_REMOVE(&r->root_timer, task, task_node);
 1270: #ifdef HAVE_LIBPTHREAD
 1271: 			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
 1272: #endif
 1273: 			task->task_type = taskREADY;
 1274: #ifdef HAVE_LIBPTHREAD
 1275: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1276: #endif
 1277: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1278: #ifdef HAVE_LIBPTHREAD
 1279: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1280: #endif
 1281: 		}
 1282: 
 1283: 	/* put the first regular-priority task onto the ready queue
 1284: 		if there is no ready task or it has reached its max miss count */
 1285: 	if ((task = TAILQ_FIRST(&r->root_task))) {
 1286: 		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
 1287: 			r->root_miss ^= r->root_miss;
 1288: 
 1289: #ifdef HAVE_LIBPTHREAD
 1290: 			pthread_mutex_lock(&r->root_mtx[taskTASK]);
 1291: #endif
 1292: 			TAILQ_REMOVE(&r->root_task, task, task_node);
 1293: #ifdef HAVE_LIBPTHREAD
 1294: 			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
 1295: #endif
 1296: 			task->task_type = taskREADY;
 1297: #ifdef HAVE_LIBPTHREAD
 1298: 			pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1299: #endif
 1300: 			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
 1301: #ifdef HAVE_LIBPTHREAD
 1302: 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1303: #endif
 1304: 		} else
 1305: 			r->root_miss++;
 1306: 	} else
 1307: 		r->root_miss ^= r->root_miss;
 1308: 
 1309: 	/* OK, let's get a ready task!!! */
 1310: 	task = TAILQ_FIRST(&r->root_ready);
 1311: 	if (!(task))
 1312: 		return NULL;
 1313: 
 1314: #ifdef HAVE_LIBPTHREAD
 1315: 	pthread_mutex_lock(&r->root_mtx[taskREADY]);
 1316: #endif
 1317: 	TAILQ_REMOVE(&r->root_ready, task, task_node);
 1318: #ifdef HAVE_LIBPTHREAD
 1319: 	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 1320: #endif
 1321: 	task->task_type = taskUNUSE;
 1322: #ifdef HAVE_LIBPTHREAD
 1323: 	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
 1324: #endif
 1325: 	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
 1326: #ifdef HAVE_LIBPTHREAD
 1327: 	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
 1328: #endif
 1329: 	return task;
 1330: }
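/*
 * Summary of the FETCH hook above, in dispatch order: (1) hand back any
 * pending event-queue or ready-queue task immediately; (2) derive the wait
 * interval from the nearest timer task, clear it (NOWAIT) when a regular
 * task is pending, or set it to INFTIM when nothing is queued; (3) poll with
 * kevent(2) -- or select(2) when KQ_DISABLE is set -- and move the fired
 * READ/WRITE/TIMER/NODE/PROC/SIGNAL/AIO/LIO/USER tasks to the ready queue,
 * deleting the kernel event afterwards unless more than one task matched it;
 * (4) move expired timer tasks to the ready queue; (5) promote the first
 * regular task once nothing is ready or its miss counter reaches TASK_VAL();
 * (6) pop the head of the ready queue, mark it taskUNUSE and return it.
 */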
 1331: 
 1332: /*
 1333:  * sched_hook_exception() - Default EXCEPTION hook
 1334:  *
 1335:  * @root = root task
 1336:  * @arg = custom handling: EV_EOF is ignored, any other non-NULL value raises a scheduler error; NULL (default) logs errno
 1337:  * return: <0 errors and 0 ok
 1338:  */
 1339: void *
 1340: sched_hook_exception(void *root, void *arg)
 1341: {
 1342: 	sched_root_task_t *r = root;
 1343: 
 1344: 	if (!r)
 1345: 		return NULL;
 1346: 
 1347: 	/* custom exception handling ... */
 1348: 	if (arg) {
 1349: 		if (arg == (void*) EV_EOF)
 1350: 			return NULL;
 1351: 		return (void*) -1;	/* raise scheduler error!!! */
 1352: 	}
 1353: 
 1354: 	/* if error hook exists */
 1355: 	if (r->root_hooks.hook_root.error)
 1356: 		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
 1357: 
 1358: 	/* default case! */
 1359: 	LOGERR;
 1360: 	return NULL;
 1361: }
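/*
 * Protocol of the EXCEPTION hook above, as used by the callers in this file:
 * a NULL arg means "report errno", forwarded to the root error hook when one
 * is installed and logged otherwise; arg == EV_EOF is swallowed (NULL is
 * returned, so the FETCH hook still queues the task as ready); any other
 * non-NULL arg yields (void*) -1, which the FETCH hook takes as a reason to
 * abort the fetch or to drop the task that hit EOF.
 */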
 1362: 
 1363: /*
 1364:  * sched_hook_condition() - Default CONDITION hook
 1365:  *
 1366:  * @root = root task
 1367:  * @arg = killState from schedRun()
 1368:  * return: NULL kill scheduler loop or !=NULL ok
 1369:  */
 1370: void *
 1371: sched_hook_condition(void *root, void *arg)
 1372: {
 1373: 	sched_root_task_t *r = root;
 1374: 
 1375: 	if (!r)
 1376: 		return NULL;
 1377: 
 1378: 	return (void*) (r->root_cond - *(intptr_t*) arg);
 1379: }
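/*
 * Note on the CONDITION hook above: it returns root_cond minus the kill
 * state handed in from schedRun(), so the scheduler loop keeps running while
 * the two values differ and stops as soon as they match -- e.g. root_cond of
 * 1 with *arg == 1 yields 0, i.e. NULL, i.e. "kill the loop".
 */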
 1380: 
 1381: /*
 1382:  * sched_hook_rtc() - Default RTC hook
 1383:  *
 1384:  * @task = current task
 1385:  * @arg = unused
 1386:  * return: <0 errors and 0 ok
 1387:  */
 1388: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
 1389: void *
 1390: sched_hook_rtc(void *task, void *arg __unused)
 1391: {
 1392: 	sched_task_t *sigt = NULL, *t = task;
 1393: 	struct itimerspec its;
 1394: 	struct sigevent evt;
 1395: 	timer_t tmr;
 1396: 
 1397: 	if (!t || !TASK_ROOT(t))
 1398: 		return (void*) -1;
 1399: 
 1400: 	memset(&evt, 0, sizeof evt);
 1401: 	evt.sigev_notify = SIGEV_SIGNAL;
 1402: 	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
 1403: 	evt.sigev_value.sival_ptr = TASK_DATA(t);
 1404: 
 1405: 	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
 1406: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1407: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1408: 		else
 1409: 			LOGERR;
 1410: 		return (void*) -1;
 1411: 	} else
 1412: 		TASK_FLAG(t) = (u_long) tmr;
 1413: 
 1414: 	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
 1415: 				t, (size_t) tmr))) {
 1416: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1417: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1418: 		else
 1419: 			LOGERR;
 1420: 		timer_delete(tmr);
 1421: 		return (void*) -1;
 1422: 	} else
 1423: 		TASK_RET(t) = (uintptr_t) sigt;
 1424: 
 1425: 	memset(&its, 0, sizeof its);
 1426: 	its.it_value.tv_sec = t->task_val.ts.tv_sec;
 1427: 	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
 1428: 
 1429: 	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
 1430: 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
 1431: 			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
 1432: 		else
 1433: 			LOGERR;
 1434: 		schedCancel(sigt);
 1435: 		timer_delete(tmr);
 1436: 		return (void*) -1;
 1437: 	}
 1438: 
 1439: 	return NULL;
 1440: }
 1441: #endif	/* HAVE_TIMER_CREATE */
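/*
 * Summary of the RTC hook above: each RTC task gets its own POSIX timer on
 * CLOCK_MONOTONIC whose expiry is delivered as SIGRTMIN + TASK_DATA(t); a
 * SIGNAL task is chained via schedSignal() so the expiry travels through the
 * normal signal dispatch path.  The timer id is kept in TASK_FLAG() and the
 * chained task in TASK_RET(), which is exactly what the CANCEL hook tears
 * down again with timer_delete() and schedCancel().
 */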
