File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / aitsched.c
Revision 1.13
Wed Aug 8 08:25:39 2012 UTC (11 years, 10 months ago) by misho
Branches: MAIN
CVS tags: sched3_1, SCHED3_0, HEAD
version 3.0

/*************************************************************************
* (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
*  by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: aitsched.c,v 1.13 2012/08/08 08:25:39 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
	by Michael Pounov <misho@elwix.org>.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
This product includes software developed by Michael Pounov <misho@elwix.org>
ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"
#include "hooks.h"


#pragma GCC visibility push(hidden)

int sched_Errno;
char sched_Error[STRSIZ];

#pragma GCC visibility pop


// sched_GetErrno() Get error code of last operation
inline int
sched_GetErrno()
{
	return sched_Errno;
}

// sched_GetError() Get error text of last operation
inline const char *
sched_GetError()
{
	return sched_Error;
}

// sched_SetErr() Set error code and message (for internal use only!)
inline void
sched_SetErr(int eno, char *estr, ...)
{
	va_list lst;

	sched_Errno = eno;
	memset(sched_Error, 0, sizeof sched_Error);
	va_start(lst, estr);
	vsnprintf(sched_Error, sizeof sched_Error, estr, lst);
	va_end(lst);
}
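
/*
 * Usage sketch (not part of the library, excluded from the build): how a
 * caller might report a failure using the error accessors above.  The
 * public header name <aitsched.h> and the failing-call context are
 * assumptions made only for illustration.
 */
#if 0
#include <stdio.h>
#include <aitsched.h>	/* assumed public header of libaitsched */

void report_sched_failure(void)
{
	/* after any libaitsched call has returned an error ... */
	fprintf(stderr, "sched error #%d: %s\n",
			sched_GetErrno(), sched_GetError());
}
#endif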

/* Init and prepare scheduler functions */

/*
 * schedRegisterHooks() - Register IO handles and bind tasks to them
 *
 * @root = root task
 * return: -1 error or 0 ok
 */
int
schedRegisterHooks(sched_root_task_t * __restrict root)
{
	assert(root);

	if (root->root_hooks.hook_root.fini)
		root->root_hooks.hook_root.fini(root, NULL);
	memset(&root->root_hooks, 0, sizeof root->root_hooks);

	root->root_hooks.hook_add.read = sched_hook_read;
	root->root_hooks.hook_add.write = sched_hook_write;
	root->root_hooks.hook_add.alarm = sched_hook_alarm;
	root->root_hooks.hook_add.node = sched_hook_node;
	root->root_hooks.hook_add.proc = sched_hook_proc;
	root->root_hooks.hook_add.signal = sched_hook_signal;
#ifdef EVFILT_USER
	root->root_hooks.hook_add.user = sched_hook_user;
#endif

	root->root_hooks.hook_exec.cancel = sched_hook_cancel;
	root->root_hooks.hook_exec.fetch = sched_hook_fetch;
	root->root_hooks.hook_exec.exception = sched_hook_exception;

	root->root_hooks.hook_root.init = sched_hook_init;
	root->root_hooks.hook_root.fini = sched_hook_fini;
	return 0;
}
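
/*
 * Usage sketch (not part of the library, excluded from the build): wrapping
 * the default fetch hook installed by schedRegisterHooks() with a counting
 * hook.  That every hook shares the sched_hook_func_t signature used by
 * schedCancelby() below is an assumption, as is the header name.
 */
#if 0
#include <aitsched.h>	/* assumed public header of libaitsched */

static sched_hook_func_t orig_fetch;
static unsigned long fetch_calls;

static void *
counting_fetch(void *root, void *arg)
{
	fetch_calls++;			/* count every fetch from the scheduler */
	return orig_fetch(root, arg);	/* delegate to the default hook */
}

static int
install_counting_fetch(sched_root_task_t *root)
{
	if (schedRegisterHooks(root))	/* install the default hook set first */
		return -1;
	orig_fetch = root->root_hooks.hook_exec.fetch;
	root->root_hooks.hook_exec.fetch = counting_fetch;
	return 0;
}
#endif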

/*
 * schedInit() - Init scheduler
 *
 * @data = optional data if !=NULL
 * @datlen = data len if data is set
 * return: allocated root task if ok or NULL error
 */
sched_root_task_t *
schedInit(void ** __restrict data, size_t datlen)
{
	sched_root_task_t *root = NULL;
	int (*func)(sched_root_task_t *);
#ifdef HAVE_LIBPTHREAD
	register int i;
#endif

	root = malloc(sizeof(sched_root_task_t));
	if (!root) {
		LOGERR;
	} else {
		memset(root, 0, sizeof(sched_root_task_t));

		/* set default maximum regular task hit misses */
		root->root_miss = MAX_TASK_MISS;

		/* infinite polling period by default */
		sched_timespecinf(&root->root_poll);

#ifdef HAVE_LIBPTHREAD
		for (i = 0; i < taskMAX; i++)
			if (pthread_mutex_init(&root->root_mtx[i], NULL)) {
				LOGERR;
				while (i)
					pthread_mutex_destroy(&root->root_mtx[--i]);
				free(root);
				return NULL;
			}

		for (i = 0; i < taskMAX; i++)
			pthread_mutex_lock(&root->root_mtx[i]);
#endif

		TAILQ_INIT(&root->root_read);
		TAILQ_INIT(&root->root_write);
		TAILQ_INIT(&root->root_timer);
		TAILQ_INIT(&root->root_alarm);
		TAILQ_INIT(&root->root_node);
		TAILQ_INIT(&root->root_proc);
		TAILQ_INIT(&root->root_signal);
		TAILQ_INIT(&root->root_aio);
		TAILQ_INIT(&root->root_lio);
		TAILQ_INIT(&root->root_user);
		TAILQ_INIT(&root->root_event);
		TAILQ_INIT(&root->root_task);
		TAILQ_INIT(&root->root_suspend);
		TAILQ_INIT(&root->root_ready);
		TAILQ_INIT(&root->root_unuse);

#ifdef HAVE_LIBPTHREAD
		for (i = 0; i < taskMAX; i++)
			pthread_mutex_unlock(&root->root_mtx[i]);
#endif

		if (data && *data) {
			if (datlen) {
				root->root_data.iov_base = *data;
				root->root_data.iov_len = datlen;
			} else { /* if datlen == 0, switch to callback init mode */
				 /* little hack :) for correct initialization of scheduler */
				func = (int(*)(sched_root_task_t*)) data;
				func(root);
			}
		}

		if (root->root_hooks.hook_root.init)
			root->root_hooks.hook_root.init(root, NULL);
	}

	return root;
}
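
/*
 * Usage sketch (not part of the library, excluded from the build): creating
 * a scheduler without optional data and tearing it down again.  The header
 * name <aitsched.h> is an assumption made only for illustration.
 */
#if 0
#include <stdio.h>
#include <aitsched.h>	/* assumed public header of libaitsched */

int main(void)
{
	sched_root_task_t *root;

	root = schedInit(NULL, 0);	/* no optional data */
	if (!root) {
		fprintf(stderr, "schedInit: %s\n", sched_GetError());
		return 1;
	}

	/* ... register tasks and run the scheduler here ... */

	schedEnd(&root);		/* cancels remaining tasks, frees root */
	return 0;
}
#endif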

/*
 * schedEnd() - End scheduler & free all resources
 *
 * @root = root task
 * return: -1 error or 0 ok
 */
int
schedEnd(sched_root_task_t ** __restrict root)
{
	sched_task_t *task, *tmp;
#ifdef HAVE_LIBPTHREAD
	register int i;
#endif

	if (!root || !*root)
		return -1;

	TAILQ_FOREACH_SAFE(task, &(*root)->root_read, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_write, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_timer, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_alarm, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_node, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_proc, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_signal, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_aio, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_lio, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_user, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_event, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_task, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_suspend, task_node, tmp)
		schedCancel(task);
	TAILQ_FOREACH_SAFE(task, &(*root)->root_ready, task_node, tmp)
		schedCancel(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&(*root)->root_mtx[taskUNUSE]);
#endif
	TAILQ_FOREACH_SAFE(task, &(*root)->root_unuse, task_node, tmp) {
		TAILQ_REMOVE(&(*root)->root_unuse, task, task_node);
		free(task);
	}
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&(*root)->root_mtx[taskUNUSE]);
#endif

	if ((*root)->root_hooks.hook_root.fini)
		(*root)->root_hooks.hook_root.fini(*root, NULL);

#ifdef HAVE_LIBPTHREAD
	for (i = 0; i < taskMAX; i++)
		pthread_mutex_destroy(&(*root)->root_mtx[i]);
#endif

	free(*root);
	*root = NULL;
	return 0;
}

/*
 * schedCall() - Call task execution function
 *
 * @task = current task
 * return: !=NULL error or =NULL ok
 */
inline void *
schedCall(sched_task_t * __restrict task)
{
	void *ptr = (void*) -1;

	if (!task)
		return ptr;

	if (!TASK_ISLOCKED(task))
		TASK_LOCK(task);

	ptr = task->task_func(task);

	TASK_UNLOCK(task);
	return ptr;
}

/*
 * schedFetch() - Fetch ready task
 *
 * @root = root task
 * return: =NULL error or !=NULL ready task
 */
inline void *
schedFetch(sched_root_task_t * __restrict root)
{
	void *ptr;

	if (!root)
		return NULL;

	if (root->root_hooks.hook_exec.fetch)
		ptr = root->root_hooks.hook_exec.fetch(root, NULL);
	else
		ptr = NULL;

	return ptr;
}
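
/*
 * Usage sketch (not part of the library, excluded from the build): a manual
 * dispatch loop built from schedFetch() and schedCall(), similar to what
 * schedRun() does internally.  Header name is an assumption.
 */
#if 0
#include <aitsched.h>	/* assumed public header of libaitsched */

static void
dispatch_ready_tasks(sched_root_task_t *root)
{
	sched_task_t *task;

	/* schedFetch() invokes the fetch hook, which returns the next ready task */
	while ((task = schedFetch(root)) != NULL)
		schedCall(task);
}
#endif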

/*
 * schedTrigger() - Trigger USER task
 *
 * @task = task
 * return: -1 error or 0 ok
 */
int
schedTrigger(sched_task_t * __restrict task)
{
#ifndef EVFILT_USER
	sched_SetErr(ENOTSUP, "Not supported kevent() filter");
	return -1;
#else
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!task || !TASK_ROOT(task))
		return -1;

#ifdef __NetBSD__
	EV_SET(chg, TASK_VAL(task), EVFILT_USER, 0, NOTE_TRIGGER, 0, (intptr_t) TASK_VAL(task));
#else
	EV_SET(chg, TASK_VAL(task), EVFILT_USER, 0, NOTE_TRIGGER, 0, (void*) TASK_VAL(task));
#endif
	if (kevent(TASK_ROOT(task)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		LOGERR;
		return -1;
	}

	return 0;
#endif
}
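
/*
 * Usage sketch (not part of the library, excluded from the build): firing a
 * previously registered USER task from another part of the program.  How
 * the task handle was obtained (the library's user-task registration call)
 * is outside this file and left as an assumption.
 */
#if 0
#include <stdio.h>
#include <aitsched.h>	/* assumed public header of libaitsched */

static void
fire_user_task(sched_task_t *user_task)
{
	/* user_task: handle returned when the USER task was registered */
	if (schedTrigger(user_task) == -1)
		fprintf(stderr, "schedTrigger: %s\n", sched_GetError());
}
#endif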

/*
 * schedCancel() - Cancel task from scheduler
 *
 * @task = task
 * return: -1 error or 0 ok
 */
int
schedCancel(sched_task_t * __restrict task)
{
	sched_queue_t *queue;

	if (!task || !TASK_ROOT(task))
		return -1;

	if (TASK_ROOT(task)->root_hooks.hook_exec.cancel)
		if (TASK_ROOT(task)->root_hooks.hook_exec.cancel(task, NULL))
			return -1;

	switch (TASK_TYPE(task)) {
		case taskREAD:
			queue = &TASK_ROOT(task)->root_read;
			break;
		case taskWRITE:
			queue = &TASK_ROOT(task)->root_write;
			break;
		case taskTIMER:
			queue = &TASK_ROOT(task)->root_timer;
			break;
		case taskALARM:
			queue = &TASK_ROOT(task)->root_alarm;
			break;
		case taskNODE:
			queue = &TASK_ROOT(task)->root_node;
			break;
		case taskPROC:
			queue = &TASK_ROOT(task)->root_proc;
			break;
		case taskSIGNAL:
			queue = &TASK_ROOT(task)->root_signal;
			break;
		case taskAIO:
			queue = &TASK_ROOT(task)->root_aio;
			break;
		case taskLIO:
			queue = &TASK_ROOT(task)->root_lio;
			break;
		case taskUSER:
			queue = &TASK_ROOT(task)->root_user;
			break;
		case taskEVENT:
			queue = &TASK_ROOT(task)->root_event;
			break;
		case taskTASK:
			queue = &TASK_ROOT(task)->root_task;
			break;
		case taskSUSPEND:
			queue = &TASK_ROOT(task)->root_suspend;
			break;
		case taskREADY:
			queue = &TASK_ROOT(task)->root_ready;
			break;
		default:
			queue = NULL;
	}
	if (queue) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[TASK_TYPE(task)]);
#endif
		TAILQ_REMOVE(queue, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[TASK_TYPE(task)]);
#endif
	}
	if (TASK_TYPE(task) != taskUNUSE)
		_sched_unuseTask(task);

	return 0;
}

/*
 * schedCancelby() - Cancel task from scheduler by criteria
 *
 * @root = root task
 * @type = cancel from queue type, if =taskMAX cancel matching tasks from all queues
 * @criteria = find task by criteria
 * 	[CRITERIA_ANY|CRITERIA_CALL|CRITERIA_ARG|CRITERIA_FD|CRITERIA_VAL|CRITERIA_ID|CRITERIA_TS|CRITERIA_DATA]
 * @param = search parameter
 * @hook = custom cleanup hook function, may be NULL
 * return: -1 error, -2 error in sub-stage cancel execution, -3 error from custom hook or 0 ok
 */
int
schedCancelby(sched_root_task_t * __restrict root, sched_task_type_t type, 
		u_char criteria, void *param, sched_hook_func_t hook)
{
	sched_task_t *task, *tmp;
	sched_queue_t *queue;
	register int flg = 0;

	if (!root)
		return -1;
	/* if type == taskMAX check in all queues */
	if (type == taskMAX) {
		if (schedCancelby(root, taskREAD, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskWRITE, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskTIMER, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskALARM, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskNODE, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskPROC, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskSIGNAL, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskAIO, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskLIO, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskUSER, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskEVENT, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskTASK, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskSUSPEND, criteria, param, hook))
			return -2;
		if (schedCancelby(root, taskREADY, criteria, param, hook))
			return -2;
		return 0;
	}
	/* chosen queue */
	switch (type) {
		case taskREAD:
			queue = &root->root_read;
			break;
		case taskWRITE:
			queue = &root->root_write;
			break;
		case taskTIMER:
			queue = &root->root_timer;
			break;
		case taskALARM:
			queue = &root->root_alarm;
			break;
		case taskNODE:
			queue = &root->root_node;
			break;
		case taskPROC:
			queue = &root->root_proc;
			break;
		case taskSIGNAL:
			queue = &root->root_signal;
			break;
		case taskAIO:
			queue = &root->root_aio;
			break;
		case taskLIO:
			queue = &root->root_lio;
			break;
		case taskUSER:
			queue = &root->root_user;
			break;
		case taskEVENT:
			queue = &root->root_event;
			break;
		case taskTASK:
			queue = &root->root_task;
			break;
		case taskSUSPEND:
			queue = &root->root_suspend;
			break;
		case taskREADY:
			queue = &root->root_ready;
			break;
		default:
			return 0;
	}

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[type]);
#endif
	TAILQ_FOREACH_SAFE(task, queue, task_node, tmp) {
		flg ^= flg;
		switch (criteria) {
			case CRITERIA_ANY:
				flg = 1;
				break;
			case CRITERIA_CALL:
				if (TASK_FUNC(task) == (sched_task_func_t) param)
					flg = 1;
				break;
			case CRITERIA_ARG:
				if (TASK_ARG(task) == param)
					flg = 1;
				break;
			case CRITERIA_FD:
				if (TASK_FD(task) == (intptr_t) param)
					flg = 1;
				break;
			case CRITERIA_ID:
			case CRITERIA_VAL:
				if (TASK_VAL(task) == (u_long) param)
					flg = 1;
				break;
			case CRITERIA_TS:
				if (!sched_timespeccmp(&TASK_TS(task), (struct timespec*) param, -))
					flg = 1;
				break;
			case CRITERIA_DATA:
				if (TASK_DATA(task) == param)
					flg = 1;
				break;
			default:
				sched_SetErr(EINVAL, "Invalid parameter criteria %d", criteria);
				flg = -1;
		}
		if (flg < 0)		/* error */
			break;
		/* cancel chosen task */
		if (flg > 0) {
			if (TASK_ROOT(task)->root_hooks.hook_exec.cancel)
				if (TASK_ROOT(task)->root_hooks.hook_exec.cancel(task, NULL)) {
					flg = -1;
					break;
				}
			/* custom hook */
			if (hook)
				if (hook(task, NULL)) {
					flg = -3;
					break;
				}

			TAILQ_REMOVE(queue, task, task_node);
			if (TASK_TYPE(task) != taskUNUSE)
				_sched_unuseTask(task);

			flg ^= flg;	/* ok */
		}
	}
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[type]);
#endif
	return flg;
}
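
/*
 * Usage sketch (not part of the library, excluded from the build): cancelling
 * every READ task bound to a given descriptor and letting a custom hook
 * release per-task state.  That the cleanup hook matches sched_hook_func_t
 * and receives the task as its first argument follows the call made inside
 * schedCancelby() above; the header name is an assumption.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>
#include <aitsched.h>	/* assumed public header of libaitsched */

static void *
free_task_arg(void *t, void *unused)
{
	(void) unused;
	/* release the argument attached to the task being cancelled */
	free(TASK_ARG((sched_task_t*) t));
	return NULL;	/* a non-NULL return would abort cancellation with -3 */
}

static int
drop_readers_on_fd(sched_root_task_t *root, int fd)
{
	return schedCancelby(root, taskREAD, CRITERIA_FD,
			(void*) (intptr_t) fd, free_task_arg);
}
#endif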

/*
 * schedRun() - Scheduler *run loop*
 *
 * @root = root task
 * @killState = kill condition variable, if !=0 stop scheduler loop
 * return: -1 error or 0 ok
 */
int
schedRun(sched_root_task_t *root, volatile intptr_t * __restrict killState)
{
	sched_task_t *task;

	if (!root)
		return -1;

	if (root->root_hooks.hook_exec.run)
		if (root->root_hooks.hook_exec.run(root, NULL))
			return -1;

	if (killState) {
		if (root->root_hooks.hook_exec.condition)
			/* condition scheduler loop */
			while (root && root->root_hooks.hook_exec.fetch && 
					root->root_hooks.hook_exec.condition && 
					root->root_hooks.hook_exec.condition(root, (void*) killState)) {
				if ((task = root->root_hooks.hook_exec.fetch(root, NULL)))
					root->root_ret = schedCall(task);
			}
		else
			/* trigger scheduler loop */
			while (!*killState && root && root->root_hooks.hook_exec.fetch) {
				if ((task = root->root_hooks.hook_exec.fetch(root, NULL)))
					root->root_ret = schedCall(task);
			}
	} else
		/* infinite scheduler loop */
		while (root && root->root_hooks.hook_exec.fetch)
			if ((task = root->root_hooks.hook_exec.fetch(root, NULL)))
				root->root_ret = schedCall(task);

	return 0;
}
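
/*
 * Usage sketch (not part of the library, excluded from the build): the
 * "trigger" flavour of schedRun(), stopped by flipping a flag from a signal
 * handler.  Names outside this file (main, on_sigint) are illustrative.
 */
#if 0
#include <signal.h>
#include <stdint.h>
#include <aitsched.h>	/* assumed public header of libaitsched */

static volatile intptr_t stop_flag;	/* !=0 ends the run loop */

static void
on_sigint(int signo)
{
	(void) signo;
	stop_flag = 1;
}

int main(void)
{
	sched_root_task_t *root = schedInit(NULL, 0);

	if (!root)
		return 1;
	signal(SIGINT, on_sigint);

	/* ... register tasks here ... */

	schedRun(root, &stop_flag);	/* loops until stop_flag becomes non-zero */
	schedEnd(&root);
	return 0;
}
#endif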

/*
 * schedPolling() - Polling timeout period if no timer task is present
 *
 * @root = root task
 * @ts = timeout polling period, if ==NULL infinite timeout
 * @tsold = old timeout polling if !=NULL
 * return: -1 error or 0 ok
 */
inline int
schedPolling(sched_root_task_t * __restrict root, struct timespec * __restrict ts, 
		struct timespec * __restrict tsold)
{
	if (!root)
		return -1;

	if (tsold)
		*tsold = root->root_poll;

	if (!ts)
		sched_timespecinf(&root->root_poll);
	else
		root->root_poll = *ts;

	return 0;
}
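
/*
 * Usage sketch (not part of the library, excluded from the build): limiting
 * the scheduler's wait to 100 ms so the run loop wakes up periodically even
 * when no timer task exists.  Header name is an assumption.
 */
#if 0
#include <time.h>
#include <aitsched.h>	/* assumed public header of libaitsched */

static void
set_100ms_poll(sched_root_task_t *root)
{
	struct timespec ts = { 0, 100000000L };	/* 100 ms */
	struct timespec old;

	schedPolling(root, &ts, &old);	/* old receives the previous period */
}
#endif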

/*
 * schedTermCondition() - Activate hook for scheduler condition kill
 *
 * @root = root task
 * @condValue = condition value, kill schedRun() if condValue == killState
 * return: -1 error or 0 ok
 */
inline int
schedTermCondition(sched_root_task_t * __restrict root, intptr_t condValue)
{
	if (!root)
		return -1;

	root->root_cond = condValue;
	root->root_hooks.hook_exec.condition = sched_hook_condition;
	return 0;
}
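
/*
 * Usage sketch (not part of the library, excluded from the build): the
 * "condition" flavour of the run loop, terminated when the kill-state
 * variable reaches the value registered with schedTermCondition().  The
 * value 42 and the header name are illustrative assumptions.
 */
#if 0
#include <stdint.h>
#include <aitsched.h>	/* assumed public header of libaitsched */

static volatile intptr_t kill_state;	/* set to 42 elsewhere to stop the loop */

static void
run_until_condition(sched_root_task_t *root)
{
	schedTermCondition(root, 42);	/* stop when kill_state == 42 */
	schedRun(root, &kill_state);
}
#endif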

/*
 * schedMissEvents() - Set new miss events rate for regular tasks
 *
 * @root = root task
 * @missEvents = maximum number of missed events before firing tasks
 * return: -1 error or 0 ok
 */
inline int
schedMissEvents(sched_root_task_t * __restrict root, u_int missEvents)
{
	if (!root)
		return -1;

	root->root_miss = missEvents;
	return 0;
}

/*
 * schedResumeby() - Resume suspended task
 *
 * @root = root task
 * @criteria = find task by criteria
 * 	[CRITERIA_ANY|CRITERIA_ID|CRITERIA_DATA]
 * @param = search parameter (sched_task_t *task| u_long id)
 * return: -1 error or 0 resumed ok
 */
int
schedResumeby(sched_root_task_t * __restrict root, u_char criteria, void *param)
{
	sched_task_t *task, *tmp;
	register int flg = 0;

	if (!root)
		return -1;

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskSUSPEND]);
#endif
	TAILQ_FOREACH_SAFE(task, &root->root_suspend, task_node, tmp) {
		flg ^= flg;
		switch (criteria) {
			case CRITERIA_ANY:
				flg = 1;
				break;
			case CRITERIA_ID:
				if (TASK_VAL(task) == (u_long) param)
					flg = 1;
				break;
			case CRITERIA_DATA:
				if (TASK_ID(task) == (sched_task_t*) param)
					flg = 1;
				break;
			default:
				sched_SetErr(EINVAL, "Invalid parameter criteria %d", criteria);
				flg = -1;
		}
		if (flg < 0)
			break;
		/* resume chosen task */
		if (flg > 0) {
			if (root->root_hooks.hook_exec.resume)
				if (root->root_hooks.hook_exec.resume(task, NULL)) {
					flg = -1;
					break;
				}

			TAILQ_REMOVE(&root->root_suspend, task, task_node);

			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&root->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&root->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&root->root_mtx[taskREADY]);
#endif

			flg ^= flg;	/* ok */
		}
	}
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskSUSPEND]);
#endif

	return flg;
}
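
/*
 * Usage sketch (not part of the library, excluded from the build): waking
 * every suspended task, or a single one identified by its id value.  Header
 * name is an assumption.
 */
#if 0
#include <aitsched.h>	/* assumed public header of libaitsched */

static void
resume_examples(sched_root_task_t *root, u_long id)
{
	/* resume all suspended tasks */
	schedResumeby(root, CRITERIA_ANY, NULL);

	/* resume only the suspended task whose TASK_VAL() equals id */
	schedResumeby(root, CRITERIA_ID, (void*) id);
}
#endif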
