--- libaitsched/src/aitsched.c	2012/08/08 23:00:05	1.13.2.1
+++ libaitsched/src/aitsched.c	2013/08/15 17:58:31	1.18.6.1
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: aitsched.c,v 1.13.2.1 2012/08/08 23:00:05 misho Exp $
+* $Id: aitsched.c,v 1.18.6.1 2013/08/15 17:58:31 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -12,7 +12,7 @@ terms:
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria
 
-Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
	by Michael Pounov . All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -56,21 +56,21 @@
 char sched_Error[STRSIZ];
 
 // sched_GetErrno() Get error code of last operation
-inline int
+int
 sched_GetErrno()
 {
 	return sched_Errno;
 }
 
 // sched_GetError() Get error text of last operation
-inline const char *
+const char *
 sched_GetError()
 {
 	return sched_Error;
 }
 
 // sched_SetErr() Set error to variables for internal use!!!
-inline void
+void
 sched_SetErr(int eno, char *estr, ...)
 {
 	va_list lst;
@@ -102,12 +102,16 @@ schedRegisterHooks(sched_root_task_t * __restrict root
 	root->root_hooks.hook_add.read = sched_hook_read;
 	root->root_hooks.hook_add.write = sched_hook_write;
 	root->root_hooks.hook_add.alarm = sched_hook_alarm;
+	root->root_hooks.hook_add.rtc = sched_hook_rtc;
 	root->root_hooks.hook_add.node = sched_hook_node;
 	root->root_hooks.hook_add.proc = sched_hook_proc;
 	root->root_hooks.hook_add.signal = sched_hook_signal;
 #ifdef EVFILT_USER
 	root->root_hooks.hook_add.user = sched_hook_user;
 #endif
+#ifdef HAVE_LIBPTHREAD
+	root->root_hooks.hook_add.thread = sched_hook_thread;
+#endif
 
 	root->root_hooks.hook_exec.cancel = sched_hook_cancel;
 	root->root_hooks.hook_exec.fetch = sched_hook_fetch;
@@ -148,7 +152,7 @@ schedInit(void ** __restrict data, size_t datlen)
 
 #ifdef HAVE_LIBPTHREAD
 	for (i = 0; i < taskMAX; i++)
-		if (pthread_mutex_init(&root->root_mtx[i], NULL)) {
+		if ((errno = pthread_mutex_init(&root->root_mtx[i], NULL))) {
 			LOGERR;
 			while (i)
 				pthread_mutex_destroy(&root->root_mtx[--i]);
@@ -164,6 +168,7 @@ schedInit(void ** __restrict data, size_t datlen)
 	TAILQ_INIT(&root->root_write);
 	TAILQ_INIT(&root->root_timer);
 	TAILQ_INIT(&root->root_alarm);
+	TAILQ_INIT(&root->root_rtc);
 	TAILQ_INIT(&root->root_node);
 	TAILQ_INIT(&root->root_proc);
 	TAILQ_INIT(&root->root_signal);
@@ -175,6 +180,7 @@ schedInit(void ** __restrict data, size_t datlen)
 	TAILQ_INIT(&root->root_suspend);
 	TAILQ_INIT(&root->root_ready);
 	TAILQ_INIT(&root->root_unuse);
+	TAILQ_INIT(&root->root_thread);
 
 #ifdef HAVE_LIBPTHREAD
 	for (i = 0; i < taskMAX; i++)
@@ -224,6 +230,8 @@ schedEnd(sched_root_task_t ** __restrict root)
 		schedCancel(task);
 	TAILQ_FOREACH_SAFE(task, &(*root)->root_alarm, task_node, tmp)
 		schedCancel(task);
+	TAILQ_FOREACH_SAFE(task, &(*root)->root_rtc, task_node, tmp)
+		schedCancel(task);
 	TAILQ_FOREACH_SAFE(task, &(*root)->root_node, task_node, tmp)
 		schedCancel(task);
 	TAILQ_FOREACH_SAFE(task, &(*root)->root_proc, task_node, tmp)
@@ -238,12 +246,14 @@ schedEnd(sched_root_task_t ** __restrict root)
 		schedCancel(task);
 	TAILQ_FOREACH_SAFE(task, &(*root)->root_event, task_node, tmp)
 		schedCancel(task);
-	TAILQ_FOREACH_SAFE(task, &(*root)->root_task, task_node, tmp)
-		schedCancel(task);
 	TAILQ_FOREACH_SAFE(task, &(*root)->root_suspend, task_node, tmp)
 		schedCancel(task);
 	TAILQ_FOREACH_SAFE(task, &(*root)->root_ready, task_node, tmp)
 		schedCancel(task);
+	TAILQ_FOREACH_SAFE(task, &(*root)->root_thread, task_node, tmp)
+		schedCancel(task);
+	TAILQ_FOREACH_SAFE(task, &(*root)->root_task, task_node, tmp)
+		schedCancel(task);
 
 #ifdef HAVE_LIBPTHREAD
 	pthread_mutex_lock(&(*root)->root_mtx[taskUNUSE]);
@@ -275,7 +285,7 @@ schedEnd(sched_root_task_t ** __restrict root)
 * @task = current task
 * return: !=NULL error or =NULL ok
 */
-inline void *
+void *
 schedCall(sched_task_t * __restrict task)
 {
 	void *ptr = (void*) -1;
@@ -298,7 +308,7 @@ schedCall(sched_task_t * __restrict task)
 * @root = root task
 * return: =NULL error or !=NULL ready task
 */
-inline void *
+void *
 schedFetch(sched_root_task_t * __restrict root)
 {
 	void *ptr;
@@ -378,6 +388,9 @@ schedCancel(sched_task_t * __restrict task)
 		case taskALARM:
 			queue = &TASK_ROOT(task)->root_alarm;
 			break;
+		case taskRTC:
+			queue = &TASK_ROOT(task)->root_rtc;
+			break;
 		case taskNODE:
 			queue = &TASK_ROOT(task)->root_node;
 			break;
@@ -408,6 +421,9 @@ schedCancel(sched_task_t * __restrict task)
 		case taskREADY:
 			queue = &TASK_ROOT(task)->root_ready;
 			break;
+		case taskTHREAD:
+			queue = &TASK_ROOT(task)->root_thread;
+			break;
 		default:
 			queue = NULL;
 	}
@@ -421,7 +437,7 @@ schedCancel(sched_task_t * __restrict task)
 #endif
 	}
 	if (TASK_TYPE(task) != taskUNUSE)
-		_sched_unuseTask(task);
+		sched_unuseTask(task);
 
 	return 0;
 }
@@ -457,6 +473,8 @@ schedCancelby(sched_root_task_t * __restrict root, sch
 			return -2;
 		if (schedCancelby(root, taskALARM, criteria, param, hook))
 			return -2;
+		if (schedCancelby(root, taskRTC, criteria, param, hook))
+			return -2;
 		if (schedCancelby(root, taskNODE, criteria, param, hook))
 			return -2;
 		if (schedCancelby(root, taskPROC, criteria, param, hook))
@@ -477,6 +495,8 @@ schedCancelby(sched_root_task_t * __restrict root, sch
 			return -2;
 		if (schedCancelby(root, taskREADY, criteria, param, hook))
 			return -2;
+		if (schedCancelby(root, taskTHREAD, criteria, param, hook))
+			return -2;
 		return 0;
 	}
 	/* choosen queue */
@@ -493,6 +513,9 @@ schedCancelby(sched_root_task_t * __restrict root, sch
 		case taskALARM:
 			queue = &root->root_alarm;
 			break;
+		case taskRTC:
+			queue = &root->root_rtc;
+			break;
 		case taskNODE:
 			queue = &root->root_node;
 			break;
@@ -523,6 +546,9 @@ schedCancelby(sched_root_task_t * __restrict root, sch
 		case taskREADY:
 			queue = &root->root_ready;
 			break;
+		case taskTHREAD:
+			queue = &root->root_thread;
+			break;
 		default:
 			return 0;
 	}
@@ -583,7 +609,7 @@ schedCancelby(sched_root_task_t * __restrict root, sch
 
 		TAILQ_REMOVE(queue, task, task_node);
 		if (TASK_TYPE(task) != taskUNUSE)
-			_sched_unuseTask(task);
+			sched_unuseTask(task);
 		flg ^= flg;	/* ok */
 	}
 
@@ -645,7 +671,7 @@ schedRun(sched_root_task_t *root, volatile intptr_t *
 * @tsold = old timeout polling if !=NULL
 * return: -1 error or 0 ok
 */
-inline int
+int
 schedPolling(sched_root_task_t * __restrict root, struct timespec * __restrict ts, 
 		struct timespec * __restrict tsold)
 {
@@ -670,7 +696,7 @@ schedPolling(sched_root_task_t * __restrict root, stru
 * @condValue = condition value, kill schedRun() if condValue == killState
 * return: -1 error or 0 ok
 */
-inline int
+int
 schedTermCondition(sched_root_task_t * __restrict root, intptr_t condValue)
 {
 	if (!root)