--- version 1.14.2.1, 2012/08/21 11:07:16
+++ version 1.18, 2013/05/30 09:13:52

Line 12 terms:

 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

-Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
 by Michael Pounov <misho@elwix.org>. All rights reserved.

 Redistribution and use in source and binary forms, with or without

Line 56 char sched_Error[STRSIZ];

 // sched_GetErrno() Get error code of last operation
-inline int
+int
 sched_GetErrno()
 {
         return sched_Errno;
 }

 // sched_GetError() Get error text of last operation
-inline const char *
+const char *
 sched_GetError()
 {
         return sched_Error;
 }

 // sched_SetErr() Set error to variables for internal use!!!
-inline void
+void
 sched_SetErr(int eno, char *estr, ...)
 {
         va_list lst;

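Dropping the inline qualifier does not change how callers use the error accessors: sched_SetErr() records an error code and message internally, and sched_GetErrno()/sched_GetError() read them back after a failed call. A minimal caller-side sketch; the aitsched.h header name, the sched_root_task_t return type of schedInit(), and the NULL/0 arguments are assumptions, not taken from this diff:

#include <stdio.h>
#include <aitsched.h>

int
main(void)
{
        sched_root_task_t *root = schedInit(NULL, 0);

        if (!root) {
                /* report the code and text recorded by sched_SetErr() */
                fprintf(stderr, "schedInit: %d %s\n",
                                sched_GetErrno(), sched_GetError());
                return 1;
        }
        schedEnd(&root);
        return 0;
}
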
Line 108 schedRegisterHooks(sched_root_task_t * __restrict root

 #ifdef EVFILT_USER
         root->root_hooks.hook_add.user = sched_hook_user;
 #endif
+#ifdef HAVE_LIBPTHREAD
+        root->root_hooks.hook_add.thread = sched_hook_thread;
+#endif

         root->root_hooks.hook_exec.cancel = sched_hook_cancel;
         root->root_hooks.hook_exec.fetch = sched_hook_fetch;

Line 148/151 schedInit(void ** __restrict data, size_t datlen)

 #ifdef HAVE_LIBPTHREAD
         for (i = 0; i < taskMAX; i++)
-                if (pthread_mutex_init(&root->root_mtx[i], NULL)) {
+                if ((errno = pthread_mutex_init(&root->root_mtx[i], NULL))) {
                         LOGERR;
                         while (i)
                                 pthread_mutex_destroy(&root->root_mtx[--i]);

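The new (errno = pthread_mutex_init(...)) form matters because the pthread functions return their error code instead of setting errno, so an errno-based logging macro such as LOGERR would otherwise report a stale value. A standalone sketch of the same pattern; MTX_MAX, the array, and the function name are illustrative stand-ins, not part of the library:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MTX_MAX 8                       /* stands in for taskMAX */

static pthread_mutex_t mtx[MTX_MAX];

static int
init_mutexes(void)
{
        int i;

        for (i = 0; i < MTX_MAX; i++)
                if ((errno = pthread_mutex_init(&mtx[i], NULL))) {
                        perror("pthread_mutex_init");   /* errno-based reporting, like LOGERR */
                        while (i)                       /* unwind mutexes already created */
                                pthread_mutex_destroy(&mtx[--i]);
                        return -1;
                }
        return 0;
}
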
Line 239/242 schedEnd(sched_root_task_t ** __restrict root)

                 schedCancel(task);
         TAILQ_FOREACH_SAFE(task, &(*root)->root_event, task_node, tmp)
                 schedCancel(task);
-        TAILQ_FOREACH_SAFE(task, &(*root)->root_task, task_node, tmp)
-                schedCancel(task);
         TAILQ_FOREACH_SAFE(task, &(*root)->root_suspend, task_node, tmp)
                 schedCancel(task);
         TAILQ_FOREACH_SAFE(task, &(*root)->root_ready, task_node, tmp)
                 schedCancel(task);
         TAILQ_FOREACH_SAFE(task, &(*root)->root_thread, task_node, tmp)
                 schedCancel(task);
+        TAILQ_FOREACH_SAFE(task, &(*root)->root_task, task_node, tmp)
+                schedCancel(task);

 #ifdef HAVE_LIBPTHREAD
         pthread_mutex_lock(&(*root)->root_mtx[taskUNUSE]);

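schedEnd() walks every queue with TAILQ_FOREACH_SAFE because schedCancel() unlinks the task it is handed; the _SAFE variant of the queue(3) macro keeps a lookahead pointer so removal does not break the traversal. A self-contained sketch of that idiom on a BSD-style sys/queue.h (the struct and function names are made up for the example):

#include <sys/queue.h>
#include <stdlib.h>

struct node {
        TAILQ_ENTRY(node)       link;
};
TAILQ_HEAD(nodelist, node);

static void
drain(struct nodelist *head)
{
        struct node *n, *tmp;

        /* tmp holds the next element, so removing n is safe mid-walk */
        TAILQ_FOREACH_SAFE(n, head, link, tmp) {
                TAILQ_REMOVE(head, n, link);
                free(n);
        }
}
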
Line 278/281 schedEnd(sched_root_task_t ** __restrict root)

  * @task = current task
  * return: !=NULL error or =NULL ok
  */
-inline void *
+void *
 schedCall(sched_task_t * __restrict task)
 {
         void *ptr = (void*) -1;

Line 301/304 schedCall(sched_task_t * __restrict task)

  * @root = root task
  * return: =NULL error or !=NULL ready task
  */
-inline void *
+void *
 schedFetch(sched_root_task_t * __restrict root)
 {
         void *ptr;

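Read together with schedCall() above, this is the scheduler's fetch-then-execute cycle: schedFetch() hands back the next ready task or NULL, and schedCall() runs it. A hand-driven sketch of that loop; schedRun() presumably performs the equivalent internally, so this is illustration only (aitsched.h as the public header is an assumption):

#include <aitsched.h>

static void
drain_ready_tasks(sched_root_task_t *root)
{
        void *task;

        while ((task = schedFetch(root)))       /* =NULL error, !=NULL ready task */
                schedCall(task);                /* run the fetched task's handler */
}
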
Line 427/430 schedCancel(sched_task_t * __restrict task)

 #endif
         }
         if (TASK_TYPE(task) != taskUNUSE)
-                _sched_unuseTask(task);
+                sched_unuseTask(task);

         return 0;
 }

Line 594/597 schedCancelby(sched_root_task_t * __restrict root, sch

                 TAILQ_REMOVE(queue, task, task_node);
                 if (TASK_TYPE(task) != taskUNUSE)
-                        _sched_unuseTask(task);
+                        sched_unuseTask(task);

                 flg ^= flg;     /* ok */
         }

Line 656/659 schedRun(sched_root_task_t *root, volatile intptr_t *

  * @tsold = old timeout polling if !=NULL
  * return: -1 error or 0 ok
  */
-inline int
+int
 schedPolling(sched_root_task_t * __restrict root, struct timespec * __restrict ts,
                 struct timespec * __restrict tsold)
 {

Line 681/684 schedPolling(sched_root_task_t * __restrict root, stru

  * @condValue = condition value, kill schedRun() if condValue == killState
  * return: -1 error or 0 ok
  */
-inline int
+int
 schedTermCondition(sched_root_task_t * __restrict root, intptr_t condValue)
 {
         if (!root)

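schedPolling() and schedTermCondition() both tune how schedRun() behaves: the former sets the polling timeout, the latter arms a value that stops the loop once the kill-state variable handed to schedRun() reaches it. A sketch of wiring them together; the 100 ms interval, the value 42, and the assumption that schedRun() takes a pointer to the volatile kill-state variable are illustrative:

#include <stdint.h>
#include <time.h>
#include <aitsched.h>

static volatile intptr_t kill_state;    /* set to 42 elsewhere to stop the loop */

static int
run_loop(sched_root_task_t *root)
{
        struct timespec ts = { 0, 100000000L }; /* 100 ms polling interval */

        if (schedPolling(root, &ts, NULL) == -1)
                return -1;
        if (schedTermCondition(root, 42) == -1) /* kill schedRun() when kill_state == 42 */
                return -1;

        schedRun(root, &kill_state);
        return 0;
}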