/*
 * version 1.13.2.2, 2012/08/22 10:38:21
 *
 * ... (file header elided in the diff; the preamble ends with) ...
 * terms:
 *
 * All of the documentation and software included in the ELWIX and AITNET
 * Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
 *
 * Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
 *	by Michael Pounov <misho@elwix.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * ... (standard BSD license conditions and disclaimer elided in the diff,
 * down to) ...
 * SUCH DAMAGE.
 */
#include "global.h"

/*
 * sched_useTask() - Get and init new task
 *
 * @root = root task
 * return: NULL error or !=NULL prepared task
 */
inline sched_task_t *
sched_useTask(sched_root_task_t * __restrict root)
{
	sched_task_t *task, *tmp;

	TAILQ_FOREACH_SAFE(task, &root->root_unuse, task_node, tmp) {
		if (!TASK_ISLOCKED(task)) {
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&root->root_mtx[taskUNUSE]);
#endif
			/* ... unchanged lines elided in the diff: the found
			 * task is unlinked from the unuse queue, the mutex is
			 * released, and a task is allocated when the queue
			 * holds none ... */
		}
	}

	memset(task, 0, sizeof(sched_task_t));
	task->task_id = (uintptr_t) task;
	return task;
}

/*
 * sched_unuseTask() - Unlock and put task to unuse queue
 *
 * @task = task
 * return: always is NULL
 */
inline sched_task_t *
sched_unuseTask(sched_task_t * __restrict task)
{
	TASK_UNLOCK(task);
	TASK_TYPE(task) = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
#endif
	TAILQ_INSERT_TAIL(&TASK_ROOT(task)->root_unuse, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
#endif
	task = NULL;
	return task;
}

#pragma GCC visibility push(hidden)

void *
_sched_threadJoin(sched_task_t *task)
{
	void *ret = NULL;

	if (!task)
		return NULL;

#ifdef HAVE_LIBPTHREAD
	pthread_join((pthread_t) TASK_VAL(task), &ret);
	TASK_ROOT(task)->root_ret = ret;
#endif

	return NULL;
}

#pragma GCC visibility pop

/*
 * sched_taskExit() - Exit routine for scheduler task, explicit required for thread tasks
 *
 * @task = current task
 * @retcode = return code
 * return: return code
 */
inline void *
sched_taskExit(sched_task_t *task, intptr_t retcode)
{
	if (!task || !TASK_ROOT(task))
		return (void*) -1;

	if (TASK_ROOT(task)->root_hooks.hook_exec.exit)
		TASK_ROOT(task)->root_hooks.hook_exec.exit(task, (void*) retcode);

	TASK_ROOT(task)->root_ret = (void*) retcode;

#ifdef HAVE_LIBPTHREAD
	if (TASK_TYPE(task) == taskTHREAD) {
		if (TASK_FLAG(task) == PTHREAD_CREATE_JOINABLE)	/* joinable thread */
			schedTask(TASK_ROOT(task), _sched_threadJoin, TASK_ARG(task),
					TASK_VAL(task), TASK_DATA(task), TASK_DATLEN(task));
		sched_unuseTask(task);
		pthread_exit((void*) retcode);
	}
#endif

	return (void*) retcode;
}

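/*
 * Usage sketch for sched_taskExit(), not part of the original source: a
 * worker callback for a thread task terminates itself.  The names worker()
 * and do_work() are hypothetical.
 *
 *	static void *
 *	worker(sched_task_t *task)
 *	{
 *		int rc = do_work(TASK_ARG(task));	// do_work() assumed
 *
 *		// For taskTHREAD this call does not return: it queues
 *		// _sched_threadJoin() for joinable threads, recycles the
 *		// task and calls pthread_exit() with the return code.
 *		return sched_taskExit(task, rc);
 *	}
 */
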
/*
 * schedRead() - Add READ I/O task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedRead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskREAD;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.read)
		ptr = root->root_hooks.hook_add.read(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskREAD]);
#endif
		TAILQ_INSERT_TAIL(&root->root_read, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskREAD]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

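/*
 * Usage sketch for schedRead(), not part of the original source: queue a
 * read handler for a descriptor and, assuming tasks are dispatched once,
 * re-arm it from the callback.  on_read(), sock and the initialized `root`
 * are hypothetical.
 *
 *	static void *
 *	on_read(sched_task_t *task)
 *	{
 *		char buf[BUFSIZ];
 *		ssize_t len = read(TASK_FD(task), buf, sizeof buf);
 *
 *		if (len > 0)	// queue the fd again for the next event
 *			schedRead(TASK_ROOT(task), on_read, TASK_ARG(task),
 *					TASK_FD(task), NULL, 0);
 *		return NULL;
 *	}
 *
 *	schedRead(root, on_read, NULL, sock, NULL, 0);
 */
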
/*
 * schedWrite() - Add WRITE I/O task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskWRITE;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.write)
		ptr = root->root_hooks.hook_add.write(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskWRITE]);
#endif
		TAILQ_INSERT_TAIL(&root->root_write, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskWRITE]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedNode() - Add NODE task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedNode(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskNODE;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.node)
		ptr = root->root_hooks.hook_add.node(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskNODE]);
#endif
		TAILQ_INSERT_TAIL(&root->root_node, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskNODE]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedProc() - Add PROC task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @pid = PID
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedProc(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long pid,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskPROC;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = pid;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.proc)
		ptr = root->root_hooks.hook_add.proc(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskPROC]);
#endif
		TAILQ_INSERT_TAIL(&root->root_proc, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskPROC]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedUser() - Add trigger USER task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @id = Trigger ID
 * @opt_data = Optional data
 * @opt_dlen = Optional user's trigger flags
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedUser(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id,
		void *opt_data, size_t opt_dlen)
{
#ifndef EVFILT_USER
	sched_SetErr(ENOTSUP, "Not supported kevent() filter");
	return NULL;
#else
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskUSER;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = id;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.user)
		ptr = root->root_hooks.hook_add.user(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskUSER]);
#endif
		TAILQ_INSERT_TAIL(&root->root_user, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskUSER]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
#endif
}

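/*
 * Usage sketch for schedUser(), not part of the original source: on kqueue
 * platforms that define EVFILT_USER, a USER task waits on trigger `id`
 * until another context fires it (typically via kevent() with
 * NOTE_TRIGGER).  The id 100 and on_trigger() are hypothetical.
 *
 *	schedUser(root, on_trigger, NULL, 100, NULL, 0);
 */
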
/*
 * schedSignal() - Add SIGNAL task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @sig = Signal
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedSignal(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long sig,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskSIGNAL;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = sig;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.signal)
		ptr = root->root_hooks.hook_add.signal(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskSIGNAL]);
#endif
		TAILQ_INSERT_TAIL(&root->root_signal, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskSIGNAL]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

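/*
 * Usage sketch for schedSignal(), not part of the original source: run a
 * handler from the scheduler when SIGHUP is delivered; inside the callback
 * TASK_VAL() carries the signal number.  on_hup() is hypothetical.
 *
 *	schedSignal(root, on_hup, NULL, SIGHUP, NULL, 0);
 */
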
/*
 * schedAlarm() - Add ALARM task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ts = timeout argument structure, minimum alarm timer resolution is 1msec!
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAlarm(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskALARM;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_TS(task) = ts;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.alarm)
		ptr = root->root_hooks.hook_add.alarm(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskALARM]);
#endif
		TAILQ_INSERT_TAIL(&root->root_alarm, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskALARM]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

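/*
 * Usage sketch for schedAlarm(), not part of the original source: fire a
 * callback roughly 1.5 seconds from now; as the header comment warns, the
 * resolution is no finer than 1msec.  on_alarm() is hypothetical.
 *
 *	struct timespec ts = { 1, 500000000L };	// 1.5 sec
 *
 *	schedAlarm(root, on_alarm, NULL, ts, NULL, 0);
 */
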
#ifdef AIO_SUPPORT
/*
 * schedAIO() - Add AIO task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @acb = AIO cb structure address
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg,
		struct aiocb * __restrict acb, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func || !acb || !opt_dlen)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskAIO;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = (u_long) acb;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.aio)
		ptr = root->root_hooks.hook_add.aio(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskAIO]);
#endif
		TAILQ_INSERT_TAIL(&root->root_aio, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskAIO]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedAIORead() - Add AIO read task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @buffer = Buffer
 * @buflen = Buffer length
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
inline sched_task_t *
schedAIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *buffer, size_t buflen, off_t offset)
{
	struct aiocb *acb;
	off_t off;

	if (!root || !func || !buffer || !buflen)
		return NULL;

	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	if (!(acb = malloc(sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(struct aiocb));

	acb->aio_fildes = fd;
	acb->aio_nbytes = buflen;
	acb->aio_buf = buffer;
	acb->aio_offset = off;
	acb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb->aio_sigevent.sigev_notify_kqueue = root->root_kq;
	acb->aio_sigevent.sigev_value.sival_ptr = acb;

	if (aio_read(acb)) {
		LOGERR;
		free(acb);
		return NULL;
	}

	return schedAIO(root, func, arg, acb, buffer, buflen);
}

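/*
 * Usage sketch for schedAIORead(), not part of the original source: read
 * the next 4KB from the file's current position (offset -1) and get the
 * callback on completion via the kqueue SIGEV_KEVENT notification set up
 * above.  fd, buf and on_aio() are hypothetical.
 *
 *	static char buf[4096];
 *
 *	schedAIORead(root, on_aio, NULL, fd, buf, sizeof buf, (off_t) -1);
 */
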
/*
 * schedAIOWrite() - Add AIO write task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @buffer = Buffer
 * @buflen = Buffer length
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
inline sched_task_t *
schedAIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *buffer, size_t buflen, off_t offset)
{
	struct aiocb *acb;
	off_t off;

	if (!root || !func || !buffer || !buflen)
		return NULL;

	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	if (!(acb = malloc(sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(struct aiocb));

	acb->aio_fildes = fd;
	acb->aio_nbytes = buflen;
	acb->aio_buf = buffer;
	acb->aio_offset = off;
	acb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb->aio_sigevent.sigev_notify_kqueue = root->root_kq;
	acb->aio_sigevent.sigev_value.sival_ptr = acb;

	if (aio_write(acb)) {
		LOGERR;
		free(acb);
		return NULL;
	}

	return schedAIO(root, func, arg, acb, buffer, buflen);
}

#ifdef EVFILT_LIO
/*
 * schedLIO() - Add AIO bulk tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @acbs = AIO cb structure addresses
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg,
		struct aiocb ** __restrict acbs, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func || !acbs || !opt_dlen)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskLIO;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = (u_long) acbs;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.lio)
		ptr = root->root_hooks.hook_add.lio(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskLIO]);
#endif
		TAILQ_INSERT_TAIL(&root->root_lio, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskLIO]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedLIORead() - Add list of AIO read tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @bufs = Buffer's list
 * @nbufs = Number of Buffers
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		struct iovec *bufs, size_t nbufs, off_t offset)
{
	struct sigevent sig;
	struct aiocb **acb;
	off_t off;
	register int i;

	if (!root || !func || !bufs || !nbufs)
		return NULL;

	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	if (!(acb = calloc(sizeof(void*), nbufs))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(void*) * nbufs);
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		acb[i] = malloc(sizeof(struct aiocb));
		if (!acb[i]) {
			LOGERR;
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					free(acb[i]);
			free(acb);
			return NULL;
		} else
			memset(acb[i], 0, sizeof(struct aiocb));
		acb[i]->aio_fildes = fd;
		acb[i]->aio_nbytes = bufs[i].iov_len;
		acb[i]->aio_buf = bufs[i].iov_base;
		acb[i]->aio_offset = off;
		acb[i]->aio_lio_opcode = LIO_READ;
	}
	memset(&sig, 0, sizeof sig);
	sig.sigev_notify = SIGEV_KEVENT;
	sig.sigev_notify_kqueue = root->root_kq;
	sig.sigev_value.sival_ptr = acb;

	if (lio_listio(LIO_NOWAIT, acb, nbufs, &sig)) {
		LOGERR;
		for (i = 0; i < nbufs; i++)
			if (acb[i])
				free(acb[i]);
		free(acb);
		return NULL;
	}

	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
}

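/*
 * Usage sketch for schedLIORead(), not part of the original source: submit
 * two buffers as a single lio_listio() batch; the callback runs when the
 * whole list completes.  buf0, buf1 and on_lio() are hypothetical.
 *
 *	struct iovec iov[2] = {
 *		{ buf0, sizeof buf0 },
 *		{ buf1, sizeof buf1 }
 *	};
 *
 *	schedLIORead(root, on_lio, NULL, fd, iov, 2, (off_t) -1);
 */
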
/*
 * schedLIOWrite() - Add list of AIO write tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @bufs = Buffer's list
 * @nbufs = Number of Buffers
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
inline sched_task_t *
schedLIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		struct iovec *bufs, size_t nbufs, off_t offset)
{
	struct sigevent sig;
	struct aiocb **acb;
	off_t off;
	register int i;

	if (!root || !func || !bufs || !nbufs)
		return NULL;

	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	if (!(acb = calloc(sizeof(void*), nbufs))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(void*) * nbufs);
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		acb[i] = malloc(sizeof(struct aiocb));
		if (!acb[i]) {
			LOGERR;
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					free(acb[i]);
			free(acb);
			return NULL;
		} else
			memset(acb[i], 0, sizeof(struct aiocb));
		acb[i]->aio_fildes = fd;
		acb[i]->aio_nbytes = bufs[i].iov_len;
		acb[i]->aio_buf = bufs[i].iov_base;
		acb[i]->aio_offset = off;
		acb[i]->aio_lio_opcode = LIO_WRITE;
	}
	memset(&sig, 0, sizeof sig);
	sig.sigev_notify = SIGEV_KEVENT;
	sig.sigev_notify_kqueue = root->root_kq;
	sig.sigev_value.sival_ptr = acb;

	if (lio_listio(LIO_NOWAIT, acb, nbufs, &sig)) {
		LOGERR;
		for (i = 0; i < nbufs; i++)
			if (acb[i])
				free(acb[i]);
		free(acb);
		return NULL;
	}

	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
}
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

/*
 * schedTimer() - Add TIMER task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ts = timeout argument structure
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedTimer(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *tmp, *t = NULL;
	void *ptr;
	struct timespec now;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskTIMER;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* calculate timespec structure */
	clock_gettime(CLOCK_MONOTONIC, &now);
	now.tv_sec += ts.tv_sec;
	now.tv_nsec += ts.tv_nsec;
	if (now.tv_nsec >= 1000000000L) {
		now.tv_sec++;
		now.tv_nsec -= 1000000000L;
	} else if (now.tv_nsec < 0) {
		now.tv_sec--;
		now.tv_nsec += 1000000000L;
	}
	TASK_TS(task) = now;

	if (root->root_hooks.hook_add.timer)
		ptr = root->root_hooks.hook_add.timer(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTIMER]);
#endif
#ifdef TIMER_WITHOUT_SORT
		TAILQ_INSERT_TAIL(&root->root_timer, TASK_ID(task), task_node);
#else
		TAILQ_FOREACH_SAFE(t, &root->root_timer, task_node, tmp)
			if (sched_timespeccmp(&TASK_TS(task), &TASK_TS(t), -) < 1)
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_timer, TASK_ID(task), task_node);
		else
			TAILQ_INSERT_BEFORE(t, TASK_ID(task), task_node);
#endif
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTIMER]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

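/*
 * Usage sketch for schedTimer(), not part of the original source: the ts
 * argument is relative; the code above converts it to an absolute
 * CLOCK_MONOTONIC deadline and (unless TIMER_WITHOUT_SORT) keeps
 * root_timer sorted by deadline.  on_timer() is hypothetical.
 *
 *	struct timespec ts = { 5, 0 };	// 5 seconds from now
 *
 *	schedTimer(root, on_timer, NULL, ts, NULL, 0);
 */
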
/*
 * schedEvent() - Add EVENT task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @val = additional func argument
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedEvent(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskEVENT;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = val;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.event)
		ptr = root->root_hooks.hook_add.event(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskEVENT]);
#endif
		TAILQ_INSERT_TAIL(&root->root_event, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskEVENT]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

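/*
 * Usage sketch for schedEvent(), not part of the original source: queue an
 * event keyed by an arbitrary value; the callback reads it back with
 * TASK_VAL().  The value 42 and on_event() are hypothetical.
 *
 *	schedEvent(root, on_event, NULL, 42, NULL, 0);
 */
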
/*
 * schedTask() - Add regular task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @prio = regular task priority, 0 is hi priority for regular tasks
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedTask(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long prio,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *tmp, *t = NULL;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskTASK;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = prio;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.task)
		ptr = root->root_hooks.hook_add.task(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTASK]);
#endif
		TAILQ_FOREACH_SAFE(t, &root->root_task, task_node, tmp)
			if (TASK_VAL(task) < TASK_VAL(t))
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_task, TASK_ID(task), task_node);
		else
			TAILQ_INSERT_BEFORE(t, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTASK]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

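/*
 * Usage sketch for schedTask(), not part of the original source: two
 * regular tasks; the insertion loop above keeps root_task ordered by
 * TASK_VAL(), so the prio 0 task is dispatched before the prio 10 task.
 * urgent_cb() and lazy_cb() are hypothetical.
 *
 *	schedTask(root, urgent_cb, NULL, 0, NULL, 0);	// hi priority
 *	schedTask(root, lazy_cb, NULL, 10, NULL, 0);
 */
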
/*
 * schedSuspend() - Add Suspended task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @id = Trigger ID
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedSuspend(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskSUSPEND;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = id;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.suspend)
		ptr = root->root_hooks.hook_add.suspend(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskSUSPEND]);
#endif
		TAILQ_INSERT_TAIL(&root->root_suspend, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskSUSPEND]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedCallOnce() - Call once from scheduler
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @val = additional func argument
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: return value from called func
 */
sched_task_t *
schedCallOnce(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ret;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskEVENT;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = val;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	ret = schedCall(task);

	sched_unuseTask(task);
	return ret;
}

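/*
 * Usage sketch for schedCallOnce(), not part of the original source:
 * nothing is queued here; the function builds a transient task, invokes
 * func synchronously through schedCall(), recycles the task, and hands
 * back func's return value (cast to sched_task_t *).  once_cb() is
 * hypothetical.
 *
 *	void *res = (void *) schedCallOnce(root, once_cb, NULL, 0, NULL, 0);
 */
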
/*
 * schedThread() - Add thread task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @detach = Detach thread from scheduler, if !=0
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedThread(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int detach,
		void *opt_data, size_t opt_dlen)
{
#ifndef HAVE_LIBPTHREAD
	sched_SetErr(ENOTSUP, "Not supported thread tasks");
	return NULL;
#endif
	sched_task_t *task;
	void *ptr;
	pthread_attr_t attr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskTHREAD;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FLAG(task) = detach ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, TASK_FLAG(task));
	if (root->root_hooks.hook_add.thread)
		ptr = root->root_hooks.hook_add.thread(task, &attr);
	else
		ptr = NULL;
	pthread_attr_destroy(&attr);

	if (!ptr) {
		pthread_mutex_lock(&root->root_mtx[taskTHREAD]);
		TAILQ_INSERT_TAIL(&root->root_thread, TASK_ID(task), task_node);
		pthread_mutex_unlock(&root->root_mtx[taskTHREAD]);
	} else
		task = sched_unuseTask(task);

	return task;
}

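/*
 * Usage sketch for schedThread(), not part of the original source: queue a
 * detached worker (detach != 0 maps to PTHREAD_CREATE_DETACHED above).  A
 * joinable worker (detach = 0) should finish by calling sched_taskExit(),
 * which queues the _sched_threadJoin() helper so the scheduler reaps the
 * thread.  worker() is the hypothetical callback from the sched_taskExit()
 * sketch earlier in this file.
 *
 *	schedThread(root, worker, NULL, 1, NULL, 0);
 */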