version 1.24.2.4, 2014/06/03 20:39:54			| version 1.30.6.2, 2023/02/24 16:21:23

Line 12	terms:
All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004 - 2014					| Copyright 2004 - 2023
	by Michael Pounov <misho@elwix.org>. All rights reserved.

Redistribution and use in source and binary forms, with or without
Line 67	sched_useTask(sched_root_task_t * __restrict root)

	SCHED_QUNLOCK(root, taskUNUSE);

	if (!task) {
		task = malloc(sizeof(sched_task_t));		| task = e_malloc(sizeof(sched_task_t));
		if (!task) {
			LOGERR;
			return NULL;

Line 97	sched_unuseTask(sched_task_t * __restrict task)

	return task;
}
|
|
#pragma GCC visibility push(hidden)

#ifdef HAVE_LIBPTHREAD
void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	ret = schedCall(t);
	r->root_ret = ret;

	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
#endif

#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
#endif

#pragma GCC visibility pop
|
/*
 * sched_taskExit() - Exit routine for scheduler task, explicit required for thread tasks
 *

Line 190	sched_task_t *				| Line 133	sched_task_t *
schedRead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *opt_data, size_t opt_dlen)
{
	return schedReadExt(root, func, arg, fd, opt_data, opt_dlen, 0);
}

/*
 * schedReadExt() - Add READ I/O task to scheduler queue with custom event mask
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * @mask = Event mask
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedReadExt(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *opt_data, size_t opt_dlen, u_long mask)
{
	sched_task_t *task;
	void *ptr;

Line 210	schedRead(sched_root_task_t * __restrict root, sched_t	| Line 172	schedRead(sched_root_task_t * __restrict root, sched_t

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	TASK_HARG(task) = mask;

	if (root->root_hooks.hook_add.read)
		ptr = root->root_hooks.hook_add.read(task, NULL);	| ptr = root->root_hooks.hook_add.read(task, (void*) task->task_harg);
	else
		ptr = NULL;
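
The two hunks above make schedRead() a thin wrapper and add schedReadExt(), whose extra mask is kept in TASK_HARG() and handed to the hook_add.read hook instead of NULL. A minimal usage sketch follows; the header name, the callback shape (void *(*)(sched_task_t *)) and the zero-mask default are assumptions inferred from this diff, not guarantees of the library:

#include <unistd.h>
#include <aitsched.h>			/* assumed public header of libaitsched */

/* hypothetical handler invoked when the descriptor becomes readable */
static void *
on_readable(sched_task_t *task)
{
	char buf[512];

	/* return value ignored in this sketch; the fd was stored by schedReadExt() */
	read(TASK_FD(task), buf, sizeof buf);
	return NULL;
}

static sched_task_t *
queue_read(sched_root_task_t *root, int sock)
{
	/* mask == 0 reproduces plain schedRead(); a non-zero mask is stored in
	 * TASK_HARG() and forwarded to hook_add.read, as the hunk above shows */
	return schedReadExt(root, on_readable, NULL, sock, NULL, 0, 0);
}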
|
|
Line 238	sched_task_t *				| Line 203	sched_task_t *

schedWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *opt_data, size_t opt_dlen)
{
	return schedWriteExt(root, func, arg, fd, opt_data, opt_dlen, 0);
}

/*
 * schedWriteExt() - Add WRITE I/O task to scheduler queue with custom event mask
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * @mask = Event mask
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedWriteExt(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *opt_data, size_t opt_dlen, u_long mask)
{
	sched_task_t *task;
	void *ptr;

Line 258	schedWrite(sched_root_task_t * __restrict root, sched_	| Line 242	schedWrite(sched_root_task_t * __restrict root, sched_

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	TASK_HARG(task) = mask;

	if (root->root_hooks.hook_add.write)
		ptr = root->root_hooks.hook_add.write(task, NULL);	| ptr = root->root_hooks.hook_add.write(task, (void*) task->task_harg);
	else
		ptr = NULL;
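
schedWriteExt() mirrors the read path. A correspondingly hedged sketch, reusing the includes and callback assumptions of the read example above (on_writable is a hypothetical handler):

/* hypothetical handler invoked when the descriptor is writable */
static void *
on_writable(sched_task_t *task)
{
	/* return value ignored in this sketch */
	write(TASK_FD(task), "pong\n", 5);
	return NULL;
}

/* queue a WRITE task; a zero mask keeps the old schedWrite() behaviour,
 * anything else is stored in TASK_HARG() and passed to hook_add.write */
static sched_task_t *
queue_write(sched_root_task_t *root, int sock)
{
	return schedWriteExt(root, on_writable, NULL, sock, NULL, 0, 0);
}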
|
|
Line 325	schedNode(sched_root_task_t * __restrict root, sched_t	| Line 312	schedNode(sched_root_task_t * __restrict root, sched_t

}

/*
 * schedNode2() - Add NODE task with all events to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedNode2(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
		void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskNODE;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.node)
#ifdef __FreeBSD__
		ptr = root->root_hooks.hook_add.node(task,
				(void*) (NOTE_READ | NOTE_CLOSE_WRITE | NOTE_CLOSE | NOTE_OPEN));
#else
		ptr = root->root_hooks.hook_add.node(task, NULL);
#endif
	else
		ptr = NULL;

	if (!ptr)
		insert_task_to(task, &root->root_node);
	else
		task = sched_unuseTask(task);

	return task;
#endif	/* KQ_SUPPORT */
}
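
schedNode2() above differs from schedNode() only in the event set: on FreeBSD it subscribes the vnode to NOTE_READ, NOTE_CLOSE_WRITE, NOTE_CLOSE and NOTE_OPEN, elsewhere it leaves the choice to the hook. A hedged sketch of how it might be called; the handler and helper names are illustrative assumptions:

/* hypothetical handler fired on vnode activity for the watched file */
static void *
on_file_event(sched_task_t *task)
{
	/* the delivered kevent filter data would be examined here */
	return NULL;
}

static sched_task_t *
watch_file(sched_root_task_t *root, int file_fd)
{
	/* returns NULL on error; in non-kqueue builds schedNode2() sets
	 * ENOTSUP ("disabled kqueue support") and also returns NULL */
	return schedNode2(root, on_file_event, NULL, file_fd, NULL, 0);
}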
|
|
|
/*
 * schedProc() - Add PROC task to scheduler queue
 *
 * @root = root task

Line 450	sched_task_t *				| Line 495	sched_task_t *
schedSignal(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long sig,
		void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	sched_task_t *task;
	void *ptr;

Line 485	schedSignal(sched_root_task_t * __restrict root, sched	| Line 526	schedSignal(sched_root_task_t * __restrict root, sched

	task = sched_unuseTask(task);

	return task;
#endif	/* KQ_SUPPORT */
}
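
The schedSignal() hunk adds the same kqueue guard: without KQ_SUPPORT the call now fails with ENOTSUP instead of being compiled in. A hedged sketch of queueing a signal task; SIGTERM and the handler name are illustrative only:

#include <signal.h>

/* hypothetical handler run when the watched signal is delivered */
static void *
on_sigterm(sched_task_t *task)
{
	return NULL;
}

static sched_task_t *
watch_sigterm(sched_root_task_t *root)
{
	/* NULL on error, including the non-kqueue build shown above */
	return schedSignal(root, on_sigterm, NULL, SIGTERM, NULL, 0);
}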
|
|
/*

Line 630	schedAIORead(sched_root_task_t * __restrict root, sche	| Line 670	schedAIORead(sched_root_task_t * __restrict root, sche

	} else
		off = offset;

	if (!(acb = malloc(sizeof(struct aiocb)))) {		| if (!(acb = e_malloc(sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	} else

Line 646	schedAIORead(sched_root_task_t * __restrict root, sche	| Line 686	schedAIORead(sched_root_task_t * __restrict root, sche

	if (aio_read(acb)) {
		LOGERR;
		free(acb);					| e_free(acb);
		return NULL;
	}
|
|
Line 689	schedAIOWrite(sched_root_task_t * __restrict root, sch	| Line 729	schedAIOWrite(sched_root_task_t * __restrict root, sch

	} else
		off = offset;

	if (!(acb = malloc(sizeof(struct aiocb)))) {		| if (!(acb = e_malloc(sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	} else

Line 705	schedAIOWrite(sched_root_task_t * __restrict root, sch	| Line 745	schedAIOWrite(sched_root_task_t * __restrict root, sch

	if (aio_write(acb)) {
		LOGERR;
		free(acb);					| e_free(acb);
		return NULL;
	}
|
|
Line 804	schedLIORead(sched_root_task_t * __restrict root, sche	| Line 844	schedLIORead(sched_root_task_t * __restrict root, sche

	} else
		off = offset;

	if (!(acb = calloc(sizeof(void*), nbufs))) {		| if (!(acb = e_calloc(sizeof(void*), nbufs))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(void*) * nbufs);
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		acb[i] = malloc(sizeof(struct aiocb));		| acb[i] = e_malloc(sizeof(struct aiocb));
		if (!acb[i]) {
			LOGERR;
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					free(acb[i]);		| e_free(acb[i]);
			free(acb);				| e_free(acb);
			return NULL;
		} else
			memset(acb[i], 0, sizeof(struct aiocb));

Line 835	schedLIORead(sched_root_task_t * __restrict root, sche	| Line 875	schedLIORead(sched_root_task_t * __restrict root, sche

	LOGERR;
	for (i = 0; i < nbufs; i++)
		if (acb[i])
			free(acb[i]);				| e_free(acb[i]);
	free(acb);						| e_free(acb);
	return NULL;
}
|
|
Line 881	schedLIOWrite(sched_root_task_t * __restrict root, sch	| Line 921	schedLIOWrite(sched_root_task_t * __restrict root, sch

	} else
		off = offset;

	if (!(acb = calloc(sizeof(void*), nbufs))) {		| if (!(acb = e_calloc(sizeof(void*), nbufs))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(void*) * nbufs);
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		acb[i] = malloc(sizeof(struct aiocb));		| acb[i] = e_malloc(sizeof(struct aiocb));
		if (!acb[i]) {
			LOGERR;
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					free(acb[i]);		| e_free(acb[i]);
			free(acb);				| e_free(acb);
			return NULL;
		} else
			memset(acb[i], 0, sizeof(struct aiocb));

Line 912	schedLIOWrite(sched_root_task_t * __restrict root, sch	| Line 952	schedLIOWrite(sched_root_task_t * __restrict root, sch

	LOGERR;
	for (i = 0; i < nbufs; i++)
		if (acb[i])
			free(acb[i]);				| e_free(acb[i]);
	free(acb);						| e_free(acb);
	return NULL;
}
|
|
Line 1278	sched_task_t *				| Line 1318	sched_task_t *

schedRTC(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts,
		void *opt_data, size_t opt_dlen)
{
#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)	| #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
												|     defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	sched_task_t *task;
	void *ptr;
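
The only change in schedRTC() is the guard: the 2023 revision additionally requires HAVE_LIBRT before using the POSIX timer_* API. A hedged sketch of arming a one-shot RTC task; the interval and names are illustrative, and the expiry is delivered through _sched_rtcWrapper() shown earlier in this diff:

#include <time.h>

/* hypothetical handler fired when the POSIX timer expires */
static void *
on_rtc(sched_task_t *task)
{
	return NULL;
}

static sched_task_t *
arm_rtc(sched_root_task_t *root)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 500000000L };	/* 500 ms */

	/* NULL if the RTC back end is compiled out or the task cannot be queued */
	return schedRTC(root, on_rtc, NULL, ts, NULL, 0);
}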
|
|