/************************************************************************* * (C) 2011 AITNET ltd - Sofia/Bulgaria - * by Michael Pounov * * $Author: misho $ * $Id: tasks.c,v 1.22.8.3 2014/01/28 13:17:08 misho Exp $ * ************************************************************************** The ELWIX and AITNET software is distributed under the following terms: All of the documentation and software included in the ELWIX and AITNET Releases is copyrighted by ELWIX - Sofia/Bulgaria Copyright 2004 - 2014 by Michael Pounov . All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. All advertising materials mentioning features or use of this software must display the following acknowledgement: This product includes software developed by Michael Pounov ELWIX - Embedded LightWeight unIX and its contributors. 4. Neither the name of AITNET nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "global.h"


/*
 * sched_useTask() - Get and init new task
 *
 * Recycles the first unlocked task found on the root's "unuse" queue;
 * falls back to malloc() when the queue is empty or every queued task
 * is locked.  The returned task is zeroed and its id is set to its own
 * address.
 *
 * @root = root task
 * return: NULL error or !=NULL prepared task
 */
sched_task_t *
sched_useTask(sched_root_task_t * __restrict root)
{
	sched_task_t *task, *tmp;

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskUNUSE]);
#endif
	TAILQ_FOREACH_SAFE(task, &root->root_unuse, task_node, tmp) {
		if (!TASK_ISLOCKED(task)) {
			TAILQ_REMOVE(&root->root_unuse, task, task_node);
			break;
		}
	}
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskUNUSE]);
#endif

	/* task is NULL when the loop ran to the end without a hit */
	if (!task) {
		task = malloc(sizeof(sched_task_t));
		if (!task) {
			LOGERR;
			return NULL;
		}
	}

	memset(task, 0, sizeof(sched_task_t));
	task->task_id = (uintptr_t) task;
	return task;
}

/*
 * sched_unuseTask() - Unlock and put task to unuse queue
 *
 * @task = task
 * return: always is NULL
 */
sched_task_t *
sched_unuseTask(sched_task_t * __restrict task)
{
	TASK_UNLOCK(task);
	TASK_TYPE(task) = taskUNUSE;

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
#endif
	TAILQ_INSERT_TAIL(&TASK_ROOT(task)->root_unuse, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
#endif

	/* always NULL, so callers can write: task = sched_unuseTask(task); */
	task = NULL;
	return task;
}

#pragma GCC visibility push(hidden)

#ifdef HAVE_LIBPTHREAD
/*
 * _sched_threadCleanup() - pthread cleanup handler for thread tasks:
 * unlinks the task from the root's thread queue and recycles it.
 */
static void
_sched_threadCleanup(sched_task_t *t)
{
	if (!t || !TASK_ROOT(t))
		return;

	pthread_mutex_lock(&TASK_ROOT(t)->root_mtx[taskTHREAD]);
	TAILQ_REMOVE(&TASK_ROOT(t)->root_thread, t, task_node);
	pthread_mutex_unlock(&TASK_ROOT(t)->root_mtx[taskTHREAD]);
	sched_unuseTask(t);
}

/*
 * _sched_threadWrapper() - thread entry-point wrapper; executes the
 * task via schedCall() with the cleanup handler armed, so the task is
 * dequeued and recycled even if the thread is cancelled.
 */
void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;

	pthread_cleanup_push((void (*)(void*)) _sched_threadCleanup, t);

	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/* pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); */

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	ret = schedCall(t);

	/* non-zero argument: pop AND execute the cleanup handler,
	 * which removes t from the thread queue and recycles it */
	pthread_cleanup_pop(42);
	/* NOTE(review): t was already returned to the unuse queue by the
	 * cleanup handler; this read relies on the task memory remaining
	 * valid until reuse -- confirm no concurrent recycling */
	TASK_ROOT(t)->root_ret = ret;
	pthread_exit(ret);
}
#endif

#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
/*
 * _sched_rtcWrapper() - POSIX timer callback wrapper: dequeues the RTC
 * task carried in TASK_DATA(t), deletes the one-shot timer whose id was
 * stashed in TASK_DATLEN(t), and invokes the task.
 */
void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_func_t func;
	sched_task_t *task;
	sched_root_task_t *r;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		r = TASK_ROOT(t);
		task = (sched_task_t*) TASK_DATA(t);
		func = TASK_FUNC(task);	/* NOTE(review): fetched but not used below */
	}

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskRTC]);
#endif
	TAILQ_REMOVE(&r->root_rtc, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskRTC]);
#endif
	/* NOTE(review): task is recycled before schedCall(task) below --
	 * relies on unuse-queue memory staying intact; verify */
	sched_unuseTask(task);
	timer_delete((timer_t) TASK_DATLEN(t));

	return schedCall(task);
}
#endif

#pragma GCC visibility pop

/*
 * sched_taskExit() - Exit routine for scheduler task, explicit required for thread tasks
 *
 * @task = current task
 * @retcode = return code
 * return: return code
 */
void *
sched_taskExit(sched_task_t *task, intptr_t retcode)
{
	if (!task || !TASK_ROOT(task))
		return (void*) -1;

	if (TASK_ROOT(task)->root_hooks.hook_exec.exit)
		TASK_ROOT(task)->root_hooks.hook_exec.exit(task, (void*) retcode);

	TASK_ROOT(task)->root_ret = (void*) retcode;
	return (void*) retcode;
}


/*
 * schedRead() - Add READ I/O task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedRead(sched_root_task_t * __restrict root, sched_task_func_t func, 
		void *arg, int fd, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskREAD;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* an add hook may veto queueing by returning non-NULL */
	if (root->root_hooks.hook_add.read)
		ptr = root->root_hooks.hook_add.read(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskREAD]);
#endif
		TAILQ_INSERT_TAIL(&root->root_read, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskREAD]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedWrite() - Add WRITE I/O task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskWRITE;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* an add hook may veto queueing by returning non-NULL */
	if (root->root_hooks.hook_add.write)
		ptr = root->root_hooks.hook_add.write(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskWRITE]);
#endif
		TAILQ_INSERT_TAIL(&root->root_write, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskWRITE]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedNode() - Add NODE task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd
= fd handle * @opt_data = Optional data * @opt_dlen = Optional data length * return: NULL error or !=NULL new queued task */ sched_task_t * schedNode(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, void *opt_data, size_t opt_dlen) { #ifdef KQ_DISABLE sched_SetErr(ENOTSUP, "disabled kqueue support"); return NULL; #else sched_task_t *task; void *ptr; if (!root || !func) return NULL; /* get new task */ if (!(task = sched_useTask(root))) return NULL; task->task_func = func; TASK_TYPE(task) = taskNODE; TASK_ROOT(task) = root; TASK_ARG(task) = arg; TASK_FD(task) = fd; TASK_DATA(task) = opt_data; TASK_DATLEN(task) = opt_dlen; if (root->root_hooks.hook_add.node) ptr = root->root_hooks.hook_add.node(task, NULL); else ptr = NULL; if (!ptr) { #ifdef HAVE_LIBPTHREAD pthread_mutex_lock(&root->root_mtx[taskNODE]); #endif TAILQ_INSERT_TAIL(&root->root_node, TASK_ID(task), task_node); #ifdef HAVE_LIBPTHREAD pthread_mutex_unlock(&root->root_mtx[taskNODE]); #endif } else task = sched_unuseTask(task); return task; #endif /* KQ_DISABLE */ } /* * schedProc() - Add PROC task to scheduler queue * * @root = root task * @func = task execution function * @arg = 1st func argument * @pid = PID * @opt_data = Optional data * @opt_dlen = Optional data length * return: NULL error or !=NULL new queued task */ sched_task_t * schedProc(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long pid, void *opt_data, size_t opt_dlen) { #ifdef KQ_DISABLE sched_SetErr(ENOTSUP, "disabled kqueue support"); return NULL; #else sched_task_t *task; void *ptr; if (!root || !func) return NULL; /* get new task */ if (!(task = sched_useTask(root))) return NULL; task->task_func = func; TASK_TYPE(task) = taskPROC; TASK_ROOT(task) = root; TASK_ARG(task) = arg; TASK_VAL(task) = pid; TASK_DATA(task) = opt_data; TASK_DATLEN(task) = opt_dlen; if (root->root_hooks.hook_add.proc) ptr = root->root_hooks.hook_add.proc(task, NULL); else ptr = NULL; if (!ptr) { #ifdef 
HAVE_LIBPTHREAD pthread_mutex_lock(&root->root_mtx[taskPROC]); #endif TAILQ_INSERT_TAIL(&root->root_proc, TASK_ID(task), task_node); #ifdef HAVE_LIBPTHREAD pthread_mutex_unlock(&root->root_mtx[taskPROC]); #endif } else task = sched_unuseTask(task); return task; #endif /* KQ_DISABLE */ } /* * schedUser() - Add trigger USER task to scheduler queue * * @root = root task * @func = task execution function * @arg = 1st func argument * @id = Trigger ID * @opt_data = Optional data * @opt_dlen = Optional user's trigger flags * return: NULL error or !=NULL new queued task */ sched_task_t * schedUser(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id, void *opt_data, size_t opt_dlen) { #ifdef KQ_DISABLE sched_SetErr(ENOTSUP, "disabled kqueue support"); return NULL; #else #ifndef EVFILT_USER sched_SetErr(ENOTSUP, "Not supported kevent() filter"); return NULL; #else sched_task_t *task; void *ptr; if (!root || !func) return NULL; /* get new task */ if (!(task = sched_useTask(root))) return NULL; task->task_func = func; TASK_TYPE(task) = taskUSER; TASK_ROOT(task) = root; TASK_ARG(task) = arg; TASK_VAL(task) = id; TASK_DATA(task) = opt_data; TASK_DATLEN(task) = opt_dlen; if (root->root_hooks.hook_add.user) ptr = root->root_hooks.hook_add.user(task, NULL); else ptr = NULL; if (!ptr) { #ifdef HAVE_LIBPTHREAD pthread_mutex_lock(&root->root_mtx[taskUSER]); #endif TAILQ_INSERT_TAIL(&root->root_user, TASK_ID(task), task_node); #ifdef HAVE_LIBPTHREAD pthread_mutex_unlock(&root->root_mtx[taskUSER]); #endif } else task = sched_unuseTask(task); return task; #endif /* EVFILT_USER */ #endif /* KQ_DISABLE */ } /* * schedSignal() - Add SIGNAL task to scheduler queue * * @root = root task * @func = task execution function * @arg = 1st func argument * @sig = Signal * @opt_data = Optional data * @opt_dlen = Optional data length * return: NULL error or !=NULL new queued task */ sched_task_t * schedSignal(sched_root_task_t * __restrict root, sched_task_func_t func, 
void *arg, u_long sig, void *opt_data, size_t opt_dlen) { #ifdef KQ_DISABLE sched_SetErr(ENOTSUP, "disabled kqueue support"); return NULL; #else sched_task_t *task; void *ptr; if (!root || !func) return NULL; /* get new task */ if (!(task = sched_useTask(root))) return NULL; task->task_func = func; TASK_TYPE(task) = taskSIGNAL; TASK_ROOT(task) = root; TASK_ARG(task) = arg; TASK_VAL(task) = sig; TASK_DATA(task) = opt_data; TASK_DATLEN(task) = opt_dlen; if (root->root_hooks.hook_add.signal) ptr = root->root_hooks.hook_add.signal(task, NULL); else ptr = NULL; if (!ptr) { #ifdef HAVE_LIBPTHREAD pthread_mutex_lock(&root->root_mtx[taskSIGNAL]); #endif TAILQ_INSERT_TAIL(&root->root_signal, TASK_ID(task), task_node); #ifdef HAVE_LIBPTHREAD pthread_mutex_unlock(&root->root_mtx[taskSIGNAL]); #endif } else task = sched_unuseTask(task); return task; #endif /* KQ_DISABLE */ } /* * schedAlarm() - Add ALARM task to scheduler queue * * @root = root task * @func = task execution function * @arg = 1st func argument * @ts = timeout argument structure, minimum alarm timer resolution is 1msec! 
 * @opt_data = Alarm timer ID
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAlarm(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, void *opt_data, size_t opt_dlen)
{
#ifdef KQ_DISABLE
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;
	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskALARM;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_TS(task) = ts;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* an add hook may veto queueing by returning non-NULL */
	if (root->root_hooks.hook_add.alarm)
		ptr = root->root_hooks.hook_add.alarm(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskALARM]);
#endif
		TAILQ_INSERT_TAIL(&root->root_alarm, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskALARM]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
#endif	/* KQ_DISABLE */
}

#ifdef AIO_SUPPORT
/*
 * schedAIO() - Add AIO task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @acb = AIO cb structure address
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct aiocb * __restrict acb, void *opt_data, size_t opt_dlen)
{
#ifdef KQ_DISABLE
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	sched_task_t *task;
	void *ptr;

	if (!root || !func || !acb || !opt_dlen)
		return NULL;
	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskAIO;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	/* the control block is carried as the task value */
	TASK_VAL(task) = (u_long) acb;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* an add hook may veto queueing by returning non-NULL */
	if (root->root_hooks.hook_add.aio)
		ptr = root->root_hooks.hook_add.aio(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskAIO]);
#endif
		TAILQ_INSERT_TAIL(&root->root_aio, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskAIO]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
#endif	/* KQ_DISABLE */
}

/*
 * schedAIORead() - Add AIO read task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @buffer = Buffer
 * @buflen = Buffer length
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, void *buffer, size_t buflen, off_t offset)
{
#ifdef KQ_DISABLE
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	struct aiocb *acb;
	off_t off;

	if (!root || !func || !buffer || !buflen)
		return NULL;

	/* -1 means "from the current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	if (!(acb = malloc(sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(struct aiocb));

	acb->aio_fildes = fd;
	acb->aio_nbytes = buflen;
	acb->aio_buf = buffer;
	acb->aio_offset = off;
	/* completion is delivered through the root's kqueue (FreeBSD SIGEV_KEVENT) */
	acb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb->aio_sigevent.sigev_notify_kqueue = root->root_kq;
	acb->aio_sigevent.sigev_value.sival_ptr = acb;

	if (aio_read(acb)) {
		LOGERR;
		free(acb);
		return NULL;
	}

	return schedAIO(root, func, arg, acb, buffer, buflen);
#endif	/* KQ_DISABLE */
}

/*
 * schedAIOWrite() - Add AIO write task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @buffer = Buffer
 * @buflen = Buffer length
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, void *buffer, size_t buflen, off_t offset)
{
#ifdef KQ_DISABLE
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	struct aiocb *acb;
	off_t off;

	if (!root || !func || !buffer || !buflen)
		return NULL;

	/* -1 means "from the current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	if (!(acb = malloc(sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(struct aiocb));

	acb->aio_fildes = fd;
	acb->aio_nbytes = buflen;
	acb->aio_buf = buffer;
	acb->aio_offset = off;
	/* completion is delivered through the root's kqueue (FreeBSD SIGEV_KEVENT) */
	acb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb->aio_sigevent.sigev_notify_kqueue = root->root_kq;
	acb->aio_sigevent.sigev_value.sival_ptr = acb;

	if (aio_write(acb)) {
		LOGERR;
		free(acb);
		return NULL;
	}

	return schedAIO(root, func, arg, acb, buffer, buflen);
#endif	/* KQ_DISABLE */
}

#ifdef EVFILT_LIO
/*
 * schedLIO() - Add AIO bulk tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @acbs = AIO cb structure addresses
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct aiocb ** __restrict acbs, void *opt_data, size_t opt_dlen)
{
#ifdef KQ_DISABLE
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	sched_task_t *task;
	void *ptr;

	if (!root || !func || !acbs || !opt_dlen)
		return NULL;
	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskLIO;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	/* the control-block vector is carried as the task value */
	TASK_VAL(task) = (u_long) acbs;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* an add hook may veto queueing by returning non-NULL */
	if (root->root_hooks.hook_add.lio)
		ptr = root->root_hooks.hook_add.lio(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskLIO]);
#endif
		TAILQ_INSERT_TAIL(&root->root_lio, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskLIO]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
#endif	/* KQ_DISABLE */
}

/*
 * schedLIORead() - Add list of AIO read tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @bufs = Buffer's list
 * @nbufs = Number of Buffers
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, struct iovec *bufs, size_t nbufs, off_t offset)
{
#ifdef KQ_DISABLE
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	struct sigevent sig;
	struct aiocb **acb;
	off_t off;
	register int i;

	if (!root || !func || !bufs || !nbufs)
		return NULL;

	/* -1 means "from the current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	if (!(acb = calloc(sizeof(void*), nbufs))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(void*) * nbufs);

	/* one control block per iovec; off advances by each buffer's length */
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		acb[i] = malloc(sizeof(struct aiocb));
		if (!acb[i]) {
			LOGERR;
			/* free everything allocated so far (vector is zeroed) */
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					free(acb[i]);
			free(acb);
			return NULL;
		} else
			memset(acb[i], 0, sizeof(struct aiocb));
		acb[i]->aio_fildes = fd;
		acb[i]->aio_nbytes = bufs[i].iov_len;
		acb[i]->aio_buf = bufs[i].iov_base;
		acb[i]->aio_offset = off;
		acb[i]->aio_lio_opcode = LIO_READ;
	}

	/* one kqueue notification for the whole list */
	memset(&sig, 0, sizeof sig);
	sig.sigev_notify = SIGEV_KEVENT;
	sig.sigev_notify_kqueue = root->root_kq;
	sig.sigev_value.sival_ptr = acb;

	if (lio_listio(LIO_NOWAIT, acb, nbufs, &sig)) {
		LOGERR;
		for (i = 0; i < nbufs; i++)
			if (acb[i])
				free(acb[i]);
		free(acb);
		return NULL;
	}

	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
#endif	/* KQ_DISABLE */
}

/*
 * schedLIOWrite() - Add
list of AIO write tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @bufs = Buffer's list
 * @nbufs = Number of Buffers
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, struct iovec *bufs, size_t nbufs, off_t offset)
{
#ifdef KQ_DISABLE
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	struct sigevent sig;
	struct aiocb **acb;
	off_t off;
	register int i;

	if (!root || !func || !bufs || !nbufs)
		return NULL;

	/* -1 means "from the current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	if (!(acb = calloc(sizeof(void*), nbufs))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(void*) * nbufs);

	/* one control block per iovec; off advances by each buffer's length */
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		acb[i] = malloc(sizeof(struct aiocb));
		if (!acb[i]) {
			LOGERR;
			/* free everything allocated so far (vector is zeroed) */
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					free(acb[i]);
			free(acb);
			return NULL;
		} else
			memset(acb[i], 0, sizeof(struct aiocb));
		acb[i]->aio_fildes = fd;
		acb[i]->aio_nbytes = bufs[i].iov_len;
		acb[i]->aio_buf = bufs[i].iov_base;
		acb[i]->aio_offset = off;
		acb[i]->aio_lio_opcode = LIO_WRITE;
	}

	/* one kqueue notification for the whole list */
	memset(&sig, 0, sizeof sig);
	sig.sigev_notify = SIGEV_KEVENT;
	sig.sigev_notify_kqueue = root->root_kq;
	sig.sigev_value.sival_ptr = acb;

	if (lio_listio(LIO_NOWAIT, acb, nbufs, &sig)) {
		LOGERR;
		for (i = 0; i < nbufs; i++)
			if (acb[i])
				free(acb[i]);
		free(acb);
		return NULL;
	}

	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
#endif	/* KQ_DISABLE */
}
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

/*
 * schedTimer() - Add TIMER task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ts = timeout argument structure
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new
queued task
 */
sched_task_t *
schedTimer(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *tmp, *t = NULL;
	void *ptr;
	struct timespec now;

	if (!root || !func)
		return NULL;
	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskTIMER;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* convert the relative timeout to an absolute CLOCK_MONOTONIC
	 * deadline, normalizing tv_nsec into [0, 1e9) */
	clock_gettime(CLOCK_MONOTONIC, &now);
	now.tv_sec += ts.tv_sec;
	now.tv_nsec += ts.tv_nsec;
	if (now.tv_nsec >= 1000000000L) {
		now.tv_sec++;
		now.tv_nsec -= 1000000000L;
	} else if (now.tv_nsec < 0) {
		now.tv_sec--;
		now.tv_nsec += 1000000000L;
	}
	TASK_TS(task) = now;

	/* an add hook may veto queueing by returning non-NULL */
	if (root->root_hooks.hook_add.timer)
		ptr = root->root_hooks.hook_add.timer(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTIMER]);
#endif
#ifdef TIMER_WITHOUT_SORT
		TAILQ_INSERT_TAIL(&root->root_timer, TASK_ID(task), task_node);
#else
		/* keep the timer queue sorted by deadline: insert before the
		 * first task that does not expire earlier than ours */
		TAILQ_FOREACH_SAFE(t, &root->root_timer, task_node, tmp)
			if (sched_timespeccmp(&TASK_TS(task), &TASK_TS(t), -) < 1)
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_timer, TASK_ID(task), task_node);
		else
			TAILQ_INSERT_BEFORE(t, TASK_ID(task), task_node);
#endif
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTIMER]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedEvent() - Add EVENT task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @val = additional func argument
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedEvent(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;
	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskEVENT;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = val;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* an add hook may veto queueing by returning non-NULL */
	if (root->root_hooks.hook_add.event)
		ptr = root->root_hooks.hook_add.event(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskEVENT]);
#endif
		TAILQ_INSERT_TAIL(&root->root_event, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskEVENT]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedTask() - Add regular task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @prio = regular task priority, 0 is hi priority for regular tasks
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedTask(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long prio, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *tmp, *t = NULL;
	void *ptr;

	if (!root || !func)
		return NULL;
	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskTASK;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = prio;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* an add hook may veto queueing by returning non-NULL */
	if (root->root_hooks.hook_add.task)
		ptr = root->root_hooks.hook_add.task(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTASK]);
#endif
		/* keep the queue sorted by priority (lower value first) */
		TAILQ_FOREACH_SAFE(t, &root->root_task, task_node, tmp)
			if (TASK_VAL(task) < TASK_VAL(t))
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_task, TASK_ID(task), task_node);
		else
			TAILQ_INSERT_BEFORE(t, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTASK]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedSuspend() - Add Suspended task to scheduler
queue * * @root = root task * @func = task execution function * @arg = 1st func argument * @id = Trigger ID * @opt_data = Optional data * @opt_dlen = Optional data length * return: NULL error or !=NULL new queued task */ sched_task_t * schedSuspend(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id, void *opt_data, size_t opt_dlen) { sched_task_t *task; void *ptr; if (!root || !func) return NULL; /* get new task */ if (!(task = sched_useTask(root))) return NULL; task->task_func = func; TASK_TYPE(task) = taskSUSPEND; TASK_ROOT(task) = root; TASK_ARG(task) = arg; TASK_VAL(task) = id; TASK_DATA(task) = opt_data; TASK_DATLEN(task) = opt_dlen; if (root->root_hooks.hook_add.suspend) ptr = root->root_hooks.hook_add.suspend(task, NULL); else ptr = NULL; if (!ptr) { #ifdef HAVE_LIBPTHREAD pthread_mutex_lock(&root->root_mtx[taskSUSPEND]); #endif TAILQ_INSERT_TAIL(&root->root_suspend, TASK_ID(task), task_node); #ifdef HAVE_LIBPTHREAD pthread_mutex_unlock(&root->root_mtx[taskSUSPEND]); #endif } else task = sched_unuseTask(task); return task; } /* * schedCallOnce() - Call once from scheduler * * @root = root task * @func = task execution function * @arg = 1st func argument * @val = additional func argument * @opt_data = Optional data * @opt_dlen = Optional data length * return: return value from called func */ sched_task_t * schedCallOnce(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val, void *opt_data, size_t opt_dlen) { sched_task_t *task; void *ret; if (!root || !func) return NULL; /* get new task */ if (!(task = sched_useTask(root))) return NULL; task->task_func = func; TASK_TYPE(task) = taskEVENT; TASK_ROOT(task) = root; TASK_ARG(task) = arg; TASK_VAL(task) = val; TASK_DATA(task) = opt_data; TASK_DATLEN(task) = opt_dlen; ret = schedCall(task); sched_unuseTask(task); return ret; } /* * schedThread() - Add thread task to scheduler queue * * @root = root task * @func = task execution function * @arg = 1st func 
argument * @ss = stack size * @opt_data = Optional data * @opt_dlen = Optional data length * return: NULL error or !=NULL new queued task */ sched_task_t * schedThread(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, size_t ss, void *opt_data, size_t opt_dlen) { #ifndef HAVE_LIBPTHREAD sched_SetErr(ENOTSUP, "Not supported thread tasks"); return NULL; #endif sched_task_t *task; pthread_attr_t attr; if (!root || !func) return NULL; /* get new task */ if (!(task = sched_useTask(root))) { return NULL; } task->task_func = func; TASK_TYPE(task) = taskTHREAD; TASK_ROOT(task) = root; TASK_ARG(task) = arg; TASK_DATA(task) = opt_data; TASK_DATLEN(task) = opt_dlen; pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_DETACHED); if (ss && (errno = pthread_attr_setstacksize(&attr, ss))) { LOGERR; pthread_attr_destroy(&attr); return sched_unuseTask(task); } if ((errno = pthread_attr_getstacksize(&attr, &ss))) { LOGERR; pthread_attr_destroy(&attr); return sched_unuseTask(task); } else TASK_FLAG(task) = ss; if ((errno = pthread_attr_setguardsize(&attr, ss))) { LOGERR; pthread_attr_destroy(&attr); return sched_unuseTask(task); } #ifdef SCHED_RR pthread_attr_setschedpolicy(&attr, SCHED_RR); #else pthread_attr_setschedpolicy(&attr, SCHED_OTHER); #endif pthread_mutex_lock(&root->root_mtx[taskTHREAD]); TAILQ_INSERT_TAIL(&root->root_thread, TASK_ID(task), task_node); pthread_mutex_unlock(&root->root_mtx[taskTHREAD]); if (root->root_hooks.hook_add.thread) if (root->root_hooks.hook_add.thread(task, &attr)) { schedCancel(task); task = NULL; } pthread_attr_destroy(&attr); return task; } /* * schedRTC() - Add RTC task to scheduler queue * * @root = root task * @func = task execution function * @arg = 1st func argument * @ts = timeout argument structure, minimum alarm timer resolution is 1msec! 
* @opt_data = Optional RTC ID * @opt_dlen = Optional data length * return: NULL error or !=NULL new queued task */ sched_task_t * schedRTC(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, void *opt_data, size_t opt_dlen) { #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) sched_task_t *task; void *ptr; if (!root || !func) return NULL; /* get new task */ if (!(task = sched_useTask(root))) return NULL; task->task_func = func; TASK_TYPE(task) = taskRTC; TASK_ROOT(task) = root; TASK_ARG(task) = arg; TASK_TS(task) = ts; TASK_DATA(task) = opt_data; TASK_DATLEN(task) = opt_dlen; if (root->root_hooks.hook_add.rtc) ptr = root->root_hooks.hook_add.rtc(task, NULL); else ptr = NULL; if (!ptr) { #ifdef HAVE_LIBPTHREAD pthread_mutex_lock(&root->root_mtx[taskRTC]); #endif TAILQ_INSERT_TAIL(&root->root_rtc, TASK_ID(task), task_node); #ifdef HAVE_LIBPTHREAD pthread_mutex_unlock(&root->root_mtx[taskRTC]); #endif } else task = sched_unuseTask(task); return task; #else sched_SetErr(ENOTSUP, "Not supported realtime clock extensions"); return NULL; #endif }