--- libaitsched/src/hooks.c	2014/01/27 16:52:56	1.24.4.1
+++ libaitsched/src/hooks.c	2014/02/01 14:06:08	1.26.2.1
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: hooks.c,v 1.24.4.1 2014/01/27 16:52:56 misho Exp $
+* $Id: hooks.c,v 1.26.2.1 2014/02/01 14:06:08 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -12,7 +12,7 @@ terms:
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria
 
-Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
+Copyright 2004 - 2014
 	by Michael Pounov.  All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -270,7 +270,7 @@ sched_hook_cancel(void *task, void *arg __unused)
 			pthread_cancel((pthread_t) TASK_VAL(t));
 #endif
 			return NULL;
-#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
+#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
 		case taskRTC:
 			timer_delete((timer_t) TASK_FLAG(t));
 			schedCancel((sched_task_t*) TASK_RET(t));
@@ -562,7 +562,24 @@ sched_hook_signal(void *task, void *arg __unused)
 			LOGERR;
 		return (void*) -1;
 	}
+#else
+#if 0
+	sched_task_t *t = task;
+	struct sigaction sa;
+	memset(&sa, 0, sizeof sa);
+	sigemptyset(&sa.sa_mask);
+	sa.sa_handler = _sched_sigHandler;
+	sa.sa_flags = SA_RESETHAND | SA_RESTART;
+
+	if (sigaction(TASK_VAL(t), &sa, NULL) == -1) {
+		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
+			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
+		else
+			LOGERR;
+		return (void*) -1;
+	}
+#endif	/* 0 */
 #endif
 
 	return NULL;
 }
@@ -618,12 +635,13 @@ sched_hook_fetch(void *root, void *arg __unused)
 {
 	sched_root_task_t *r = root;
 	sched_task_t *task, *tmp;
-	struct timespec now;
+	struct timespec now, m, mtmp;
 #ifndef KQ_DISABLE
 	struct kevent evt[1], res[KQ_EVENTS];
-	struct timespec *timeout, m, mtmp;
+	struct timespec *timeout;
 #else
-	struct timeval *timeout, m, mtmp, now2;
+	struct timeval *timeout, tv;
+	fd_set rfd, wfd, xfd;
 #endif
 	register int i, flg;
 	int en;
@@ -683,7 +701,6 @@ sched_hook_fetch(void *root, void *arg __unused)
 
 #ifdef TIMER_WITHOUT_SORT
 	clock_gettime(CLOCK_MONOTONIC, &now);
-#ifndef KQ_DISABLE
 	sched_timespecclear(&r->root_wait);
 	TAILQ_FOREACH(task, &r->root_timer, task_node) {
 		if (!sched_timespecisset(&r->root_wait))
@@ -691,86 +708,55 @@ sched_hook_fetch(void *root, void *arg __unused)
 		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
 			r->root_wait = TASK_TS(task);
 	}
-#else
-	sched_timevalclear(&r->root_wait);
-	TAILQ_FOREACH(task, &r->root_timer, task_node) {
-		if (!sched_timevalisset(&r->root_wait))
-			TASK_TS2TV(task, &r->root_wait);
-		else {
-			TASK_TS2TV(task, &m);
-			if (sched_timevalcmp(&m, &r->root_wait, -) < 0)
-				TASK_TS2TV(task, &r->root_wait);
-		}
-	}
-#endif	/* KQ_DISABLE */
 
 	if (TAILQ_FIRST(&r->root_timer)) {
 		m = r->root_wait;
-#ifndef KQ_DISABLE
 		sched_timespecsub(&m, &now, &mtmp);
-#else
-		sched_timespec2val(&now, &now2);
-		sched_timevalsub(&m, &now2, &mtmp);
-#endif	/* KQ_DISABLE */
 		r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
-#ifndef KQ_DISABLE
 		sched_timespecinf(&r->root_wait);
-#else
-		sched_timevalinf(&r->root_wait);
-#endif	/* KQ_DISABLE */
 	}
 #else	/* !TIMER_WITHOUT_SORT */
 	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
 		clock_gettime(CLOCK_MONOTONIC, &now);
-#ifndef KQ_DISABLE
 		m = TASK_TS(task);
 		sched_timespecsub(&m, &now, &mtmp);
-#else
-		TASK_TS2TV(task, &m);
-		sched_timespec2val(&now, &now2);
-		sched_timevalsub(&m, &now2, &mtmp);
-#endif	/* KQ_DISABLE */
 		r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
-#ifndef KQ_DISABLE
 		sched_timespecinf(&r->root_wait);
-#else
-		sched_timevalinf(&r->root_wait);
-#endif	/* KQ_DISABLE */
 	}
 #endif	/* TIMER_WITHOUT_SORT */
 
 	/* if present member of task, set NOWAIT */
 	if (TAILQ_FIRST(&r->root_task))
-#ifndef KQ_DISABLE
 		sched_timespecclear(&r->root_wait);
-#else
-		sched_timevalclear(&r->root_wait);
-#endif	/* KQ_DISABLE */
 
+	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
 #ifndef KQ_DISABLE
-	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
-#else
-	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_usec != -1)
-#endif	/* KQ_DISABLE */
 		timeout = &r->root_wait;
-#ifndef KQ_DISABLE
-	else if (sched_timespecisinf(&r->root_poll))
 #else
-	else if (sched_timevalisinf(&r->root_poll))
+		sched_timespec2val(&r->root_wait, &tv);
+		timeout = &tv;
 #endif	/* KQ_DISABLE */
+	} else if (sched_timespecisinf(&r->root_poll))
 		timeout = NULL;
-	else
+	else {
+#ifndef KQ_DISABLE
 		timeout = &r->root_poll;
+#else
+		sched_timespec2val(&r->root_poll, &tv);
+		timeout = &tv;
+#endif	/* KQ_DISABLE */
+	}
 
 #ifndef KQ_DISABLE
 	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
 #else
-	if ((en = select(r->root_kq, &r->root_fds[0], &r->root_fds[1], 
-					&r->root_fds[0], timeout)) == -1) {
+	rfd = xfd = r->root_fds[0];
+	wfd = r->root_fds[1];
+	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
 #endif	/* KQ_DISABLE */
 		if (r->root_hooks.hook_exec.exception) {
 			if (r->root_hooks.hook_exec.exception(r, NULL))
@@ -783,8 +769,8 @@ sched_hook_fetch(void *root, void *arg __unused)
 	/* kevent dispatcher */
 	now.tv_sec = now.tv_nsec = 0;
 	/* Go and catch the cat into pipes ... */
-	for (i = 0; i < en; i++) {
 #ifndef KQ_DISABLE
+	for (i = 0; i < en; i++) {
 		memcpy(evt, &res[i], sizeof evt);
 		evt->flags = EV_DELETE;
 		/* Put read/write task to ready queue */
@@ -1153,10 +1139,126 @@ sched_hook_fetch(void *root, void *arg __unused)
 				} else
 					LOGERR;
 			}
+	}
 #else	/* end of kevent dispatcher */
-#endif	/* KQ_DISABLE */
+	for (i = 0; i < r->root_kq; i++) {
+		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
+			flg = 0;
+			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
+				if (TASK_FD(task) != i)
+					continue;
+				else {
+					flg++;
+					TASK_FLAG(task) = ioctl(TASK_FD(task),
+							FIONREAD, &TASK_RET(task));
+				}
+				/* remove read handle */
+#ifdef HAVE_LIBPTHREAD
+				pthread_mutex_lock(&r->root_mtx[taskREAD]);
+#endif
+				TAILQ_REMOVE(&r->root_read, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+				pthread_mutex_unlock(&r->root_mtx[taskREAD]);
+#endif
+				if (r->root_hooks.hook_exec.exception) {
+					if (r->root_hooks.hook_exec.exception(r, NULL)) {
+						task->task_type = taskUNUSE;
+#ifdef HAVE_LIBPTHREAD
+						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
+#endif
+						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
+#endif
+					} else {
+						task->task_type = taskREADY;
+#ifdef HAVE_LIBPTHREAD
+						pthread_mutex_lock(&r->root_mtx[taskREADY]);
+#endif
+						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
+#endif
+					}
+				} else {
+					task->task_type = taskREADY;
+#ifdef HAVE_LIBPTHREAD
+					pthread_mutex_lock(&r->root_mtx[taskREADY]);
+#endif
+					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
+#endif
+				}
+			}
+			/* if exactly one match, remove the resource */
+			if (flg == 1)
+				FD_CLR(i, &r->root_fds[0]);
+		}
+
+		if (FD_ISSET(i, &wfd)) {
+			flg = 0;
+			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
+				if (TASK_FD(task) != i)
+					continue;
+				else {
+					flg++;
+					TASK_FLAG(task) = ioctl(TASK_FD(task),
+							FIONWRITE, &TASK_RET(task));
+				}
+				/* remove write handle */
+#ifdef HAVE_LIBPTHREAD
+				pthread_mutex_lock(&r->root_mtx[taskWRITE]);
+#endif
+				TAILQ_REMOVE(&r->root_write, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+				pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
+#endif
+				if (r->root_hooks.hook_exec.exception) {
+					if (r->root_hooks.hook_exec.exception(r, NULL)) {
+						task->task_type = taskUNUSE;
+#ifdef HAVE_LIBPTHREAD
+						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
+#endif
+						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
+#endif
+					} else {
+						task->task_type = taskREADY;
+#ifdef HAVE_LIBPTHREAD
+						pthread_mutex_lock(&r->root_mtx[taskREADY]);
+#endif
+						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
+#endif
+					}
+				} else {
+					task->task_type = taskREADY;
+#ifdef HAVE_LIBPTHREAD
+					pthread_mutex_lock(&r->root_mtx[taskREADY]);
+#endif
+					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
+#endif
+				}
+			}
+			/* if exactly one match, remove the resource */
+			if (flg == 1)
+				FD_CLR(i, &r->root_fds[1]);
+		}
 	}
+	/* optimize select: shrink root_kq to highest used descriptor + 1 */
+	for (i = r->root_kq - 1; i > 2; i--)
+		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
+			break;
+	if (i > 2)
+		r->root_kq = i + 1;
+#endif	/* KQ_DISABLE */
+
 
 skip_event:
 	/* timer update & put in ready queue */
 	clock_gettime(CLOCK_MONOTONIC, &now);
@@ -1285,7 +1387,7 @@ sched_hook_condition(void *root, void *arg)
 * @arg = unused
 * return: <0 errors and 0 ok
 */
-#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
+#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
 void *
 sched_hook_rtc(void *task, void *arg __unused)
 {