--- libaitsched/src/hooks.c	2012/08/01 15:17:38	1.10.2.5
+++ libaitsched/src/hooks.c	2012/08/21 11:45:35	1.13.2.2
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: hooks.c,v 1.10.2.5 2012/08/01 15:17:38 misho Exp $
+* $Id: hooks.c,v 1.13.2.2 2012/08/21 11:45:35 misho Exp $
 *
 *************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -107,9 +107,13 @@ sched_hook_cancel(void *task, void *arg __unused)
 	sched_task_t *t = task;
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };
-#ifdef EVFILT_AIO
+#ifdef AIO_SUPPORT
 	struct aiocb *acb;
-#endif
+#ifdef EVFILT_LIO
+	struct aiocb **acbs;
+	register int i;
+#endif	/* EVFILT_LIO */
+#endif	/* AIO_SUPPORT */
 
 	if (!t || !TASK_ROOT(t))
 		return (void*) -1;
@@ -159,7 +163,7 @@ sched_hook_cancel(void *task, void *arg __unused)
 		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
 #endif
 		break;
-#ifdef EVFILT_AIO
+#ifdef AIO_SUPPORT
 	case taskAIO:
 #ifdef __NetBSD__
 		EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
@@ -168,12 +172,32 @@ sched_hook_cancel(void *task, void *arg __unused)
 #endif
 		acb = (struct aiocb*) TASK_VAL(t);
 		if (acb) {
-			aio_cancel(acb->aio_fildes, acb);
+			if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
+				aio_return(acb);
 			free(acb);
 			TASK_VAL(t) = 0;
 		}
 		break;
+#ifdef EVFILT_LIO
+	case taskLIO:
+#ifdef __NetBSD__
+		EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
+#else
+		EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
 #endif
+		acbs = (struct aiocb**) TASK_VAL(t);
+		if (acbs) {
+			for (i = 0; i < TASK_DATLEN(t); i++) {
+				if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
+					aio_return(acbs[i]);
+				free(acbs[i]);
+			}
+			free(acbs);
+			TASK_VAL(t) = 0;
+		}
+		break;
+#endif	/* EVFILT_LIO */
+#endif	/* AIO_SUPPORT */
 #ifdef EVFILT_USER
 	case taskUSER:
 #ifdef __NetBSD__
@@ -183,6 +207,10 @@ sched_hook_cancel(void *task, void *arg __unused)
 #endif
 		break;
 #endif
+	case taskTHREAD:
+#ifdef HAVE_LIBPTHREAD
+		pthread_cancel((pthread_t) TASK_VAL(t));
+#endif
 	default:
 		return NULL;
 	}
@@ -453,10 +481,17 @@ sched_hook_fetch(void *root, void *arg __unused)
 	struct kevent evt[1], res[KQ_EVENTS];
 	register int i, flg;
 	int en;
-#ifdef EVFILT_AIO
+#ifdef AIO_SUPPORT
 	int len, fd;
 	struct aiocb *acb;
-#endif
+#ifdef EVFILT_LIO
+	int l;
+	register int j;
+	off_t off;
+	struct aiocb **acbs;
+	struct iovec *iv;
+#endif	/* EVFILT_LIO */
+#endif	/* AIO_SUPPORT */
 
 	if (!r)
 		return NULL;
@@ -519,7 +554,7 @@ sched_hook_fetch(void *root, void *arg __unused)
 			sched_timespecinf(&r->root_wait);
 	}
 #else
-	if (!TAILQ_FIRST(&r->root_eventlo) && (task = TAILQ_FIRST(&r->root_timer))) {
+	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
 		clock_gettime(CLOCK_MONOTONIC, &now);
 
 		m = TASK_TS(task);
@@ -530,8 +565,8 @@ sched_hook_fetch(void *root, void *arg __unused)
 			sched_timespecinf(&r->root_wait);
 	}
 #endif
-	/* if present member of eventLo, set NOWAIT */
-	if (TAILQ_FIRST(&r->root_eventlo))
+	/* if present member of task, set NOWAIT */
+	if (TAILQ_FIRST(&r->root_task))
 		sched_timespecclear(&r->root_wait);
 
 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
@@ -561,8 +596,11 @@ sched_hook_fetch(void *root, void *arg __unused)
 			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 				if (TASK_FD(task) != ((intptr_t) res[i].udata))
 					continue;
-				else
+				else {
 					flg++;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
+				}
 				/* remove read handle */
 #ifdef HAVE_LIBPTHREAD
 				pthread_mutex_lock(&r->root_mtx[taskREAD]);
@@ -611,8 +649,11 @@ sched_hook_fetch(void *root, void *arg __unused)
 			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 				if (TASK_FD(task) != ((intptr_t) res[i].udata))
 					continue;
-				else
+				else {
 					flg++;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
+				}
 				/* remove write handle */
 #ifdef HAVE_LIBPTHREAD
 				pthread_mutex_lock(&r->root_mtx[taskWRITE]);
@@ -661,8 +702,11 @@ sched_hook_fetch(void *root, void *arg __unused)
 			TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
 				if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
 					continue;
-				else
+				else {
 					flg++;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
+				}
 				/* remove alarm handle */
 #ifdef HAVE_LIBPTHREAD
 				pthread_mutex_lock(&r->root_mtx[taskALARM]);
@@ -691,8 +735,8 @@ sched_hook_fetch(void *root, void *arg __unused)
 					continue;
 				else {
 					flg++;
-					TASK_DATA(task) = (void*) (uintptr_t) res[i].data;
-					TASK_DATLEN(task) = res[i].fflags;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
 				}
 				/* remove node handle */
 #ifdef HAVE_LIBPTHREAD
@@ -722,8 +766,8 @@ sched_hook_fetch(void *root, void *arg __unused)
 					continue;
 				else {
 					flg++;
-					TASK_DATA(task) = (void*) (uintptr_t) res[i].data;
-					TASK_DATLEN(task) = res[i].fflags;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
 				}
 				/* remove proc handle */
 #ifdef HAVE_LIBPTHREAD
@@ -751,8 +795,11 @@ sched_hook_fetch(void *root, void *arg __unused)
 			TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
 				if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
 					continue;
-				else
+				else {
 					flg++;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
+				}
 				/* remove signal handle */
 #ifdef HAVE_LIBPTHREAD
 				pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
@@ -774,16 +821,18 @@ sched_hook_fetch(void *root, void *arg __unused)
 			if (flg > 1)
 				evt->flags ^= evt->flags;
 			break;
-#ifdef EVFILT_AIO
+#ifdef AIO_SUPPORT
 		case EVFILT_AIO:
 			flg = 0;
 			TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
 				acb = (struct aiocb*) TASK_VAL(task);
-				if (acb != ((struct aiocb*) res[i].ident) ||
-						acb->aio_sigevent.sigev_value.sival_ptr != res[i].udata)
+				if (acb != ((struct aiocb*) res[i].udata))
 					continue;
-				else
+				else {
 					flg++;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
+				}
 				/* remove user handle */
 #ifdef HAVE_LIBPTHREAD
 				pthread_mutex_lock(&r->root_mtx[taskAIO]);
@@ -806,16 +855,65 @@ sched_hook_fetch(void *root, void *arg __unused)
 						LOGERR;
 				} else
 					LOGERR;
-				free(acb);
 
-				TASK_FD(task) = (u_long) fd;
 				TASK_DATLEN(task) = (u_long) len;
+				TASK_FD(task) = fd;
 			}
 			/* if match at least 2, don't remove resouce of event */
 			if (flg > 1)
 				evt->flags ^= evt->flags;
 			break;
-#endif	/* EVFILT_AIO */
+#ifdef EVFILT_LIO
+		case EVFILT_LIO:
+			flg = 0;
+			TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
+				acbs = (struct aiocb**) TASK_VAL(task);
+				if (acbs != ((struct aiocb**) res[i].udata))
+					continue;
+				else {
+					flg++;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
+				}
+				/* remove user handle */
+#ifdef HAVE_LIBPTHREAD
+				pthread_mutex_lock(&r->root_mtx[taskLIO]);
+#endif
+				TAILQ_REMOVE(&r->root_lio, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+				pthread_mutex_unlock(&r->root_mtx[taskLIO]);
+#endif
+				task->task_type = taskREADY;
+#ifdef HAVE_LIBPTHREAD
+				pthread_mutex_lock(&r->root_mtx[taskREADY]);
+#endif
+				TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
+#ifdef HAVE_LIBPTHREAD
+				pthread_mutex_unlock(&r->root_mtx[taskREADY]);
+#endif
+				iv = (struct iovec*) TASK_DATA(task);
+				fd = acbs[0]->aio_fildes;
+				off = acbs[0]->aio_offset;
+				for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
+					if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
+						l = 0;
+					else
+						l = iv[i].iov_len;
+					free(acbs[i]);
+				}
+				free(acbs);
+				TASK_DATLEN(task) = (u_long) len;
+				TASK_FD(task) = fd;
+
+				if (lseek(fd, off + len, SEEK_CUR) == -1)
+					LOGERR;
+			}
+			/* if match at least 2, don't remove resouce of event */
+			if (flg > 1)
+				evt->flags ^= evt->flags;
+			break;
+#endif	/* EVFILT_LIO */
+#endif	/* AIO_SUPPORT */
 #ifdef EVFILT_USER
 		case EVFILT_USER:
 			flg = 0;
@@ -824,8 +922,8 @@ sched_hook_fetch(void *root, void *arg __unused)
 					continue;
 				else {
 					flg++;
-					TASK_DATA(task) = (void*) res[i].data;
-					TASK_DATLEN(task) = res[i].fflags;
+					TASK_RET(task) = res[i].data;
+					TASK_FLAG(task) = res[i].fflags;
 				}
 				/* remove user handle */
 #ifdef HAVE_LIBPTHREAD
@@ -881,18 +979,18 @@ sched_hook_fetch(void *root, void *arg __unused)
 #endif
 	}
 
-	/* put eventlo priority task to ready queue, if there is no ready task or
-			reach max missed fetch-rotate */
-	if ((task = TAILQ_FIRST(&r->root_eventlo))) {
-		if (!TAILQ_FIRST(&r->root_ready) || r->root_eventlo_miss > MAX_EVENTLO_MISS) {
-			r->root_eventlo_miss = 0;
+	/* put regular task priority task to ready queue,
+		if there is no ready task or reach max missing hit for regular task */
+	if ((task = TAILQ_FIRST(&r->root_task))) {
+		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
+			r->root_miss ^= r->root_miss;
 #ifdef HAVE_LIBPTHREAD
-			pthread_mutex_lock(&r->root_mtx[taskEVENTLO]);
+			pthread_mutex_lock(&r->root_mtx[taskTASK]);
#endif
-			TAILQ_REMOVE(&r->root_eventlo, task, task_node);
+			TAILQ_REMOVE(&r->root_task, task, task_node);
 #ifdef HAVE_LIBPTHREAD
-			pthread_mutex_unlock(&r->root_mtx[taskEVENTLO]);
+			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
 #endif
 			task->task_type = taskREADY;
 #ifdef HAVE_LIBPTHREAD
@@ -903,9 +1001,9 @@ sched_hook_fetch(void *root, void *arg __unused)
 			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
 #endif
 		} else
-			r->root_eventlo_miss++;
+			r->root_miss++;
 	} else
-		r->root_eventlo_miss = 0;
+		r->root_miss ^= r->root_miss;
 
 	/* OK, lets get ready task !!! */
 	task = TAILQ_FIRST(&r->root_ready);
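
Note on the new EVFILT_LIO path: the hunks above submit a whole list of aiocb's at once and collect the completed batch from a single kqueue event, identified by the acbs pointer carried in udata. Below is a minimal, self-contained sketch of that kernel-side flow on FreeBSD (lio_listio(2) with SIGEV_KEVENT notification, then one EVFILT_LIO kevent and aio_return(2) per request); the file name, chunk geometry, and variable names are illustrative only and are not part of libaitsched.

/* lio_kq_demo.c -- sketch: lio_listio(2) completion via EVFILT_LIO (FreeBSD) */
#include <sys/types.h>
#include <sys/event.h>
#include <aio.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define NCHUNKS	4
#define CHUNKSZ	4096

int
main(void)
{
	struct aiocb *acbs[NCHUNKS];
	struct sigevent sev;
	struct kevent ev;
	ssize_t total = 0, l;
	int fd, kq, i;

	if ((fd = open("/etc/services", O_RDONLY)) == -1)
		err(1, "open");
	if ((kq = kqueue()) == -1)
		err(1, "kqueue");

	/* one aiocb per chunk, reading consecutive file offsets */
	for (i = 0; i < NCHUNKS; i++) {
		if (!(acbs[i] = calloc(1, sizeof(struct aiocb))))
			err(1, "calloc");
		acbs[i]->aio_fildes = fd;
		acbs[i]->aio_offset = (off_t)i * CHUNKSZ;
		acbs[i]->aio_buf = malloc(CHUNKSZ);
		acbs[i]->aio_nbytes = CHUNKSZ;
		acbs[i]->aio_lio_opcode = LIO_READ;
	}

	/* one kevent for the whole list; udata will carry the acbs pointer */
	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_KEVENT;
	sev.sigev_notify_kqueue = kq;
	sev.sigev_value.sival_ptr = acbs;

	if (lio_listio(LIO_NOWAIT, acbs, NCHUNKS, &sev) == -1)
		err(1, "lio_listio");

	/* block until the EVFILT_LIO completion event arrives */
	if (kevent(kq, NULL, 0, &ev, 1, NULL) != 1)
		err(1, "kevent");

	/* reap every request exactly once and sum the byte counts */
	for (i = 0; i < NCHUNKS; i++) {
		if ((l = aio_return(acbs[i])) > 0)
			total += l;
		free((void *)acbs[i]->aio_buf);
		free(acbs[i]);
	}
	printf("list %p done, %zd bytes read\n", ev.udata, total);

	close(kq);
	close(fd);
	return (0);
}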
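Note on the revised cancel hook: it now calls aio_return() when aio_cancel() reports AIO_CANCELED, so the kernel's completion status is reaped before the aiocb is freed. The helper below is a slightly more defensive teardown sketch, under the assumption that a request the kernel refuses to cancel (AIO_NOTCANCELED) should be waited out before its aiocb is released; the name aio_teardown() is hypothetical and not part of libaitsched.

/* aio_teardown() -- sketch of a defensive cancel-and-reap for one request */
#include <aio.h>
#include <errno.h>
#include <stdlib.h>

static void
aio_teardown(struct aiocb *acb)
{
	const struct aiocb *list[1] = { acb };

	switch (aio_cancel(acb->aio_fildes, acb)) {
	case AIO_CANCELED:
	case AIO_ALLDONE:
		/* request is finished (or never ran); reap its status */
		(void) aio_return(acb);
		break;
	case AIO_NOTCANCELED:
		/* still in flight: wait for completion, then reap */
		while (aio_error(acb) == EINPROGRESS)
			(void) aio_suspend(list, 1, NULL);
		(void) aio_return(acb);
		break;
	default:
		/* -1: aio_cancel() itself failed */
		break;
	}
	free(acb);
}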