--- version 1.10.2.9, 2012/08/02 12:32:07
+++ version 1.15, 2012/08/23 02:33:12
Line 107 (1.10.2.9) / Line 107 (1.15)  sched_hook_cancel(void *task, void *arg __unused)

      sched_task_t *t = task;
      struct kevent chg[1];
      struct timespec timeout = { 0, 0 };
- #if defined(EVFILT_AIO) && defined(SIGEV_KEVENT)
+ #ifdef AIO_SUPPORT
      struct aiocb *acb;
- #endif
+ #ifdef EVFILT_LIO
+     register int i = 0;
+     struct aiocb **acbs;
+ #endif /* EVFILT_LIO */
+ #endif /* AIO_SUPPORT */

      if (!t || !TASK_ROOT(t))
          return (void*) -1;
Line 159 (1.10.2.9) / Line 163 (1.15)  sched_hook_cancel(void *task, void *arg __unused)

          EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  #endif
          break;
- #if defined(EVFILT_AIO) && defined(SIGEV_KEVENT)
+ #ifdef AIO_SUPPORT
      case taskAIO:
  #ifdef __NetBSD__
          EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
Line 174 (1.10.2.9) / Line 178 (1.15)  sched_hook_cancel(void *task, void *arg __unused)

              TASK_VAL(t) = 0;
          }
          break;
+ #ifdef EVFILT_LIO
+     case taskLIO:
+ #ifdef __NetBSD__
+         EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
+ #else
+         EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
  #endif
+         acbs = (struct aiocb**) TASK_VAL(t);
+         if (acbs) {
+             for (i = 0; i < TASK_DATLEN(t); i++) {
+                 if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
+                     aio_return(acbs[i]);
+                 free(acbs[i]);
+             }
+             free(acbs);
+             TASK_VAL(t) = 0;
+         }
+         break;
+ #endif /* EVFILT_LIO */
+ #endif /* AIO_SUPPORT */
  #ifdef EVFILT_USER
      case taskUSER:
  #ifdef __NetBSD__
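The new taskLIO branch cancels a whole batch of outstanding POSIX AIO requests before releasing their control blocks. A minimal, self-contained sketch of that cancel-and-reap pattern (plain POSIX AIO; cancel_aio_batch is a hypothetical helper, not part of the library):

#include <aio.h>
#include <stdlib.h>

/* Cancel a batch of queued AIO requests and free their control blocks.
 * A request that was actually cancelled is reaped with aio_return() so
 * the kernel can drop its state, mirroring the taskLIO case above. */
static void
cancel_aio_batch(struct aiocb **acbs, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
            aio_return(acbs[i]);
        free(acbs[i]);
    }
    free(acbs);
}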
Line 184 (1.10.2.9) / Line 207 (1.15)  sched_hook_cancel(void *task, void *arg __unused)

  #endif
          break;
  #endif
+     case taskTHREAD:
+ #ifdef HAVE_LIBPTHREAD
+         pthread_cancel((pthread_t) TASK_VAL(t));
+ #endif
      default:
          return NULL;
      }
Line 192 (1.10.2.9) / Line 219 (1.15)  sched_hook_cancel(void *task, void *arg __unused)

      return NULL;
  }

+ #ifdef HAVE_LIBPTHREAD
  /*
+  * sched_hook_thread() - Default THREAD hook
+  *
+  * @task = current task
+  * @arg = pthread attributes
+  * return: <0 errors and 0 ok
+  */
+ void *
+ sched_hook_thread(void *task, void *arg)
+ {
+     sched_task_t *t = task;
+     pthread_t tid;
+     sigset_t s, o;
+
+     if (!t || !TASK_ROOT(t))
+         return (void*) -1;
+
+     sigfillset(&s);
+     pthread_sigmask(SIG_BLOCK, &s, &o);
+     if (pthread_create(&tid, (pthread_attr_t*) arg,
+             (void *(*)(void*)) _sched_threadWrapper, t)) {
+         LOGERR;
+         pthread_sigmask(SIG_SETMASK, &o, NULL);
+         return (void*) -1;
+     } else
+         TASK_VAL(t) = (u_long) tid;
+
+     if (!TASK_ISLOCKED(t))
+         TASK_LOCK(t);
+
+     pthread_sigmask(SIG_SETMASK, &o, NULL);
+     return NULL;
+ }
+ #endif
+
+ /*
   * sched_hook_read() - Default READ hook
   *
   * @task = current task
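The added sched_hook_thread() blocks every signal before pthread_create() so the new thread inherits a fully blocked mask (keeping scheduler signals out of worker threads), then restores the caller's mask. A self-contained sketch of that masking pattern (illustrative only; spawn_masked and start are placeholder names):

#include <pthread.h>
#include <signal.h>

/* Create a thread whose initial signal mask blocks everything. */
static int
spawn_masked(pthread_t *tid, void *(*start)(void *), void *arg)
{
    sigset_t all, old;
    int error;

    sigfillset(&all);
    pthread_sigmask(SIG_BLOCK, &all, &old);     /* child inherits this mask */
    error = pthread_create(tid, NULL, start, arg);
    pthread_sigmask(SIG_SETMASK, &old, NULL);   /* restore the caller's mask */
    return error;
}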
Line 454 (1.10.2.9) / Line 517 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

      struct kevent evt[1], res[KQ_EVENTS];
      register int i, flg;
      int en;
- #if defined(EVFILT_AIO) && defined(SIGEV_KEVENT)
+ #ifdef AIO_SUPPORT
      int len, fd;
      struct aiocb *acb;
  #ifdef EVFILT_LIO
Line 464 (1.10.2.9) / Line 527 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

      struct aiocb **acbs;
      struct iovec *iv;
  #endif /* EVFILT_LIO */
- #endif /* EVFILT_AIO */
+ #endif /* AIO_SUPPORT */

      if (!r)
          return NULL;
Line 527 (1.10.2.9) / Line 590 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

          sched_timespecinf(&r->root_wait);
      }
  #else
-     if (!TAILQ_FIRST(&r->root_eventlo) && (task = TAILQ_FIRST(&r->root_timer))) {
+     if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
          clock_gettime(CLOCK_MONOTONIC, &now);

          m = TASK_TS(task);
Line 538 (1.10.2.9) / Line 601 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

          sched_timespecinf(&r->root_wait);
      }
  #endif
-     /* if present member of eventLo, set NOWAIT */
-     if (TAILQ_FIRST(&r->root_eventlo))
+     /* if present member of task, set NOWAIT */
+     if (TAILQ_FIRST(&r->root_task))
          sched_timespecclear(&r->root_wait);

      if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
Line 569 (1.10.2.9) / Line 632 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

              TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
                  if (TASK_FD(task) != ((intptr_t) res[i].udata))
                      continue;
-                 else
+                 else {
                      flg++;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
+                 }
                  /* remove read handle */
  #ifdef HAVE_LIBPTHREAD
                  pthread_mutex_lock(&r->root_mtx[taskREAD]);
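The recurring change in this and the following hunks is that each hook now also stores the kevent result in the task: res[i].data goes to TASK_RET() and res[i].fflags to TASK_FLAG(). A minimal kqueue snippet showing where those two fields come from (illustrative only; wait_readable is a hypothetical helper):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

/* Wait for one read event on fd; return the filter-specific result
 * (bytes readable for EVFILT_READ), i.e. the value stashed in TASK_RET(). */
static intptr_t
wait_readable(int kq, int fd)
{
    struct kevent chg, res;

    EV_SET(&chg, fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
    if (kevent(kq, &chg, 1, &res, 1, NULL) < 1)
        return -1;
    /* res.fflags carries filter flags - the TASK_FLAG() value. */
    return res.data;
}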
Line 619 (1.10.2.9) / Line 685 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

              TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
                  if (TASK_FD(task) != ((intptr_t) res[i].udata))
                      continue;
-                 else
+                 else {
                      flg++;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
+                 }
                  /* remove write handle */
  #ifdef HAVE_LIBPTHREAD
                  pthread_mutex_lock(&r->root_mtx[taskWRITE]);
Line 669 (1.10.2.9) / Line 738 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

              TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
                  if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
                      continue;
-                 else
+                 else {
                      flg++;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
+                 }
                  /* remove alarm handle */
  #ifdef HAVE_LIBPTHREAD
                  pthread_mutex_lock(&r->root_mtx[taskALARM]);
Line 699 (1.10.2.9) / Line 771 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

                      continue;
                  else {
                      flg++;
-                     TASK_DATA(task) = (void*) (uintptr_t) res[i].data;
-                     TASK_DATLEN(task) = res[i].fflags;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
                  }
                  /* remove node handle */
  #ifdef HAVE_LIBPTHREAD
Line 730 (1.10.2.9) / Line 802 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

                      continue;
                  else {
                      flg++;
-                     TASK_DATA(task) = (void*) (uintptr_t) res[i].data;
-                     TASK_DATLEN(task) = res[i].fflags;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
                  }
                  /* remove proc handle */
  #ifdef HAVE_LIBPTHREAD
Line 759 (1.10.2.9) / Line 831 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

              TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
                  if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
                      continue;
-                 else
+                 else {
                      flg++;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
+                 }
                  /* remove signal handle */
  #ifdef HAVE_LIBPTHREAD
                  pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
Line 782 (1.10.2.9) / Line 857 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

              if (flg > 1)
                  evt->flags ^= evt->flags;
              break;
- #if defined(EVFILT_AIO) && defined(SIGEV_KEVENT)
+ #ifdef AIO_SUPPORT
          case EVFILT_AIO:
- #ifdef EVFILT_LIO
-         case EVFILT_LIO:
- #endif /* EVFILT_LIO */
              flg = 0;
              TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
                  acb = (struct aiocb*) TASK_VAL(task);
                  if (acb != ((struct aiocb*) res[i].udata))
                      continue;
-                 else
+                 else {
                      flg++;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
+                 }
                  /* remove user handle */
  #ifdef HAVE_LIBPTHREAD
                  pthread_mutex_lock(&r->root_mtx[taskAIO]);
Line 810 (1.10.2.9) / Line 885 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

  #ifdef HAVE_LIBPTHREAD
                  pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  #endif
- #ifdef EVFILT_LIO
-                 if (res[i].filter == EVFILT_LIO) {
-                     acbs = (struct aiocb**) TASK_VAL(task);
-                     iv = (struct iovec*) TASK_DATA(task);
-                     fd = acbs[0]->aio_fildes;
-                     off = acbs[0]->aio_offset;
-                     for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
-                         if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
-                             l = 0;
-                         else
-                             l = iv[i].iov_len;
-                         free(acbs[i]);
-                     }
-                     free(acbs);
-                     if (lseek(fd, off + len, SEEK_CUR) == -1)
-                         LOGERR;
-                 } else
- #endif /* EVFILT_LIO */
-                 {
-                     fd = acb->aio_fildes;
-                     if ((len = aio_return(acb)) != -1) {
-                         if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
-                             LOGERR;
-                     } else
-                         LOGERR;
-                     free(acb);
-                 }
+                 fd = acb->aio_fildes;
+                 if ((len = aio_return(acb)) != -1) {
+                     if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
+                         LOGERR;
+                 } else
+                     LOGERR;
+                 free(acb);
                  TASK_DATLEN(task) = (u_long) len;
                  TASK_FD(task) = fd;
              }
              /* if match at least 2, don't remove resouce of event */
              if (flg > 1)
                  evt->flags ^= evt->flags;
              break;
- #endif /* EVFILT_AIO */
+ #ifdef EVFILT_LIO
+         case EVFILT_LIO:
+             flg = 0;
+             TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
+                 acbs = (struct aiocb**) TASK_VAL(task);
+                 if (acbs != ((struct aiocb**) res[i].udata))
+                     continue;
+                 else {
+                     flg++;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
+                 }
+                 /* remove user handle */
+ #ifdef HAVE_LIBPTHREAD
+                 pthread_mutex_lock(&r->root_mtx[taskLIO]);
+ #endif
+                 TAILQ_REMOVE(&r->root_lio, task, task_node);
+ #ifdef HAVE_LIBPTHREAD
+                 pthread_mutex_unlock(&r->root_mtx[taskLIO]);
+ #endif
+                 task->task_type = taskREADY;
+ #ifdef HAVE_LIBPTHREAD
+                 pthread_mutex_lock(&r->root_mtx[taskREADY]);
+ #endif
+                 TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
+ #ifdef HAVE_LIBPTHREAD
+                 pthread_mutex_unlock(&r->root_mtx[taskREADY]);
+ #endif
+                 iv = (struct iovec*) TASK_DATA(task);
+                 fd = acbs[0]->aio_fildes;
+                 off = acbs[0]->aio_offset;
+                 for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
+                     if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
+                         l = 0;
+                     else
+                         l = iv[i].iov_len;
+                     free(acbs[i]);
+                 }
+                 free(acbs);
+                 TASK_DATLEN(task) = (u_long) len;
+                 TASK_FD(task) = fd;
+
+                 if (lseek(fd, off + len, SEEK_CUR) == -1)
+                     LOGERR;
+             }
+             /* if match at least 2, don't remove resouce of event */
+             if (flg > 1)
+                 evt->flags ^= evt->flags;
+             break;
+ #endif /* EVFILT_LIO */
+ #endif /* AIO_SUPPORT */
  #ifdef EVFILT_USER
          case EVFILT_USER:
              flg = 0;
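The new EVFILT_LIO case reaps a completed lio_listio() batch: each element's aio_return() value is written back into the matching iovec and the total is used to advance the file offset. A rough standalone sketch of that reaping step (reap_lio_batch is a hypothetical helper, not the library's code):

#include <sys/types.h>
#include <sys/uio.h>
#include <aio.h>
#include <stdlib.h>

/* Collect the per-request byte counts of a finished lio_listio() batch
 * into iv[] and return the total; the caller may lseek() forward by it. */
static off_t
reap_lio_batch(struct aiocb **acbs, struct iovec *iv, int count)
{
    off_t total = 0;
    ssize_t n;
    int i;

    for (i = 0; i < count; i++) {
        n = aio_return(acbs[i]);
        iv[i].iov_len = (n == -1) ? 0 : (size_t) n;
        total += iv[i].iov_len;
        free(acbs[i]);
    }
    free(acbs);
    return total;
}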
Line 854 (1.10.2.9) / Line 958 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

                      continue;
                  else {
                      flg++;
-                     TASK_DATA(task) = (void*) res[i].data;
-                     TASK_DATLEN(task) = res[i].fflags;
+                     TASK_RET(task) = res[i].data;
+                     TASK_FLAG(task) = res[i].fflags;
                  }
                  /* remove user handle */
  #ifdef HAVE_LIBPTHREAD
Line 911 (1.10.2.9) / Line 1015 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

  #endif
      }

-     /* put eventlo priority task to ready queue, if there is no ready task or
-        reach max missed fetch-rotate */
-     if ((task = TAILQ_FIRST(&r->root_eventlo))) {
-         if (!TAILQ_FIRST(&r->root_ready) || r->root_eventlo_miss > MAX_EVENTLO_MISS) {
-             r->root_eventlo_miss = 0;
+     /* put regular task priority task to ready queue,
+        if there is no ready task or reach max missing hit for regular task */
+     if ((task = TAILQ_FIRST(&r->root_task))) {
+         if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
+             r->root_miss ^= r->root_miss;

  #ifdef HAVE_LIBPTHREAD
-             pthread_mutex_lock(&r->root_mtx[taskEVENTLO]);
+             pthread_mutex_lock(&r->root_mtx[taskTASK]);
  #endif
-             TAILQ_REMOVE(&r->root_eventlo, task, task_node);
+             TAILQ_REMOVE(&r->root_task, task, task_node);
  #ifdef HAVE_LIBPTHREAD
-             pthread_mutex_unlock(&r->root_mtx[taskEVENTLO]);
+             pthread_mutex_unlock(&r->root_mtx[taskTASK]);
  #endif
              task->task_type = taskREADY;
  #ifdef HAVE_LIBPTHREAD
Line 933 (1.10.2.9) / Line 1037 (1.15)  sched_hook_fetch(void *root, void *arg __unused)

              pthread_mutex_unlock(&r->root_mtx[taskREADY]);
  #endif
          } else
-             r->root_eventlo_miss++;
+             r->root_miss++;
      } else
-         r->root_eventlo_miss = 0;
+         r->root_miss ^= r->root_miss;

      /* OK, lets get ready task !!! */
      task = TAILQ_FIRST(&r->root_ready);
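The renamed queue keeps the old anti-starvation policy: the head of the low-priority queue runs at once when nothing is ready, and otherwise accumulates one miss per fetch until its threshold (now the per-task TASK_VAL() instead of a global MAX_EVENTLO_MISS) forces it through. A toy illustration of that policy with hypothetical names:

/* Decide whether the head of the low-priority queue should run now.
 * `miss' counts consecutive fetches that skipped it; `threshold' plays
 * the role TASK_VAL() has in the new code. */
static int
should_promote(int have_ready_task, unsigned *miss, unsigned threshold)
{
    if (!have_ready_task || *miss >= threshold) {
        *miss = 0;
        return 1;
    }
    (*miss)++;
    return 0;
}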