Diff of version 1.31.2.4 (2017/08/31 15:26:15) against version 1.35 (2021/06/08 21:45:07).
Lines removed in 1.35 are prefixed with "-", added lines with "+", unchanged context with a space.

Line 12 terms:

 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
 
-Copyright 2004 - 2017
+Copyright 2004 - 2021
 by Michael Pounov <misho@elwix.org>. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
Line 713 (old) / Line 713 (new): sched_hook_alarm(void *task, void *arg __unused)

 /*
  * sched_hook_node() - Default NODE hook
  *
  * @task = current task
- * @arg = unused
+ * @arg = if arg == 42 then waiting for all events
  * return: <0 errors and 0 ok
  */
 void *
-sched_hook_node(void *task, void *arg __unused)
+sched_hook_node(void *task, void *arg)
 {
 #if SUP_ENABLE == KQ_SUPPORT
 	sched_task_t *t = task;
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };
+	u_int addflags = (u_int) (uintptr_t) arg;
 
 	if (!t || !TASK_ROOT(t))
 		return (void*) -1;
Line 730 (old) / Line 731 (new): sched_hook_node(void *task, void *arg __unused)

 #ifdef __NetBSD__
 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
-			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
+			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (intptr_t) TASK_FD(t));
 #else
 	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
 			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
-			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
+			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (void*) TASK_FD(t));
 #endif
 	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
 		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
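The node hook's arg parameter is no longer dead weight: it is decoded as extra EVFILT_VNODE fflags (addflags) and OR'd into the default NOTE_* set when the vnode kevent is registered, which is why the __unused annotation is dropped from the signature. A minimal sketch of how a caller could exploit this, assuming the scheduler's own header is included and the hook is invoked directly (the library normally reaches it through root_hooks); NOTE_OPEN is only an example flag and is not available on every kqueue platform (FreeBSD 12+):

#include <sys/event.h>
#include <stdint.h>

static int
watch_node_with_open_events(sched_task_t *t)
{
	/* the hook decodes arg as: addflags = (u_int) (uintptr_t) arg */
	if (sched_hook_node(t, (void*) (uintptr_t) NOTE_OPEN) != NULL)
		return (-1);	/* per the comment above: "<0 errors and 0 ok" */
	return (0);
}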
Line 866 (old) / Line 867 (new): static inline void

 fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
 {
 	struct kevent evt[1];
-	register int i;
+	register int i, flg;
 	sched_task_t *task, *tmp;
 	struct timespec now = { 0, 0 };
 #ifdef AIO_SUPPORT
Line 884 (old) / Line 885 (new): fetch_hook_kevent_proceed(int en, struct kevent *res,

 		memcpy(evt, &res[i], sizeof evt);
 		evt->flags = EV_DELETE;
 		/* Put read/write task to ready queue */
+		flg = 0;
 		switch (res[i].filter) {
 			case EVFILT_READ:
 				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove read handle */
-						remove_task_from(task, &r->root_read);
-
-						if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
-							if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
-								task->task_type = taskUNUSE;
-								insert_task_to(task, &r->root_unuse);
-							} else {
-								task->task_type = taskREADY;
-								insert_task_to(task, &r->root_ready);
-							}
-						} else {
-							task->task_type = taskREADY;
-							insert_task_to(task, &r->root_ready);
-						}
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove read handle */
+							remove_task_from(task, &r->root_read);
+
+							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
+								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
+									task->task_type = taskUNUSE;
+									insert_task_to(task, &r->root_unuse);
+								} else {
+									task->task_type = taskREADY;
+									insert_task_to(task, &r->root_ready);
+								}
+							} else {
+								task->task_type = taskREADY;
+								insert_task_to(task, &r->root_ready);
+							}
+						}
+						flg++;
 					}
 				}
 				break;
 			case EVFILT_WRITE:
 				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove write handle */
-						remove_task_from(task, &r->root_write);
-
-						if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
-							if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
-								task->task_type = taskUNUSE;
-								insert_task_to(task, &r->root_unuse);
-							} else {
-								task->task_type = taskREADY;
-								insert_task_to(task, &r->root_ready);
-							}
-						} else {
-							task->task_type = taskREADY;
-							insert_task_to(task, &r->root_ready);
-						}
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove write handle */
+							remove_task_from(task, &r->root_write);
+
+							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
+								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
+									task->task_type = taskUNUSE;
+									insert_task_to(task, &r->root_unuse);
+								} else {
+									task->task_type = taskREADY;
+									insert_task_to(task, &r->root_ready);
+								}
+							} else {
+								task->task_type = taskREADY;
+								insert_task_to(task, &r->root_ready);
+							}
+						}
+						flg++;
 					}
 				}
 				break;
 			case EVFILT_TIMER:
 				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
 					if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove alarm handle */
-						transit_task2ready(task, &r->root_alarm);
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove alarm handle */
+							transit_task2ready(task, &r->root_alarm);
+						}
+						flg++;
 					}
 				}
 				break;
 			case EVFILT_VNODE:
 				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
 					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove node handle */
-						transit_task2ready(task, &r->root_node);
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove node handle */
+							transit_task2ready(task, &r->root_node);
+						}
+						flg++;
 					}
 				}
 				break;
 			case EVFILT_PROC:
 				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove proc handle */
-						transit_task2ready(task, &r->root_proc);
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove proc handle */
+							transit_task2ready(task, &r->root_proc);
+						}
+						flg++;
 					}
 				}
 				break;
 			case EVFILT_SIGNAL:
 				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove signal handle */
-						transit_task2ready(task, &r->root_signal);
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove signal handle */
+							transit_task2ready(task, &r->root_signal);
+						}
+						flg++;
 					}
 				}
 				break;
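All of these dispatch loops walk their queue with TAILQ_FOREACH_SAFE because the loop body (remove_task_from(), transit_task2ready()) unlinks the current element while iteration is still in progress; the _SAFE form saves the successor in tmp before the body runs, so removal does not derail the walk. A self-contained illustration of the same BSD queue(3) idiom; the struct and function names here are hypothetical, not part of the scheduler:

#include <sys/queue.h>
#include <stdlib.h>

struct item {
	int fd;
	TAILQ_ENTRY(item) node;
};
TAILQ_HEAD(item_list, item);

static void
drop_matching(struct item_list *list, int fd)
{
	struct item *it, *tmp;

	TAILQ_FOREACH_SAFE(it, list, node, tmp) {
		if (it->fd == fd) {
			TAILQ_REMOVE(list, it, node);	/* safe: tmp was saved first */
			free(it);
		}
	}
}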
Line 988 (old) / Line 1002 (new): fetch_hook_kevent_proceed(int en, struct kevent *res,

 				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
 					acb = (struct aiocb*) TASK_VAL(task);
 					if (acb == ((struct aiocb*) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove user handle */
-						transit_task2ready(task, &r->root_aio);
-
-						fd = acb->aio_fildes;
-						if ((len = aio_return(acb)) != -1) {
-							if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
-								LOGERR;
-						} else
-							LOGERR;
-						free(acb);
-						TASK_DATLEN(task) = (u_long) len;
-						TASK_FD(task) = fd;
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove user handle */
+							transit_task2ready(task, &r->root_aio);
+
+							fd = acb->aio_fildes;
+							if ((len = aio_return(acb)) != -1) {
+								if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
+									LOGERR;
+							} else
+								LOGERR;
+							free(acb);
+							TASK_DATLEN(task) = (u_long) len;
+							TASK_FD(task) = fd;
+						}
+						flg++;
 					}
 				}
 				break;
Line 1012 (old) / Line 1028 (new): fetch_hook_kevent_proceed(int en, struct kevent *res,

 				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
 					acbs = (struct aiocb**) TASK_VAL(task);
 					if (acbs == ((struct aiocb**) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove user handle */
-						transit_task2ready(task, &r->root_lio);
-
-						iv = (struct iovec*) TASK_DATA(task);
-						fd = acbs[0]->aio_fildes;
-						off = acbs[0]->aio_offset;
-						for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
-							if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
-								l = 0;
-							else
-								l = iv[i].iov_len;
-							free(acbs[i]);
-						}
-						free(acbs);
-						TASK_DATLEN(task) = (u_long) len;
-						TASK_FD(task) = fd;
-
-						if (lseek(fd, off + len, SEEK_CUR) == -1)
-							LOGERR;
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove user handle */
+							transit_task2ready(task, &r->root_lio);
+
+							iv = (struct iovec*) TASK_DATA(task);
+							fd = acbs[0]->aio_fildes;
+							off = acbs[0]->aio_offset;
+							for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
+								if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
+									l = 0;
+								else
+									l = iv[i].iov_len;
+								free(acbs[i]);
+							}
+							free(acbs);
+							TASK_DATLEN(task) = (u_long) len;
+							TASK_FD(task) = fd;
+
+							if (lseek(fd, off + len, SEEK_CUR) == -1)
+								LOGERR;
+						}
+						flg++;
 					}
 				}
 				break;
Line 1044 (old) / Line 1062 (new): fetch_hook_kevent_proceed(int en, struct kevent *res,

 			case EVFILT_USER:
 				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
 					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
-						TASK_RET(task) = res[i].data;
-						TASK_FLAG(task) = (u_long) res[i].fflags;
-
-						/* remove user handle */
-						transit_task2ready(task, &r->root_user);
-						break;
+						if (!flg) {
+							TASK_RET(task) = res[i].data;
+							TASK_FLAG(task) = (u_long) res[i].fflags;
+
+							/* remove user handle */
+							transit_task2ready(task, &r->root_user);
+						}
+						flg++;
 					}
 				}
 				break;
 #endif	/* EVFILT_USER */
 		}
 
+		if (flg > 1)
+			evt->flags &= ~EV_DELETE;
+
 		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
 			if (r->root_hooks.hook_exec.exception)
 				r->root_hooks.hook_exec.exception(r, NULL);
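Together with the flg counting above, this is the heart of the change: each kevent wakeup now services only the first task matching the event (identified through udata in the code above), counts every further match, and clears EV_DELETE when more than one task shares that ident, so the kevent stays registered for the remaining subscribers instead of being dropped after the first dispatch. Previously the loop stopped at the first match and the event was always deleted. A reduced, self-contained sketch of that pattern; the subscriber type and callback are hypothetical stand-ins, not structures from the code above:

#include <sys/event.h>
#include <sys/queue.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

struct sub {
	uintptr_t ident;			/* fd, pid, signal... being watched */
	void (*cb)(struct kevent *);		/* the task's handler */
	TAILQ_ENTRY(sub) node;
};
TAILQ_HEAD(sub_list, sub);

static void
dispatch_one(int kq, struct kevent *ev, struct sub_list *subs)
{
	struct sub *s, *tmp;
	struct kevent del;
	struct timespec now = { 0, 0 };
	int matched = 0;

	memcpy(&del, ev, sizeof(del));
	del.flags = EV_DELETE;			/* assume the event will be dropped */

	TAILQ_FOREACH_SAFE(s, subs, node, tmp) {
		if (s->ident != ev->ident)
			continue;
		if (!matched)
			s->cb(ev);		/* only the first match is serviced */
		matched++;			/* the rest are merely counted */
	}

	if (matched > 1)			/* others still share this ident: */
		del.flags &= ~EV_DELETE;	/* keep the kevent registered */
	(void) kevent(kq, &del, 1, NULL, 0, &now);
}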
Line 1300 (old) / Line 1323 (new): sched_hook_fetch(void *root, void *arg __unused)

 		return task;
 	}
 
+	/* if present member of task, set NOWAIT */
+	if (!TAILQ_FIRST(&r->root_task)) {
+		/* timer tasks */
 #ifdef TIMER_WITHOUT_SORT
-	clock_gettime(CLOCK_MONOTONIC, &now);
-
-	sched_timespecclear(&r->root_wait);
-	TAILQ_FOREACH(task, &r->root_timer, task_node) {
-		if (!sched_timespecisset(&r->root_wait))
-			r->root_wait = TASK_TS(task);
-		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
-			r->root_wait = TASK_TS(task);
-	}
-
-	if (TAILQ_FIRST(&r->root_timer)) {
-		m = r->root_wait;
-		sched_timespecsub(&m, &now, &mtmp);
-		r->root_wait = mtmp;
-	} else {
-		/* set wait INFTIM */
-		sched_timespecinf(&r->root_wait);
-	}
+		clock_gettime(CLOCK_MONOTONIC, &now);
+
+		sched_timespecclear(&r->root_wait);
+		TAILQ_FOREACH(task, &r->root_timer, task_node) {
+			if (!sched_timespecisset(&r->root_wait))
+				r->root_wait = TASK_TS(task);
+			else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
+				r->root_wait = TASK_TS(task);
+		}
+
+		if (TAILQ_FIRST(&r->root_timer)) {
+			m = r->root_wait;
+			sched_timespecsub(&m, &now, &mtmp);
+			r->root_wait = mtmp;
+		} else {
+			/* set wait INFTIM */
+			sched_timespecinf(&r->root_wait);
+		}
 #else	/* ! TIMER_WITHOUT_SORT */
-	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
-		clock_gettime(CLOCK_MONOTONIC, &now);
-
-		m = TASK_TS(task);
-		sched_timespecsub(&m, &now, &mtmp);
-		r->root_wait = mtmp;
-	} else {
-		/* set wait INFTIM */
-		sched_timespecinf(&r->root_wait);
-	}
+		if ((task = TAILQ_FIRST(&r->root_timer))) {
+			clock_gettime(CLOCK_MONOTONIC, &now);
+
+			m = TASK_TS(task);
+			sched_timespecsub(&m, &now, &mtmp);
+			r->root_wait = mtmp;
+		} else {
+			/* set wait INFTIM */
+			sched_timespecinf(&r->root_wait);
+		}
 #endif	/* TIMER_WITHOUT_SORT */
-	/* if present member of task, set NOWAIT */
-	if (TAILQ_FIRST(&r->root_task))
+	} else	/* no waiting for event, because we have ready task */
 		sched_timespecclear(&r->root_wait);
 
 	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
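The reordering makes the wait policy read top-down: if a ready task is already queued on root_task the root wait is simply cleared (NOWAIT, so kevent() only polls), and only when nothing is ready is the wait derived from the nearest timer task, falling back to INFTIM when no timers exist. The result appears functionally the same as the old code, which always computed the timer wait and then overrode it, but the override can no longer be missed in one of the #ifdef branches and the timer scan is skipped entirely when work is already pending. A small standalone sketch of that policy, assuming only POSIX clock_gettime(); the struct and helper names are illustrative, not the library's:

#include <time.h>

struct wait_policy {
	int infinite;			/* no timers and nothing ready: block in kevent() */
	struct timespec ts;		/* otherwise: how long kevent() may sleep */
};

static struct wait_policy
compute_wait(int have_ready_task, int have_timer, struct timespec earliest)
{
	struct wait_policy w = { 0, { 0, 0 } };
	struct timespec now;

	if (have_ready_task)
		return (w);			/* NOWAIT: ts stays {0,0}, just poll */

	if (!have_timer) {
		w.infinite = 1;			/* INFTIM: wait for kernel events only */
		return (w);
	}

	clock_gettime(CLOCK_MONOTONIC, &now);	/* same clock the hook uses */
	w.ts.tv_sec = earliest.tv_sec - now.tv_sec;
	w.ts.tv_nsec = earliest.tv_nsec - now.tv_nsec;
	if (w.ts.tv_nsec < 0) {			/* normalize the subtraction */
		w.ts.tv_sec--;
		w.ts.tv_nsec += 1000000000L;
	}
	if (w.ts.tv_sec < 0)			/* timer already due: poll */
		w.ts.tv_sec = w.ts.tv_nsec = 0;
	return (w);
}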