--- version 1.39.4.1, 2023/02/23 17:13:01
+++ version 1.41.4.1, 2023/08/17 14:13:07
@@ -362 +362 @@ sched_hook_cancel(void *task, void *arg __unused)
 		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
 				0, 0, (void*) TASK_VAL(t));
 #endif
-		/* restore signal */
-		if (flg < 2)
-			signal(TASK_VAL(t), SIG_DFL);
-#else
-		if (flg < 2)
-			sigdelset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
 #endif
+		if (flg < 2) {
+			pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
+			sigdelset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
+			pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
+		}
 		break;
 #ifdef AIO_SUPPORT
 	case taskAIO:
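
Note on this hunk: the old code restored the default disposition with signal(TASK_VAL(t), SIG_DFL) on kqueue builds and updated root_sigset without locking on the others; the new code funnels both builds through a single mutex-protected update of the root signal set. Below is a minimal sketch of that locking pattern, with hypothetical demo_* names that are not part of the libaitsched API:

	/* Sketch: a shared sigset_t guarded by a mutex, so a signal-handling
	 * thread and the cancel/attach paths never race on the set.
	 * demo_root and demo_cancel_signal are illustrative names only. */
	#include <pthread.h>
	#include <signal.h>

	struct demo_root {
		sigset_t	sigset;	/* signals the scheduler watches */
		pthread_mutex_t	sigmtx;	/* guards sigset */
	};

	static void
	demo_cancel_signal(struct demo_root *r, int signo)
	{
		pthread_mutex_lock(&r->sigmtx);
		sigdelset(&r->sigset, signo);
		pthread_mutex_unlock(&r->sigmtx);
	}
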
@@ -818 +817 @@ void *
 sched_hook_signal(void *task, void *arg __unused)
 {
 	sched_task_t *t = task;
 
 #if SUP_ENABLE == KQ_SUPPORT
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };
+#endif
 
 	if (!t || !TASK_ROOT(t))
 		return (void*) -1;
 
-	/* ignore signal */
-	signal(TASK_VAL(t), SIG_IGN);
-
+#if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
 #else
@@ -841 +838 @@ sched_hook_signal(void *task, void *arg __unused)
 			LOGERR;
 		return (void*) -1;
 	}
-#else
-	sigaddset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
 #endif
 
+	pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
+	sigaddset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
+	pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
+
 	return NULL;
 }
 
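
Note on the two hunks above: sched_hook_signal() no longer sets the handler to SIG_IGN via signal(TASK_VAL(t), SIG_IGN); the EVFILT_SIGNAL registration stays but is now fenced by #if SUP_ENABLE == KQ_SUPPORT, and the signal is always recorded in root_sigset under root_sigmtx. For context, a hedged sketch of how such an EVFILT_SIGNAL registration typically looks on a kqueue platform (illustrative only, not the library's code; error handling reduced to a return value):

	#include <sys/types.h>
	#include <sys/event.h>
	#include <sys/time.h>

	static int
	demo_watch_signal(int kq, int signo)
	{
		struct kevent chg;
		struct timespec timeout = { 0, 0 };

		/* kqueue delivers the event even while the signal is blocked */
		EV_SET(&chg, signo, EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, 0);
		return kevent(kq, &chg, 1, NULL, 0, &timeout);
	}
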
@@ -1343 +1342 @@ sched_hook_fetch(void *root, void *arg __unused)
 {
 	sched_root_task_t *r = root;
 	sched_task_t *task, *tmp;
-	struct timespec now, m, mtmp;
+	struct timespec now, m, mtmp, *tsmin;
 #if SUP_ENABLE == KQ_SUPPORT
 	struct kevent res[KQ_EVENTS];
 	struct timespec *timeout;
@@ -1386 +1385 @@ sched_hook_fetch(void *root, void *arg __unused)
 	if (TAILQ_FIRST(&r->root_timer)) {
 		m = r->root_wait;
 		sched_timespecsub(&m, &now, &mtmp);
-		r->root_wait = mtmp;
+		if (mtmp.tv_sec < 0 || mtmp.tv_nsec < 0)
+			/* don't wait for events. we have ready timer */
+			sched_timespecclear(&r->root_wait);
+		else
+			r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
 		sched_timespecinf(&r->root_wait);
@@ -1397 +1400 @@ sched_hook_fetch(void *root, void *arg __unused)
 
 			m = TASK_TS(task);
 			sched_timespecsub(&m, &now, &mtmp);
-			r->root_wait = mtmp;
+			if (mtmp.tv_sec < 0 || mtmp.tv_nsec < 0)
+				/* don't wait for events. we have ready timer */
+				sched_timespecclear(&r->root_wait);
+			else
+				r->root_wait = mtmp;
 		} else {
 			/* set wait INFTIM */
 			sched_timespecinf(&r->root_wait);
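
Note on the two hunks above: sched_timespecsub(&m, &now, &mtmp) can produce a negative remainder when the nearest timer has already expired, and the old code stored that negative value into r->root_wait and handed it to the kernel wait. The fix clamps it to a zero wait so the fetch polls without blocking. A self-contained sketch of the same idea, assuming the helper computes a plain component-wise difference (demo_clamped_wait is a hypothetical name, not the library's helper):

	#include <time.h>

	static void
	demo_clamped_wait(const struct timespec *deadline,
	    const struct timespec *now, struct timespec *wait)
	{
		wait->tv_sec = deadline->tv_sec - now->tv_sec;
		wait->tv_nsec = deadline->tv_nsec - now->tv_nsec;
		if (wait->tv_nsec < 0) {	/* borrow from seconds */
			wait->tv_sec--;
			wait->tv_nsec += 1000000000L;
		}
		if (wait->tv_sec < 0) {		/* deadline already passed */
			wait->tv_sec = 0;	/* poll without blocking */
			wait->tv_nsec = 0;
		}
	}
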
@@ -1406 +1413 @@ sched_hook_fetch(void *root, void *arg __unused)
 	} else /* no waiting for event, because we have ready task */
 		sched_timespecclear(&r->root_wait);
 
-	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
+	if (!sched_timespecisinf(&r->root_wait)) {
+		tsmin = sched_timespecmin(&r->root_wait, &r->root_poll);
 #if SUP_ENABLE == KQ_SUPPORT
-		timeout = &r->root_wait;
+		timeout = tsmin;
 #elif SUP_ENABLE == EP_SUPPORT
-		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
+		timeout = tsmin->tv_sec * 1000 + tsmin->tv_nsec / 1000000;
 #else
-		sched_timespec2val(&r->root_wait, &tv);
+		sched_timespec2val(tsmin, &tv);
 		timeout = &tv;
 #endif /* KQ_SUPPORT */
 	} else if (sched_timespecisinf(&r->root_poll))
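
Note: the open-coded infinity test (tv_sec != -1 && tv_nsec != -1) is replaced by sched_timespecisinf(), and the timeout handed to kevent/epoll/select is now the smaller of the next timer wait and the polling interval, via sched_timespecmin(). A sketch of a comparable "earlier of two timespecs" helper, assuming both inputs are finite (the library's real helper presumably also special-cases the INFTIM sentinel; demo_timespecmin is a hypothetical name):

	#include <time.h>

	static struct timespec *
	demo_timespecmin(struct timespec *a, struct timespec *b)
	{
		if (a->tv_sec < b->tv_sec)
			return a;
		if (a->tv_sec > b->tv_sec)
			return b;
		return (a->tv_nsec <= b->tv_nsec) ? a : b;
	}
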