--- version 1.39, 2022/12/20 22:40:32
+++ version 1.42, 2023/08/17 14:14:24
@@ -12 +12 @@ terms:
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
 
-Copyright 2004 - 2022
+Copyright 2004 - 2023
 by Michael Pounov <misho@elwix.org>. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -47 +47 @@ SUCH DAMAGE.
 #include "hooks.h"
 
-static inline void
-transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
-{
-	remove_task_from(t, q);
-
-	t->task_type = taskREADY;
-	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
-}
-
 #ifdef HAVE_LIBPTHREAD
 static void *
 _sched_threadWrapper(sched_task_t *t)
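The deleted helper moved a task from whatever queue it sat on to the root's ready queue, retagging it taskREADY on the way; remove_task_from() and insert_task_to() are the library's own queue primitives. A self-contained analogue of the same unlink-retag-relink pattern, built directly on <sys/queue.h> with hypothetical task_t/tqueue types:

#include <sys/queue.h>
#include <stdio.h>

typedef struct task {
	int type;			/* stand-in for task_type */
	TAILQ_ENTRY(task) node;
} task_t;

TAILQ_HEAD(tqueue, task);

/* same shape as transit_task2ready(): unlink, retag, relink */
static inline void
transit(task_t *t, struct tqueue *from, struct tqueue *to, int newtype)
{
	TAILQ_REMOVE(from, t, node);
	t->type = newtype;
	TAILQ_INSERT_TAIL(to, t, node);
}

int
main(void)
{
	struct tqueue timer = TAILQ_HEAD_INITIALIZER(timer);
	struct tqueue ready = TAILQ_HEAD_INITIALIZER(ready);
	task_t t = { .type = 0 };

	TAILQ_INSERT_TAIL(&timer, &t, node);
	transit(&t, &timer, &ready, 1);
	printf("timer empty: %d, ready empty: %d\n",
	    TAILQ_EMPTY(&timer) != 0, TAILQ_EMPTY(&ready) != 0);
	return 0;
}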
@@ -359 +350 @@ sched_hook_cancel(void *task, void *arg __unused)
 #endif
 		break;
 	case taskSIGNAL:
-#if SUP_ENABLE == KQ_SUPPORT
 		/* check for multi subscribers */
 		TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
 			if (TASK_VAL(tt) == TASK_VAL(t))
 				flg++;
+#if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
 		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
 		    0, 0, (intptr_t) TASK_VAL(t));
@@ -371 +362 @@ sched_hook_cancel(void *task, void *arg __unused)
 		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
 		    0, 0, (void*) TASK_VAL(t));
 #endif
-		/* restore signal */
-		if (flg < 2)
-			signal(TASK_VAL(t), SIG_DFL);
 #endif
+		if (flg < 2) {
+			pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
+			sigdelset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
+			pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
+		}
 		break;
 #ifdef AIO_SUPPORT
 	case taskAIO:
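The cancel path above no longer restores the handler with signal(SIG_DFL); instead the TAILQ walk counts how many tasks still watch the signal, and only the last cancellation drops it from the shared root_sigset under root_sigmtx. A sketch of that subscriber-counted teardown, assuming hypothetical names (watchers, sigmask, sigmask_lock) in place of the root-task fields:

#include <pthread.h>
#include <signal.h>

static pthread_mutex_t sigmask_lock = PTHREAD_MUTEX_INITIALIZER;
static sigset_t sigmask;	/* signals some task still watches */
static int watchers[NSIG];	/* per-signal subscriber count,
				 * incremented on subscribe (not shown) */

static void
cancel_signal_watch(int signo)
{
	if (--watchers[signo] > 0)
		return;		/* other tasks still subscribed */

	/* last watcher gone: remove the signal from the shared set */
	pthread_mutex_lock(&sigmask_lock);
	sigdelset(&sigmask, signo);
	pthread_mutex_unlock(&sigmask_lock);
}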
@@ -823 +816 @@ sched_hook_proc(void *task, void *arg __unused)
 void *
 sched_hook_signal(void *task, void *arg __unused)
 {
-#if SUP_ENABLE == KQ_SUPPORT
 	sched_task_t *t = task;
+#if SUP_ENABLE == KQ_SUPPORT
 	struct kevent chg[1];
 	struct timespec timeout = { 0, 0 };
+#endif
 
 	if (!t || !TASK_ROOT(t))
 		return (void*) -1;
 
-	/* ignore signal */
-	signal(TASK_VAL(t), SIG_IGN);
-
+#if SUP_ENABLE == KQ_SUPPORT
 #ifdef __NetBSD__
 	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
 #else
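On the kqueue backend the registration itself is unchanged: EVFILT_SIGNAL with EV_ADD | EV_CLEAR. What 1.42 removes is the signal(SIG_IGN) call, since keeping the default disposition at bay is now the job of the shared signal mask. A minimal, runnable kqueue demonstration (BSD/macOS); it still parks the disposition with signal(2), as the pre-1.42 code did, and abridges error handling:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent chg, res;
	int kq = kqueue();

	/* EVFILT_SIGNAL fires even for ignored signals, so SIG_IGN
	 * only suppresses the default disposition */
	signal(SIGUSR1, SIG_IGN);
	EV_SET(&chg, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, 0);
	if (kq == -1 || kevent(kq, &chg, 1, NULL, 0, NULL) == -1)
		return 1;

	kill(getpid(), SIGUSR1);
	if (kevent(kq, NULL, 0, &res, 1, NULL) == 1)
		printf("signal %d delivered %d time(s)\n",
		    (int) res.ident, (int) res.data);
	return 0;
}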
@@ -847 +839 @@ sched_hook_signal(void *task, void *arg __unused)
 		return (void*) -1;
 	}
 #endif
 
+	pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
+	sigaddset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
+	pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
 
 	return NULL;
 }
 
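Subscription is the mirror image of the cancel hunk: the signal is added to root_sigset under root_sigmtx. How that set is later consumed lies outside this diff; the sigtimedwait(2) reader below is an assumption made only to round out the sketch, and every name besides the POSIX calls is hypothetical:

#include <pthread.h>
#include <signal.h>
#include <time.h>

static pthread_mutex_t sigmask_lock = PTHREAD_MUTEX_INITIALIZER;
static sigset_t sigmask;	/* initialise once with sigemptyset() */

static void
watch_signal(int signo)
{
	pthread_mutex_lock(&sigmask_lock);
	sigaddset(&sigmask, signo);	/* mirrors the sigaddset() above */
	pthread_mutex_unlock(&sigmask_lock);
}

/* hypothetical consumer: poll the watched set for up to 100 ms */
static int
wait_one_signal(void)
{
	struct timespec ts = { 0, 100000000L };
	sigset_t set;

	pthread_mutex_lock(&sigmask_lock);
	set = sigmask;			/* snapshot under the lock */
	pthread_mutex_unlock(&sigmask_lock);

	return sigtimedwait(&set, NULL, &ts);	/* -1/EAGAIN on timeout */
}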
@@ -1345 +1342 @@ sched_hook_fetch(void *root, void *arg __unused)
 {
 	sched_root_task_t *r = root;
 	sched_task_t *task, *tmp;
-	struct timespec now, m, mtmp;
+	struct timespec now, m, mtmp, *tsmin;
 #if SUP_ENABLE == KQ_SUPPORT
 	struct kevent res[KQ_EVENTS];
 	struct timespec *timeout;
@@ -1388 +1385 @@ sched_hook_fetch(void *root, void *arg __unused)
 	if (TAILQ_FIRST(&r->root_timer)) {
 		m = r->root_wait;
 		sched_timespecsub(&m, &now, &mtmp);
-		r->root_wait = mtmp;
+		if (mtmp.tv_sec < 0 || mtmp.tv_nsec < 0)
+			/* don't wait for events. we have ready timer */
+			sched_timespecclear(&r->root_wait);
+		else
+			r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
 		sched_timespecinf(&r->root_wait);
@@ -1399 +1400 @@ sched_hook_fetch(void *root, void *arg __unused)
 
 		m = TASK_TS(task);
 		sched_timespecsub(&m, &now, &mtmp);
-		r->root_wait = mtmp;
+		if (mtmp.tv_sec < 0 || mtmp.tv_nsec < 0)
+			/* don't wait for events. we have ready timer */
+			sched_timespecclear(&r->root_wait);
+		else
+			r->root_wait = mtmp;
 	} else {
 		/* set wait INFTIM */
 		sched_timespecinf(&r->root_wait);
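This hunk and the previous one patch the same bug: when the nearest timer is already due, sched_timespecsub() yields a negative remainder, and 1.39 handed that negative timespec straight to the kernel wait. The fix clamps it to zero, meaning "poll, don't sleep". The clamp in isolation, with timespec_sub as a local stand-in for sched_timespecsub():

#include <stdio.h>
#include <time.h>

static void
timespec_sub(const struct timespec *a, const struct timespec *b,
    struct timespec *res)
{
	res->tv_sec = a->tv_sec - b->tv_sec;
	res->tv_nsec = a->tv_nsec - b->tv_nsec;
	if (res->tv_nsec < 0) {
		res->tv_sec--;
		res->tv_nsec += 1000000000L;
	}
}

int
main(void)
{
	/* deadline 50 s in the past: the raw difference is negative */
	struct timespec deadline = { 100, 0 }, now = { 150, 0 }, wait;

	timespec_sub(&deadline, &now, &wait);
	if (wait.tv_sec < 0 || wait.tv_nsec < 0)
		wait.tv_sec = wait.tv_nsec = 0;	/* timer already due */
	printf("wait %lld.%09ld\n", (long long) wait.tv_sec, wait.tv_nsec);
	return 0;
}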
@@ -1408 +1413 @@ sched_hook_fetch(void *root, void *arg __unused)
 	} else /* no waiting for event, because we have ready task */
 		sched_timespecclear(&r->root_wait);
 
-	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
+	if (!sched_timespecisinf(&r->root_wait)) {
+		tsmin = sched_timespecmin(&r->root_wait, &r->root_poll);
 #if SUP_ENABLE == KQ_SUPPORT
-		timeout = &r->root_wait;
+		timeout = tsmin;
 #elif SUP_ENABLE == EP_SUPPORT
-		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
+		timeout = tsmin->tv_sec * 1000 + tsmin->tv_nsec / 1000000;
 #else
-		sched_timespec2val(&r->root_wait, &tv);
+		sched_timespec2val(tsmin, &tv);
 		timeout = &tv;
 #endif	/* KQ_SUPPORT */
 	} else if (sched_timespecisinf(&r->root_poll))
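The last hunk bounds the blocking time by the new polling interval: when the wait is finite, the backend timeout becomes the smaller of root_wait and root_poll, converted per backend (kqueue takes a struct timespec, epoll an int in milliseconds). A sketch of the two helpers this implies; timespec_min and timespec2ms are local stand-ins for sched_timespecmin() and the inline epoll conversion:

#include <time.h>

/* smaller of two normalised timespecs */
static const struct timespec *
timespec_min(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return a->tv_sec < b->tv_sec ? a : b;
	return a->tv_nsec < b->tv_nsec ? a : b;
}

/* same formula as the EP_SUPPORT branch above */
static int
timespec2ms(const struct timespec *ts)
{
	return ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
}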