version 1.13.2.2, 2014/05/19 23:35:40
version 1.18, 2023/02/25 15:55:01

terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004 - 2014 (version 1.13.2.2) / Copyright 2004 - 2023 (version 1.18)
	by Michael Pounov <misho@elwix.org>. All rights reserved.

Redistribution and use in source and binary forms, with or without

SUCH DAMAGE.

#ifndef SIGRTMIN
#define SIGRTMIN 65
#endif

#ifndef FIONWRITE
#define FIONWRITE SIOCOUTQ
#endif

#ifndef TIMER_RELTIME
#define TIMER_RELTIME 0
#endif

#ifndef EV_EOF
#define EV_EOF 0x8000 /* EOF detected */
#endif
#ifndef EV_ERROR
#define EV_ERROR 0x4000 /* error, data contains errno */
#endif

#ifndef __unused
#define __unused __attribute__((unused))
#endif

/* without the ELWIX base library, the e_* allocator wrappers map to plain libc */
#ifndef __ELWIX
#define e_malloc malloc
#define e_calloc calloc
#define e_realloc realloc
#define e_free free
#define e_strdup strdup
#endif

#define MAX_TASK_MISS 12

/* copy strerror(errno) of the failing call into the global sched_Error buffer */
#define LOGERR { \
	strlcpy(sched_Error, strerror(errno), STRSIZ); \
}
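
/*
 * Illustrative sketch only, not part of the header: a typical call site for
 * LOGERR, recording the errno text of a failed system call.  example_open()
 * is a made-up name; sched_Error and STRSIZ are assumed to come from the
 * library's other headers.
 *
 *	#include <fcntl.h>
 *
 *	static int
 *	example_open(const char *path)
 *	{
 *		int fd = open(path, O_RDONLY);
 *
 *		if (fd == -1)
 *			LOGERR;
 *		return fd;
 *	}
 */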

/* without libpthread the mutex calls compile away to no-ops */
#ifndef HAVE_LIBPTHREAD
#define pthread_mutex_lock(x)
#define pthread_mutex_unlock(x)
#endif

/* per-queue locking: r is the scheduler root, x indexes root_mtx[] by task type */
#define SCHED_QTRYLOCK(r, x)	pthread_mutex_trylock(&(r)->root_mtx[(x)])
#define SCHED_QLOCK(r, x)	pthread_mutex_lock(&(r)->root_mtx[(x)])
#define SCHED_QUNLOCK(r, x)	pthread_mutex_unlock(&(r)->root_mtx[(x)])

#define sched_timespecclear(tsp)	((tsp)->tv_sec = (tsp)->tv_nsec = 0)
#define sched_timespecinf(tsp)		((tsp)->tv_sec = (tsp)->tv_nsec = -1)
#define sched_timespecisinf(tsp)	((tsp)->tv_sec == -1 && (tsp)->tv_nsec == -1)
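
/*
 * Illustrative sketch only: judging by the macros above, {-1, -1} serves as
 * an "infinite" timespec sentinel, distinct from a cleared {0, 0} value.
 *
 *	struct timespec ts;
 *
 *	sched_timespecclear(&ts);		// ts == { 0, 0 }
 *	sched_timespecinf(&ts);			// ts == { -1, -1 }
 *	if (sched_timespecisinf(&ts))
 *		;				// no finite deadline set
 */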

extern char sched_Error[];

void sched_SetErr(int, char *, ...);

version 1.13.2.2:

void *_sched_threadWrapper(sched_task_t *);
void *_sched_rtcWrapper(sched_task_t *);

version 1.18:

static inline struct timespec *
sched_timespecmin(struct timespec * __restrict spa, struct timespec * __restrict spb)
{
	assert(spa && spb);

	if (sched_timespecisinf(spa))
		return spb;
	if (sched_timespecisinf(spb))
		return spa;

	if (spa->tv_sec == spb->tv_sec)
		return (spa->tv_nsec < spb->tv_nsec) ? spa : spb;
	else
		return (spa->tv_sec < spb->tv_sec) ? spa : spb;
}

static inline void
remove_task_from(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
	assert(t && q);

	SCHED_QLOCK(TASK_ROOT(t), TASK_TYPE(t));
	TAILQ_REMOVE(q, t, task_node);
	SCHED_QUNLOCK(TASK_ROOT(t), TASK_TYPE(t));
}

static inline void
insert_task_to(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
	assert(t && q);

	SCHED_QLOCK(TASK_ROOT(t), TASK_TYPE(t));
	TAILQ_INSERT_TAIL(q, t, task_node);
	SCHED_QUNLOCK(TASK_ROOT(t), TASK_TYPE(t));
}

static inline void
transit_task2unuse(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
	assert(t && q);

	remove_task_from(t, q);

	TASK_UNLOCK(t);
	TASK_TYPE(t) = taskUNUSE;
	insert_task_to(t, &(TASK_ROOT(t))->root_unuse);
}

static inline void
transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
	remove_task_from(t, q);

	t->task_type = taskREADY;
	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
}
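
/*
 * Illustrative sketch only, not part of the header: the helpers above let
 * backend code take a task off the queue it currently sits on and republish
 * it under the per-queue locks.  example_wakeup() and its waitq/cancelled
 * arguments are hypothetical; only the helper and queue names come from this
 * header.
 *
 *	static void
 *	example_wakeup(sched_task_t * __restrict t,
 *	    sched_queue_t * __restrict waitq, int cancelled)
 *	{
 *		if (cancelled)
 *			transit_task2unuse(t, waitq);	// park on root_unuse for reuse
 *		else
 *			transit_task2ready(t, waitq);	// hand over to root_ready
 *	}
 */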

#endif