|
version 1.14.2.1, 2015/07/02 22:42:44
|
version 1.17.10.2, 2023/02/25 15:40:48
|
|
Line 12 terms:
|
Line 12 terms:
|
| All of the documentation and software included in the ELWIX and AITNET |
All of the documentation and software included in the ELWIX and AITNET |
| Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org> |
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org> |
| |
|
| Copyright 2004 - 2023                                                      |
| by Michael Pounov <misho@elwix.org>. All rights reserved. |
by Michael Pounov <misho@elwix.org>. All rights reserved. |
| |
|
| Redistribution and use in source and binary forms, with or without |
Redistribution and use in source and binary forms, with or without |
|
Line 82 SUCH DAMAGE.
|
Line 82 SUCH DAMAGE.
|
| #define __unused __attribute__((unused)) |
#define __unused __attribute__((unused)) |
| #endif |
#endif |
| |
|
| |
#ifndef __ELWIX |
| |
#define e_malloc malloc |
| |
#define e_calloc calloc |
| |
#define e_realloc realloc |
| |
#define e_free free |
| |
#define e_strdup strdup |
| |
#endif |
| |
|
| #define MAX_TASK_MISS 12 |
#define MAX_TASK_MISS 12 |
| |
|
| #define LOGERR { \ |
#define LOGERR { \ |
|
Line 94 SUCH DAMAGE.
|
Line 102 SUCH DAMAGE.
|
| #define pthread_mutex_unlock(x) |
#define pthread_mutex_unlock(x) |
| #endif |
#endif |
| |
|
| |
#define SCHED_QTRYLOCK(r, x) pthread_mutex_trylock(&(r)->root_mtx[(x)]) |
| #define SCHED_QLOCK(r, x) pthread_mutex_lock(&(r)->root_mtx[(x)]) |
#define SCHED_QLOCK(r, x) pthread_mutex_lock(&(r)->root_mtx[(x)]) |
| #define SCHED_QUNLOCK(r, x) pthread_mutex_unlock(&(r)->root_mtx[(x)]) |
#define SCHED_QUNLOCK(r, x) pthread_mutex_unlock(&(r)->root_mtx[(x)]) |
| |
|
|
Line 173 extern char sched_Error[];
|
Line 182 extern char sched_Error[];
|
| void sched_SetErr(int, char *, ...); |
void sched_SetErr(int, char *, ...); |
| |
|
| |
|
| |
static inline struct timespec * |
| |
sched_timespecmin(struct timespec * __restrict spa, struct timespec * __restrict spb) |
| |
{ |
| |
assert(spa && spb); |
| |
|
| |
if (sched_timespecisinf(spa)) |
| |
return spb; |
| |
if (sched_timespecisinf(spb)) |
| |
return spa; |
| |
|
| |
if (spa->tv_sec == spb->tv_sec) |
| |
return (spa->tv_nsec < spb->tv_nsec) ? spa : spb; |
| |
else |
| |
return (spa->tv_sec < spb->tv_sec) ? spa : spb; |
| |
} |
| |
|
| static inline void |
static inline void |
| remove_task_from(sched_task_t * __restrict t, sched_queue_t * __restrict q) |
remove_task_from(sched_task_t * __restrict t, sched_queue_t * __restrict q) |
| { |
{ |
|
Line 203 transit_task2unuse(sched_task_t * __restrict t, sched_
|
Line 228 transit_task2unuse(sched_task_t * __restrict t, sched_
|
| TASK_UNLOCK(t); |
TASK_UNLOCK(t); |
| TASK_TYPE(t) = taskUNUSE; |
TASK_TYPE(t) = taskUNUSE; |
| insert_task_to(t, &(TASK_ROOT(t))->root_unuse); |
insert_task_to(t, &(TASK_ROOT(t))->root_unuse); |
| |
} |
| |
|
| |
static inline void |
| |
transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q) |
| |
{ |
| |
remove_task_from(t, q); |
| |
|
| |
t->task_type = taskREADY; |
| |
insert_task_to(t, &(TASK_ROOT(t))->root_ready); |
| } |
} |
| |
|
| |
|