version 1.10, 2013/06/19 00:16:35 | version 1.17.10.1, 2023/02/23 17:13:01

Line 12 terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 | Copyright 2004 - 2023
by Michael Pounov <misho@elwix.org>. All rights reserved.

Redistribution and use in source and binary forms, with or without

Line 55 SUCH DAMAGE.
#define KQ_EVENTS 32
#endif
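
/*
 * Compatibility fallbacks for platforms that do not define these constants
 * natively (for instance, FIONWRITE is mapped to Linux's SIOCOUTQ ioctl).
 */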
#ifndef EV_EOF
#define EV_EOF 0x8000
#endif

#ifndef SIGRTMIN
#define SIGRTMIN 65
#endif

#ifndef FIONWRITE
#define FIONWRITE SIOCOUTQ
#endif

#ifndef TIMER_RELTIME
#define TIMER_RELTIME 0
#endif

#ifndef EV_EOF
#define EV_EOF 0x8000	/* EOF detected */
#endif
#ifndef EV_ERROR
#define EV_ERROR 0x4000	/* error, data contains errno */
#endif

#ifndef __unused
#define __unused __attribute__((unused))
#endif
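
/*
 * Without __ELWIX (presumably defined when building against the ELWIX base
 * libraries), the e_*() wrappers below map straight to their libc
 * counterparts.
 */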
#ifndef __ELWIX
#define e_malloc malloc
#define e_calloc calloc
#define e_realloc realloc
#define e_free free
#define e_strdup strdup
#endif

#define MAX_TASK_MISS 12
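
/* LOGERR: stash strerror(errno) into the global sched_Error buffer */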
#define LOGERR { \
Line 62 SUCH DAMAGE. | Line 97 SUCH DAMAGE.
	strlcpy(sched_Error, strerror(errno), STRSIZ); \
}
#ifndef HAVE_LIBPTHREAD
#define pthread_mutex_lock(x)
#define pthread_mutex_unlock(x)
#endif
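
/*
 * Queue locking helpers over the scheduler root's root_mtx[] array, indexed
 * by task type; SCHED_QLOCK/SCHED_QUNLOCK become no-ops when HAVE_LIBPTHREAD
 * is not defined (see the stubs above).
 */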
#define SCHED_QTRYLOCK(r, x) pthread_mutex_trylock(&(r)->root_mtx[(x)])
#define SCHED_QLOCK(r, x) pthread_mutex_lock(&(r)->root_mtx[(x)])
#define SCHED_QUNLOCK(r, x) pthread_mutex_unlock(&(r)->root_mtx[(x)])

#define sched_timespecclear(tsp) ((tsp)->tv_sec = (tsp)->tv_nsec = 0)
#define sched_timespecinf(tsp) ((tsp)->tv_sec = (tsp)->tv_nsec = -1)
#define sched_timespecisinf(tsp) ((tsp)->tv_sec == -1 && (tsp)->tv_nsec == -1)
Line 90 SUCH DAMAGE. | Line 134 SUCH DAMAGE.
	} \
} while (0)

#define sched_timespec2val(tsp, tvp) ((tvp)->tv_sec = (tsp)->tv_sec, \
		(tvp)->tv_usec = (tsp)->tv_nsec / 1000)
#define sched_timeval2spec(tvp, tsp) ((tsp)->tv_sec = (tvp)->tv_sec, \
		(tsp)->tv_nsec = (tvp)->tv_usec * 1000)

#define sched_timevalclear(tvp) ((tvp)->tv_sec = (tvp)->tv_usec = 0)
#define sched_timevalinf(tvp) ((tvp)->tv_sec = (tvp)->tv_usec = -1)
#define sched_timevalisinf(tvp) ((tvp)->tv_sec == -1 && (tvp)->tv_usec == -1)
#define sched_timevalisset(tvp) (((tvp)->tv_sec && (tvp)->tv_sec != -1) || \
		((tvp)->tv_usec && (tvp)->tv_usec != -1))
#define sched_timevalcmp(tvp, uvp, cmp) \
	(((tvp)->tv_sec == (uvp)->tv_sec) ? \
		((tvp)->tv_usec cmp (uvp)->tv_usec) : \
		((tvp)->tv_sec cmp (uvp)->tv_sec))
#define sched_timevaladd(tvp, uvp, vvp) \
	do { \
		(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
		(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
		if ((vvp)->tv_usec >= 1000000) { \
			(vvp)->tv_sec++; \
			(vvp)->tv_usec -= 1000000; \
		} \
	} while (0)
#define sched_timevalsub(tvp, uvp, vvp) \
	do { \
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
		if ((vvp)->tv_usec < 0) { \
			(vvp)->tv_sec--; \
			(vvp)->tv_usec += 1000000; \
		} \
	} while (0)
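
/*
 * Usage sketch (illustrative, not part of the original header): the timeval
 * helpers above compose into deadline arithmetic.  Variable names here are
 * hypothetical; gettimeofday(2) needs <sys/time.h>.
 *
 *	struct timeval now, step, deadline;
 *
 *	gettimeofday(&now, NULL);
 *	step.tv_sec = 1;
 *	step.tv_usec = 500000;
 *	sched_timevaladd(&now, &step, &deadline);	// deadline = now + 1.5s
 *	if (sched_timevalcmp(&now, &deadline, <))
 *		;	// still before the deadline
 */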
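
/*
 * Prototypes for strlcat(3)/strlcpy(3), declared here for platforms whose
 * libc does not provide them.
 */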
#ifndef HAVE_STRLCAT
size_t strlcat(char * __restrict dst, const char * __restrict src, size_t siz);
#endif
#ifndef HAVE_STRLCPY
size_t strlcpy(char * __restrict dst, const char * __restrict src, size_t siz);
#endif

extern int sched_Errno;
extern char sched_Error[];

inline void sched_SetErr(int, char *, ...); | void sched_SetErr(int, char *, ...);
void *_sched_threadWrapper(sched_task_t *);

static inline void
remove_task_from(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
	assert(t && q);

	SCHED_QLOCK(TASK_ROOT(t), TASK_TYPE(t));
	TAILQ_REMOVE(q, t, task_node);
	SCHED_QUNLOCK(TASK_ROOT(t), TASK_TYPE(t));
}

static inline void
insert_task_to(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
	assert(t && q);

	SCHED_QLOCK(TASK_ROOT(t), TASK_TYPE(t));
	TAILQ_INSERT_TAIL(q, t, task_node);
	SCHED_QUNLOCK(TASK_ROOT(t), TASK_TYPE(t));
}

static inline void
transit_task2unuse(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
	assert(t && q);

	remove_task_from(t, q);

	TASK_UNLOCK(t);
	TASK_TYPE(t) = taskUNUSE;
	insert_task_to(t, &(TASK_ROOT(t))->root_unuse);
}

static inline void
transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
{
	remove_task_from(t, q);

	t->task_type = taskREADY;
	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
}
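
/*
 * The helpers above move tasks between the scheduler root's queues while
 * holding the per-type queue lock: transit_task2unuse() parks a task on
 * root_unuse, transit_task2ready() requeues it on root_ready for dispatch.
 * Illustrative call, with "task" and "q" as hypothetical names for a task
 * and the queue it currently sits on:
 *
 *	transit_task2ready(task, q);
 */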

#endif