version 1.13.2.1, 2014/05/19 23:21:41
|
version 1.14, 2014/06/05 22:37:29
|
Line 63 SUCH DAMAGE.
|
Line 63 SUCH DAMAGE.
|
/* NOTE(review): this file is side-by-side diff residue; trailing '|'
 * markers and duplicated lines are diff-column artifacts, kept verbatim. */
/* Fallback SIGRTMIN for platforms without POSIX realtime signals; the
 * opening #ifndef guard for this fallback lies above this excerpt. */
#define SIGRTMIN 65 |
#define SIGRTMIN 65 |
#endif |
#endif |
|
|
|
/* Linux spells the "bytes queued for output" socket ioctl SIOCOUTQ;
 * map the BSD name FIONWRITE onto it when it is missing. */
#ifndef FIONWRITE |
|
#define FIONWRITE SIOCOUTQ |
|
#endif |
|
|
|
/* TIMER_RELTIME (relative-timer flag) is absent on systems where
 * relative timing is the default; define it as 0 so it can always be
 * passed in a flags argument without effect. */
#ifndef TIMER_RELTIME |
|
#define TIMER_RELTIME 0 |
|
#endif |
|
|
|
/* kqueue kevent flag fallbacks for platforms without <sys/event.h>;
 * the values mirror the BSD definitions so flag tests stay valid. */
#ifndef EV_EOF |
|
#define EV_EOF 0x8000 /* EOF detected */ |
|
#endif |
|
#ifndef EV_ERROR |
|
#define EV_ERROR 0x4000 /* error, data contains errno */ |
|
#endif |
|
|
/* BSD-style unused-symbol marker for toolchains whose headers do not
 * already provide __unused. NOTE(review): the identifier is formally
 * reserved for the implementation; kept for compatibility with the
 * project's existing convention. */
#ifndef __unused |
#ifndef __unused |
#define __unused __attribute__((unused)) |
#define __unused __attribute__((unused)) |
#endif |
#endif |
Line 74 SUCH DAMAGE.
|
Line 89 SUCH DAMAGE.
|
strlcpy(sched_Error, strerror(errno), STRSIZ); \ |
strlcpy(sched_Error, strerror(errno), STRSIZ); \ |
} |
} |
|
|
|
/* Without pthreads the mutex calls expand to nothing, so the queue
 * lock/unlock macros below become no-ops in single-threaded builds. */
#ifndef HAVE_LIBPTHREAD |
|
#define pthread_mutex_lock(x) |
|
#define pthread_mutex_unlock(x) |
|
#endif |
|
|
|
/* Acquire/release the per-queue mutex of scheduler root r; x indexes
 * root_mtx[] by queue/task type. */
#define SCHED_QLOCK(r, x) pthread_mutex_lock(&(r)->root_mtx[(x)]) |
|
#define SCHED_QUNLOCK(r, x) pthread_mutex_unlock(&(r)->root_mtx[(x)]) |
|
|
#define sched_timespecclear(tsp) ((tsp)->tv_sec = (tsp)->tv_nsec = 0) |
#define sched_timespecclear(tsp) ((tsp)->tv_sec = (tsp)->tv_nsec = 0) |
#define sched_timespecinf(tsp) ((tsp)->tv_sec = (tsp)->tv_nsec = -1) |
#define sched_timespecinf(tsp) ((tsp)->tv_sec = (tsp)->tv_nsec = -1) |
#define sched_timespecisinf(tsp) ((tsp)->tv_sec == -1 && (tsp)->tv_nsec == -1) |
#define sched_timespecisinf(tsp) ((tsp)->tv_sec == -1 && (tsp)->tv_nsec == -1) |
Line 136 SUCH DAMAGE.
|
Line 159 SUCH DAMAGE.
|
} while (0) |
} while (0) |
|
|
|
|
|
/* Prototypes for the BSD bounded string routines on libcs that lack
 * them; implementations are supplied elsewhere in the project. Both
 * always NUL-terminate (if siz > 0) and return the length they tried
 * to create, per the OpenBSD strlcpy/strlcat contract. */
#ifndef HAVE_STRLCAT |
|
size_t strlcat(char * __restrict dst, const char * __restrict src, size_t siz); |
|
#endif |
|
#ifndef HAVE_STRLCPY |
|
size_t strlcpy(char * __restrict dst, const char * __restrict src, size_t siz); |
|
#endif |
|
|
|
|
/* Last scheduler error: errno-style code plus human-readable message
 * buffer (defined in the implementation; written via strlcpy by the
 * error macro earlier in this header). */
extern int sched_Errno; |
extern int sched_Errno; |
extern char sched_Error[]; |
extern char sched_Error[]; |
|
|
/* Set sched_Errno and format sched_Error; presumably printf-style
 * (fmt, ...) — TODO confirm against the definition. */
void sched_SetErr(int, char *, ...); |
void sched_SetErr(int, char *, ...); |
|
|
void *_sched_threadWrapper(sched_task_t *); | |
void *_sched_rtcWrapper(sched_task_t *); | static inline void |
| remove_task_from(sched_task_t * __restrict t, sched_queue_t * __restrict q) |
| { |
| assert(t && q); |
| |
| SCHED_QLOCK(TASK_ROOT(t), TASK_TYPE(t)); |
| TAILQ_REMOVE(q, t, task_node); |
| SCHED_QUNLOCK(TASK_ROOT(t), TASK_TYPE(t)); |
| } |
| |
| static inline void |
| insert_task_to(sched_task_t * __restrict t, sched_queue_t * __restrict q) |
| { |
| assert(t && q); |
| |
| SCHED_QLOCK(TASK_ROOT(t), TASK_TYPE(t)); |
| TAILQ_INSERT_TAIL(q, t, task_node); |
| SCHED_QUNLOCK(TASK_ROOT(t), TASK_TYPE(t)); |
| } |
| |
| static inline void |
| transit_task2unuse(sched_task_t * __restrict t, sched_queue_t * __restrict q) |
| { |
| assert(t && q); |
| |
| remove_task_from(t, q); |
| |
| TASK_UNLOCK(t); |
| TASK_TYPE(t) = taskUNUSE; |
| insert_task_to(t, &(TASK_ROOT(t))->root_unuse); |
| } |
|
|
|
|
#endif |
#endif |