--- libaitsched/inc/defs.h	2012/03/13 10:01:59	1.5
+++ libaitsched/inc/defs.h	2023/02/25 15:55:01	1.18
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: defs.h,v 1.5 2012/03/13 10:01:59 misho Exp $
+* $Id: defs.h,v 1.18 2023/02/25 15:55:01 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -12,7 +12,7 @@ terms:
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria
-Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Copyright 2004 - 2023
 by Michael Pounov . All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -47,15 +47,65 @@ SUCH DAMAGE.
 
 
 #define __DEFS_H
 
+#ifndef STRSIZ
 #define STRSIZ			256
-#define KQ_EVENTS		24
-#define MAX_EVENTLO_MISS	48
+#endif
+#ifndef KQ_EVENTS
+#define KQ_EVENTS		32
+#endif
+
+#ifndef EV_EOF
+#define EV_EOF			0x8000
+#endif
+
+#ifndef SIGRTMIN
+#define SIGRTMIN		65
+#endif
+
+#ifndef FIONWRITE
+#define FIONWRITE		SIOCOUTQ
+#endif
+
+#ifndef TIMER_RELTIME
+#define TIMER_RELTIME		0
+#endif
+
+#ifndef EV_EOF
+#define EV_EOF			0x8000	/* EOF detected */
+#endif
+#ifndef EV_ERROR
+#define EV_ERROR		0x4000	/* error, data contains errno */
+#endif
+
+#ifndef __unused
+#define __unused		__attribute__((unused))
+#endif
+
+#ifndef __ELWIX
+#define e_malloc	malloc
+#define e_calloc	calloc
+#define e_realloc	realloc
+#define e_free		free
+#define e_strdup	strdup
+#endif
+
+#define MAX_TASK_MISS	12
+
 #define LOGERR { \
 	sched_Errno = errno; \
 	strlcpy(sched_Error, strerror(errno), STRSIZ); \
 }
 
+#ifndef HAVE_LIBPTHREAD
+#define pthread_mutex_lock(x)
+#define pthread_mutex_unlock(x)
+#endif
+
+#define SCHED_QTRYLOCK(r, x)	pthread_mutex_trylock(&(r)->root_mtx[(x)])
+#define SCHED_QLOCK(r, x)	pthread_mutex_lock(&(r)->root_mtx[(x)])
+#define SCHED_QUNLOCK(r, x)	pthread_mutex_unlock(&(r)->root_mtx[(x)])
+
 #define sched_timespecclear(tsp)	((tsp)->tv_sec = (tsp)->tv_nsec = 0)
 #define sched_timespecinf(tsp)		((tsp)->tv_sec = (tsp)->tv_nsec = -1)
 #define sched_timespecisinf(tsp)	((tsp)->tv_sec == -1 && (tsp)->tv_nsec == -1)
@@ -84,13 +134,110 @@ SUCH DAMAGE.
 	}						\
 } while (0)
 
+#define sched_timespec2val(tsp, tvp)	((tvp)->tv_sec = (tsp)->tv_sec, \
+	(tvp)->tv_usec = (tsp)->tv_nsec / 1000)
+#define sched_timeval2spec(tvp, tsp)	((tsp)->tv_sec = (tvp)->tv_sec, \
+	(tsp)->tv_nsec = (tvp)->tv_usec * 1000)
+
+#define sched_timevalclear(tvp)		((tvp)->tv_sec = (tvp)->tv_usec = 0)
+#define sched_timevalinf(tvp)		((tvp)->tv_sec = (tvp)->tv_usec = -1)
+#define sched_timevalisinf(tvp)		((tvp)->tv_sec == -1 && (tvp)->tv_usec == -1)
+#define sched_timevalisset(tvp)		(((tvp)->tv_sec && (tvp)->tv_sec != -1) || \
+	((tvp)->tv_usec && (tvp)->tv_usec != -1))
+#define sched_timevalcmp(tvp, uvp, cmp) \
+	(((tvp)->tv_sec == (uvp)->tv_sec) ? \
+	((tvp)->tv_usec cmp (uvp)->tv_usec) : \
+	((tvp)->tv_sec cmp (uvp)->tv_sec))
+#define sched_timevaladd(tvp, uvp, vvp) \
+	do { \
+		(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
+		(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
+		if ((vvp)->tv_usec >= 1000000) { \
+			(vvp)->tv_sec++; \
+			(vvp)->tv_usec -= 1000000; \
+		} \
+	} while (0)
+#define sched_timevalsub(tvp, uvp, vvp) \
+	do { \
+		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
+		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
+		if ((vvp)->tv_usec < 0) { \
+			(vvp)->tv_sec--; \
+			(vvp)->tv_usec += 1000000; \
+		} \
+	} while (0)
+
+
+#ifndef HAVE_STRLCAT
+size_t strlcat(char * __restrict dst, const char * __restrict src, size_t siz);
+#endif
+#ifndef HAVE_STRLCPY
+size_t strlcpy(char * __restrict dst, const char * __restrict src, size_t siz);
+#endif
+
+
 extern int sched_Errno;
 extern char sched_Error[];
 
-inline void sched_SetErr(int, char *, ...);
+void sched_SetErr(int, char *, ...);
 
-inline sched_task_t *_sched_useTask(sched_root_task_t * __restrict root);
-inline sched_task_t *_sched_unuseTask(sched_task_t * __restrict task);
+
+static inline struct timespec *
+sched_timespecmin(struct timespec * __restrict spa, struct timespec * __restrict spb)
+{
+	assert(spa && spb);
+
+	if (sched_timespecisinf(spa))
+		return spb;
+	if (sched_timespecisinf(spb))
+		return spa;
+
+	if (spa->tv_sec == spb->tv_sec)
+		return (spa->tv_nsec < spb->tv_nsec) ? spa : spb;
+	else
+		return (spa->tv_sec < spb->tv_sec) ? spa : spb;
+}
+
+static inline void
+remove_task_from(sched_task_t * __restrict t, sched_queue_t * __restrict q)
+{
+	assert(t && q);
+
+	SCHED_QLOCK(TASK_ROOT(t), TASK_TYPE(t));
+	TAILQ_REMOVE(q, t, task_node);
+	SCHED_QUNLOCK(TASK_ROOT(t), TASK_TYPE(t));
+}
+
+static inline void
+insert_task_to(sched_task_t * __restrict t, sched_queue_t * __restrict q)
+{
+	assert(t && q);
+
+	SCHED_QLOCK(TASK_ROOT(t), TASK_TYPE(t));
+	TAILQ_INSERT_TAIL(q, t, task_node);
+	SCHED_QUNLOCK(TASK_ROOT(t), TASK_TYPE(t));
+}
+
+static inline void
+transit_task2unuse(sched_task_t * __restrict t, sched_queue_t * __restrict q)
+{
+	assert(t && q);
+
+	remove_task_from(t, q);
+
+	TASK_UNLOCK(t);
+	TASK_TYPE(t) = taskUNUSE;
+	insert_task_to(t, &(TASK_ROOT(t))->root_unuse);
+}
+
+static inline void
+transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
+{
+	remove_task_from(t, q);
+
+	t->task_type = taskREADY;
+	insert_task_to(t, &(TASK_ROOT(t))->root_ready);
+}
 
 #endif
 