--- libaitsched/src/tasks.c	2014/04/27 16:20:37	1.24
+++ libaitsched/src/tasks.c	2014/05/21 22:09:01	1.24.2.3
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: tasks.c,v 1.24 2014/04/27 16:20:37 misho Exp $
+* $Id: tasks.c,v 1.24.2.3 2014/05/21 22:09:01 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -323,7 +323,7 @@ sched_task_t *
 schedNode(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
 		void *opt_data, size_t opt_dlen)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -364,7 +364,7 @@ schedNode(sched_root_task_t * __restrict root, sched_t
 		task = sched_unuseTask(task);

 	return task;
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }

 /*
@@ -382,7 +382,7 @@ sched_task_t *
 schedProc(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long pid,
 		void *opt_data, size_t opt_dlen)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -423,7 +423,7 @@ schedProc(sched_root_task_t * __restrict root, sched_t
 		task = sched_unuseTask(task);

 	return task;
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }

 /*
@@ -441,7 +441,7 @@ sched_task_t *
 schedUser(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id,
 		void *opt_data, size_t opt_dlen)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -487,7 +487,7 @@ schedUser(sched_root_task_t * __restrict root, sched_t

 	return task;
 #endif /* EVFILT_USER */
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }

 /*
@@ -505,7 +505,7 @@ sched_task_t *
 schedSignal(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long sig,
 		void *opt_data, size_t opt_dlen)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -546,7 +546,7 @@ schedSignal(sched_root_task_t * __restrict root, sched
 		task = sched_unuseTask(task);

 	return task;
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }

 /*
@@ -564,7 +564,7 @@ sched_task_t *
 schedAlarm(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts,
 		void *opt_data, size_t opt_dlen)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -605,7 +605,7 @@ schedAlarm(sched_root_task_t * __restrict root, sched_
 		task = sched_unuseTask(task);

 	return task;
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }
 #ifdef AIO_SUPPORT
 /*
@@ -624,7 +624,7 @@ sched_task_t *
 schedAIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg,
 		struct aiocb * __restrict acb, void *opt_data, size_t opt_dlen)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -665,7 +665,7 @@ schedAIO(sched_root_task_t * __restrict root, sched_ta
 		task = sched_unuseTask(task);

 	return task;
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }

 /*
@@ -684,7 +684,7 @@ sched_task_t *
 schedAIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
 		void *buffer, size_t buflen, off_t offset)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -724,7 +724,7 @@ schedAIORead(sched_root_task_t * __restrict root, sche
 	}

 	return schedAIO(root, func, arg, acb, buffer, buflen);
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }

 /*
@@ -743,7 +743,7 @@ sched_task_t *
 schedAIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
 		void *buffer, size_t buflen, off_t offset)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -783,7 +783,7 @@ schedAIOWrite(sched_root_task_t * __restrict root, sch
 	}

 	return schedAIO(root, func, arg, acb, buffer, buflen);
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }
 #ifdef EVFILT_LIO
 /*
@@ -802,7 +802,7 @@ sched_task_t *
 schedLIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg,
 		struct aiocb ** __restrict acbs, void *opt_data, size_t opt_dlen)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -843,7 +843,7 @@ schedLIO(sched_root_task_t * __restrict root, sched_ta
 		task = sched_unuseTask(task);

 	return task;
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }

 /*
@@ -862,7 +862,7 @@ sched_task_t *
 schedLIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
 		struct iovec *bufs, size_t nbufs, off_t offset)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -920,7 +920,7 @@ schedLIORead(sched_root_task_t * __restrict root, sche
 	}

 	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }

 /*
@@ -939,7 +939,7 @@ sched_task_t *
 schedLIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
 		struct iovec *bufs, size_t nbufs, off_t offset)
 {
-#ifdef KQ_DISABLE
+#if SUP_ENABLE != KQ_SUPPORT
 	sched_SetErr(ENOTSUP, "disabled kqueue support");
 	return NULL;
 #else
@@ -997,7 +997,7 @@ schedLIOWrite(sched_root_task_t * __restrict root, sch
 	}

 	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
-#endif /* KQ_DISABLE */
+#endif /* KQ_SUPPORT */
 }
 #endif /* EVFILT_LIO */
 #endif /* AIO_SUPPORT */
@@ -1330,7 +1330,7 @@ schedThread(sched_root_task_t * __restrict root, sched
 	TASK_DATLEN(task) = opt_dlen;

 	pthread_attr_init(&attr);
-	pthread_attr_setdetachstate(&attr, PTHREAD_DETACHED);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 	if (ss && (errno = pthread_attr_setstacksize(&attr, ss))) {
 		LOGERR;
 		pthread_attr_destroy(&attr);
@@ -1342,11 +1342,7 @@ schedThread(sched_root_task_t * __restrict root, sched
 		return sched_unuseTask(task);
 	} else
 		TASK_FLAG(task) = ss;
-	if ((errno = pthread_attr_setguardsize(&attr, ss))) {
-		LOGERR;
-		pthread_attr_destroy(&attr);
-		return sched_unuseTask(task);
-	}
+
 #ifdef SCHED_RR
 	pthread_attr_setschedpolicy(&attr, SCHED_RR);
 #else