File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / tasks.c
Revision 1.31: download - view: text, annotated - select for diffs - revision graph
Sat Feb 25 15:55:01 2023 UTC (14 months, 3 weeks ago) by misho
Branches: MAIN
CVS tags: sched8_3, sched8_2, sched8_1, sched8_0, sched7_9, sched7_8, sched7_7, sched7_6, sched7_5, SCHED8_2, SCHED8_1, SCHED8_0, SCHED7_9, SCHED7_8, SCHED7_7, SCHED7_6, SCHED7_5, SCHED7_4, HEAD
version 7.4

/*************************************************************************
* (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
*  by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: tasks.c,v 1.31 2023/02/25 15:55:01 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004 - 2023
	by Michael Pounov <misho@elwix.org>.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
This product includes software developed by Michael Pounov <misho@elwix.org>
ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * sched_useTask() - Get and init new task
 *
 * @root = root task
 * return: NULL error or !=NULL prepared task
 */
sched_task_t *
sched_useTask(sched_root_task_t * __restrict root)
{
	sched_task_t *task, *tmp;

	/* First try to recycle: take the first *unlocked* task off the
	 * unuse queue, under the queue lock. */
	SCHED_QLOCK(root, taskUNUSE);
	TAILQ_FOREACH_SAFE(task, &root->root_unuse, task_node, tmp) {
		if (!TASK_ISLOCKED(task)) {
			TAILQ_REMOVE(&root->root_unuse, task, task_node);
			break;
		}
	}
	SCHED_QUNLOCK(root, taskUNUSE);

	/* TAILQ_FOREACH_SAFE leaves the iterator NULL when the list was
	 * exhausted without a break, so !task means "nothing to recycle":
	 * fall back to a fresh heap allocation. */
	if (!task) {
		task = e_malloc(sizeof(sched_task_t));
		if (!task) {
			LOGERR;
			return NULL;
		}
	}

	/* Recycled or fresh, always hand back a zeroed task.  The task id
	 * is the task's own address, unique for the task's lifetime. */
	memset(task, 0, sizeof(sched_task_t));
	task->task_id = (uintptr_t) task;
	return task;
}

/*
 * sched_unuseTask() - Unlock and put task to unuse queue
 *
 * @task = task
 * return: always is NULL
 */
sched_task_t *
sched_unuseTask(sched_task_t * __restrict task)
{
	/* drop any lock held on the task before recycling it */
	TASK_UNLOCK(task);

	/* mark it unused and park it on its root's recycle queue */
	TASK_TYPE(task) = taskUNUSE;
	insert_task_to(task, &(TASK_ROOT(task))->root_unuse);

	/* always NULL, so callers can write `task = sched_unuseTask(task);` */
	return NULL;
}

/*
 * sched_taskExit() - Exit routine for scheduler task, explicit required for thread tasks
 *
 * @task = current task
 * @retcode = return code
 * return: return code
 */
void *
sched_taskExit(sched_task_t *task, intptr_t retcode)
{
	sched_root_task_t *r;

	/* no task or orphaned task: nothing to report against */
	if (!task)
		return (void*) -1;
	r = TASK_ROOT(task);
	if (!r)
		return (void*) -1;

	/* give the backend's exit hook a chance to observe the result */
	if (r->root_hooks.hook_exec.exit)
		r->root_hooks.hook_exec.exit(task, (void*) retcode);

	/* record the return code on the root and echo it back */
	r->root_ret = (void*) retcode;
	return (void*) retcode;
}


/*
 * schedRead() - Add READ I/O task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedRead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen)
{
	/* plain READ is just the extended variant with an empty event mask */
	return schedReadExt(root, func, arg, fd, opt_data, opt_dlen, 0);
}

/*
 * schedReadExt() - Add READ I/O task to scheduler queue with custom event mask
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * @mask = Event mask
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedReadExt(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen, u_long mask)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskREAD;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	TASK_HARG(task) = mask;

	/* register with the backend; a non-NULL hook result means failure.
	 * Use the TASK_HARG() accessor for consistency with the assignment
	 * above (previously this reached into task->task_harg directly). */
	if (root->root_hooks.hook_add.read)
		ptr = root->root_hooks.hook_add.read(task, 
				(void*) TASK_HARG(task));
	else
		ptr = NULL;

	if (!ptr)
		insert_task_to(task, &root->root_read);
	else
		task = sched_unuseTask(task);	/* recycle; returns NULL */

	return task;
}

/*
 * schedWrite() - Add WRITE I/O task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen)
{
	/* plain WRITE is just the extended variant with an empty event mask */
	return schedWriteExt(root, func, arg, fd, opt_data, opt_dlen, 0);
}

/*
 * schedWriteExt() - Add WRITE I/O task to scheduler queue with custom event mask
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * @mask = Event mask
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedWriteExt(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen, u_long mask)
{
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskWRITE;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	TASK_HARG(task) = mask;

	/* register with the backend; a non-NULL hook result means failure.
	 * Use the TASK_HARG() accessor for consistency with the assignment
	 * above (previously this reached into task->task_harg directly). */
	if (root->root_hooks.hook_add.write)
		ptr = root->root_hooks.hook_add.write(task, 
				(void*) TASK_HARG(task));
	else
		ptr = NULL;

	if (!ptr)
		insert_task_to(task, &root->root_write);
	else
		task = sched_unuseTask(task);	/* recycle; returns NULL */

	return task;
}

/*
 * schedNode() - Add NODE task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedNode(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	void *hres = NULL;
	sched_task_t *task;

	if (!root || !func)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the NODE (vnode monitor) task */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskNODE;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.node)
		hres = root->root_hooks.hook_add.node(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_node);
	return task;
#endif	/* KQ_SUPPORT */
}

/*
 * schedNode2() - Add NODE task with all events to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedNode2(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	sched_task_t *task;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskNODE;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* On FreeBSD request the extended vnode note set (read, open and
	 * both close flavours); other platforms get the hook's default
	 * event mask.  Note the #ifdef selects which call feeds the
	 * single if-body above it. */
	if (root->root_hooks.hook_add.node)
#ifdef __FreeBSD__
		ptr = root->root_hooks.hook_add.node(task, 
				(void*) (NOTE_READ | NOTE_CLOSE_WRITE | NOTE_CLOSE | NOTE_OPEN));
#else
		ptr = root->root_hooks.hook_add.node(task, NULL);
#endif
	else
		ptr = NULL;

	/* non-NULL hook result means failure: recycle the task (-> NULL) */
	if (!ptr)
		insert_task_to(task, &root->root_node);
	else
		task = sched_unuseTask(task);

	return task;
#endif	/* KQ_SUPPORT */
}

/*
 * schedProc() - Add PROC task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @pid = PID
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedProc(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long pid, 
		void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	void *hres = NULL;
	sched_task_t *task;

	if (!root || !func)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the PROC (process watch) task; the PID travels in the
	 * task's value field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskPROC;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = pid;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.proc)
		hres = root->root_hooks.hook_add.proc(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_proc);
	return task;
#endif	/* KQ_SUPPORT */
}

/*
 * schedUser() - Add trigger USER task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @id = Trigger ID
 * @opt_data = Optional data
 * @opt_dlen = Optional user's trigger flags
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedUser(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id, 
		void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
#ifndef EVFILT_USER
	sched_SetErr(ENOTSUP, "Not supported kevent() filter");
	return NULL;
#else
	void *hres = NULL;
	sched_task_t *task;

	if (!root || !func)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the USER trigger task; the trigger id travels in the
	 * task's value field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskUSER;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = id;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.user)
		hres = root->root_hooks.hook_add.user(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_user);
	return task;
#endif	/* EVFILT_USER */
#endif	/* KQ_SUPPORT */
}

/*
 * schedSignal() - Add SIGNAL task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @sig = Signal
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedSignal(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long sig, 
		void *opt_data, size_t opt_dlen)
{
	void *hres = NULL;
	sched_task_t *task;

	if (!root || !func)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the SIGNAL task; the signal number travels in the
	 * task's value field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskSIGNAL;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = sig;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.signal)
		hres = root->root_hooks.hook_add.signal(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_signal);
	return task;
}

/*
 * schedAlarm() - Add ALARM task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ts = timeout argument structure, minimum alarm timer resolution is 1msec!
 * @opt_data = Alarm timer ID
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAlarm(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, 
		void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	void *hres = NULL;
	sched_task_t *task;

	if (!root || !func)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the ALARM task; the timeout is stored as-is in the
	 * task's timespec field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskALARM;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_TS(task) = ts;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.alarm)
		hres = root->root_hooks.hook_add.alarm(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_alarm);
	return task;
#endif	/* KQ_SUPPORT */
}

#ifdef AIO_SUPPORT
/*
 * schedAIO() - Add AIO task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @acb = AIO cb structure address
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, 
		struct aiocb * __restrict acb, void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	void *hres = NULL;
	sched_task_t *task;

	/* an AIO task needs a control block and a non-zero data length */
	if (!root || !func || !acb || !opt_dlen)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the AIO task; the aiocb address travels in the task's
	 * value field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskAIO;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = (u_long) acb;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.aio)
		hres = root->root_hooks.hook_add.aio(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_aio);
	return task;
#endif	/* KQ_SUPPORT */
}

/*
 * schedAIORead() - Add AIO read task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @buffer = Buffer
 * @buflen = Buffer length
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *buffer, size_t buflen, off_t offset)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	struct aiocb *acb;
	off_t off;

	if (!root || !func || !buffer || !buflen)
		return NULL;

	/* offset == -1 means "read from the current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	/* control block is heap-allocated; on success its ownership passes
	 * to the AIO completion path -- NOTE(review): presumably freed by
	 * the completion handler, confirm against the dispatch code */
	if (!(acb = e_malloc(sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(struct aiocb));

	acb->aio_fildes = fd;
	acb->aio_nbytes = buflen;
	acb->aio_buf = buffer;
	acb->aio_offset = off;
	/* completion is delivered via kqueue (SIGEV_KEVENT) on the root's
	 * kq descriptor; sival_ptr carries the acb back to the handler */
	acb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb->aio_sigevent.sigev_notify_kqueue = root->root_kq;
	acb->aio_sigevent.sigev_value.sival_ptr = acb;

	/* submit; on failure the acb is still ours to free */
	if (aio_read(acb)) {
		LOGERR;
		e_free(acb);
		return NULL;
	}

	/* queue the task that fires when the read completes */
	return schedAIO(root, func, arg, acb, buffer, buflen);
#endif	/* KQ_SUPPORT */
}

/*
 * schedAIOWrite() - Add AIO write task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @buffer = Buffer
 * @buflen = Buffer length
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *buffer, size_t buflen, off_t offset)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	struct aiocb *acb;
	off_t off;

	if (!root || !func || !buffer || !buflen)
		return NULL;

	/* offset == -1 means "write at the current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	/* control block is heap-allocated; on success its ownership passes
	 * to the AIO completion path -- NOTE(review): presumably freed by
	 * the completion handler, confirm against the dispatch code */
	if (!(acb = e_malloc(sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(struct aiocb));

	acb->aio_fildes = fd;
	acb->aio_nbytes = buflen;
	acb->aio_buf = buffer;
	acb->aio_offset = off;
	/* completion is delivered via kqueue (SIGEV_KEVENT) on the root's
	 * kq descriptor; sival_ptr carries the acb back to the handler */
	acb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb->aio_sigevent.sigev_notify_kqueue = root->root_kq;
	acb->aio_sigevent.sigev_value.sival_ptr = acb;

	/* submit; on failure the acb is still ours to free */
	if (aio_write(acb)) {
		LOGERR;
		e_free(acb);
		return NULL;
	}

	/* queue the task that fires when the write completes */
	return schedAIO(root, func, arg, acb, buffer, buflen);
#endif	/* KQ_SUPPORT */
}

#ifdef EVFILT_LIO
/*
 * schedLIO() - Add AIO bulk tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @acbs = AIO cb structure addresses
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, 
		struct aiocb ** __restrict acbs, void *opt_data, size_t opt_dlen)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	void *hres = NULL;
	sched_task_t *task;

	/* a LIO task needs the control-block array and a non-zero length */
	if (!root || !func || !acbs || !opt_dlen)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the LIO task; the acb array address travels in the
	 * task's value field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskLIO;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = (u_long) acbs;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.lio)
		hres = root->root_hooks.hook_add.lio(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_lio);
	return task;
#endif	/* KQ_SUPPORT */
}

/*
 * schedLIORead() - Add list of AIO read tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @bufs = Buffer's list
 * @nbufs = Number of Buffers
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		struct iovec *bufs, size_t nbufs, off_t offset)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	struct sigevent sig;
	struct aiocb **acb;
	off_t off;
	register int i;

	if (!root || !func || !bufs || !nbufs)
		return NULL;

	/* offset == -1 means "read from the current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	/* pointer array, one aiocb per iovec entry; e_calloc presumably
	 * zero-fills already, so the memset is belt-and-braces -- TODO
	 * confirm e_calloc semantics */
	if (!(acb = e_calloc(sizeof(void*), nbufs))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(void*) * nbufs);
	/* off advances by each buffer's length, mapping the iovecs onto
	 * consecutive file regions */
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		acb[i] = e_malloc(sizeof(struct aiocb));
		if (!acb[i]) {
			LOGERR;
			/* unwind: free every aiocb allocated so far (NULL
			 * slots are skipped); i is reused here, safe since
			 * we return immediately */
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					e_free(acb[i]);
			e_free(acb);
			return NULL;
		} else
			memset(acb[i], 0, sizeof(struct aiocb));
		acb[i]->aio_fildes = fd;
		acb[i]->aio_nbytes = bufs[i].iov_len;
		acb[i]->aio_buf = bufs[i].iov_base;
		acb[i]->aio_offset = off;
		acb[i]->aio_lio_opcode = LIO_READ;
	}
	/* one kqueue notification for the whole batch; sival_ptr carries
	 * the acb array back to the completion handler */
	memset(&sig, 0, sizeof sig);
	sig.sigev_notify = SIGEV_KEVENT;
	sig.sigev_notify_kqueue = root->root_kq;
	sig.sigev_value.sival_ptr = acb;

	/* submit the whole list without waiting; on failure everything is
	 * still ours to free */
	if (lio_listio(LIO_NOWAIT, acb, nbufs, &sig)) {
		LOGERR;
		for (i = 0; i < nbufs; i++)
			if (acb[i])
				e_free(acb[i]);
		e_free(acb);
		return NULL;
	}

	/* queue the task that fires when the batch completes */
	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
#endif	/* KQ_SUPPORT */
}

/*
 * schedLIOWrite() - Add list of AIO write tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @bufs = Buffer's list
 * @nbufs = Number of Buffers
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		struct iovec *bufs, size_t nbufs, off_t offset)
{
#if SUP_ENABLE != KQ_SUPPORT
	sched_SetErr(ENOTSUP, "disabled kqueue support");
	return NULL;
#else
	struct sigevent sig;
	struct aiocb **acb;
	off_t off;
	register int i;

	if (!root || !func || !bufs || !nbufs)
		return NULL;

	/* offset == -1 means "write at the current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	/* pointer array, one aiocb per iovec entry; e_calloc presumably
	 * zero-fills already, so the memset is belt-and-braces -- TODO
	 * confirm e_calloc semantics */
	if (!(acb = e_calloc(sizeof(void*), nbufs))) {
		LOGERR;
		return NULL;
	} else
		memset(acb, 0, sizeof(void*) * nbufs);
	/* off advances by each buffer's length, mapping the iovecs onto
	 * consecutive file regions */
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		acb[i] = e_malloc(sizeof(struct aiocb));
		if (!acb[i]) {
			LOGERR;
			/* unwind: free every aiocb allocated so far (NULL
			 * slots are skipped); i is reused here, safe since
			 * we return immediately */
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					e_free(acb[i]);
			e_free(acb);
			return NULL;
		} else
			memset(acb[i], 0, sizeof(struct aiocb));
		acb[i]->aio_fildes = fd;
		acb[i]->aio_nbytes = bufs[i].iov_len;
		acb[i]->aio_buf = bufs[i].iov_base;
		acb[i]->aio_offset = off;
		acb[i]->aio_lio_opcode = LIO_WRITE;
	}
	/* one kqueue notification for the whole batch; sival_ptr carries
	 * the acb array back to the completion handler */
	memset(&sig, 0, sizeof sig);
	sig.sigev_notify = SIGEV_KEVENT;
	sig.sigev_notify_kqueue = root->root_kq;
	sig.sigev_value.sival_ptr = acb;

	/* submit the whole list without waiting; on failure everything is
	 * still ours to free */
	if (lio_listio(LIO_NOWAIT, acb, nbufs, &sig)) {
		LOGERR;
		for (i = 0; i < nbufs; i++)
			if (acb[i])
				e_free(acb[i]);
		e_free(acb);
		return NULL;
	}

	/* queue the task that fires when the batch completes */
	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
#endif	/* KQ_SUPPORT */
}
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

/*
 * schedTimer() - Add TIMER task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ts = timeout argument structure
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedTimer(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *tmp, *t = NULL;
	void *ptr;
	struct timespec now;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskTIMER;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* convert the relative timeout into an absolute expiry on the
	 * monotonic clock, then normalize tv_nsec into [0, 1e9) */
	clock_gettime(CLOCK_MONOTONIC, &now);
	now.tv_sec += ts.tv_sec;
	now.tv_nsec += ts.tv_nsec;
	if (now.tv_nsec >= 1000000000L) {
		now.tv_sec++;
		now.tv_nsec -= 1000000000L;
	} else if (now.tv_nsec < 0) {
		now.tv_sec--;
		now.tv_nsec += 1000000000L;
	}
	TASK_TS(task) = now;

	if (root->root_hooks.hook_add.timer)
		ptr = root->root_hooks.hook_add.timer(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
		SCHED_QLOCK(root, taskTIMER);
#ifdef TIMER_WITHOUT_SORT
		TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
#else
		/* keep the timer queue sorted by expiry: insert before the
		 * first entry that does not expire earlier than we do */
		TAILQ_FOREACH_SAFE(t, &root->root_timer, task_node, tmp)
			if (sched_timespeccmp(&TASK_TS(task), &TASK_TS(t), -) < 1)
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
		else
			TAILQ_INSERT_BEFORE(t, task, task_node);
#endif
		SCHED_QUNLOCK(root, taskTIMER);
	} else
		task = sched_unuseTask(task);	/* hook failure: recycle */

	return task;
}

/*
 * schedEvent() - Add EVENT task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @val = additional func argument
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedEvent(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val, 
		void *opt_data, size_t opt_dlen)
{
	void *hres = NULL;
	sched_task_t *task;

	if (!root || !func)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the EVENT task; the caller's value travels in the
	 * task's value field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskEVENT;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = val;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.event)
		hres = root->root_hooks.hook_add.event(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_event);
	return task;
}


/*
 * schedTask() - Add regular task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @prio = regular task priority, 0 is hi priority for regular tasks
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedTask(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long prio, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *tmp, *t = NULL;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskTASK;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	/* the priority travels in the task's value field */
	TASK_VAL(task) = prio;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.task)
		ptr = root->root_hooks.hook_add.task(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
		/* priority-ordered insert (lower value runs first); the
		 * strict < keeps insertion stable: equal priorities queue
		 * after existing entries of the same priority */
		SCHED_QLOCK(root, taskTASK);
		TAILQ_FOREACH_SAFE(t, &root->root_task, task_node, tmp)
			if (TASK_VAL(task) < TASK_VAL(t))
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_task, task, task_node);
		else
			TAILQ_INSERT_BEFORE(t, task, task_node);
		SCHED_QUNLOCK(root, taskTASK);
	} else
		task = sched_unuseTask(task);	/* hook failure: recycle */

	return task;
}

/*
 * schedSuspend() - Add Suspended task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @id = Trigger ID
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedSuspend(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id, 
		void *opt_data, size_t opt_dlen)
{
	void *hres = NULL;
	sched_task_t *task;

	if (!root || !func)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the SUSPEND task; the trigger id travels in the
	 * task's value field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskSUSPEND;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = id;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.suspend)
		hres = root->root_hooks.hook_add.suspend(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_suspend);
	return task;
}

/*
 * schedCallOnce() - Call once from scheduler
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @val = additional func argument
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: return value from called func
 */
sched_task_t *
schedCallOnce(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *ret;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* build a transient EVENT-typed task; it is never queued, just
	 * executed synchronously below */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskEVENT;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	TASK_VAL(task) = val;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* run func immediately; ret is func's raw return value */
	ret = schedCall(task);

	/* recycle the transient task; NOTE(review): the void* result is
	 * returned through a sched_task_t* signature by design ("return
	 * value from called func"), not an actual task pointer */
	sched_unuseTask(task);
	return ret;
}

/*
 * schedThread() - Add thread task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ss = stack size
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedThread(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, 
		size_t ss, void *opt_data, size_t opt_dlen)
{
#ifndef HAVE_LIBPTHREAD
	sched_SetErr(ENOTSUP, "Not supported thread tasks");
	return NULL;
#else
	/* FIX: the pthread-dependent body is now bracketed by #else/#endif;
	 * previously the #endif sat right after the early return, so all of
	 * the code below was still compiled (as dead code) even when
	 * libpthread was unavailable, breaking such builds. */
	sched_task_t *task;
	pthread_attr_t attr;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskTHREAD;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* threads run detached (never joined); ss == 0 keeps the
	 * platform's default stack size */
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	if (ss && (errno = pthread_attr_setstacksize(&attr, ss))) {
		LOGERR;
		pthread_attr_destroy(&attr);
		return sched_unuseTask(task);
	}
	/* read back the effective stack size and remember it on the task */
	if ((errno = pthread_attr_getstacksize(&attr, &ss))) {
		LOGERR;
		pthread_attr_destroy(&attr);
		return sched_unuseTask(task);
	} else
		TASK_FLAG(task) = ss;

#ifdef SCHED_RR
	pthread_attr_setschedpolicy(&attr, SCHED_RR);
#else
	pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
#endif

	if (root->root_hooks.hook_add.thread)
		ptr = root->root_hooks.hook_add.thread(task, &attr);
	else
		ptr = NULL;

	if (!ptr)
		insert_task_to(task, &root->root_thread);
	else
		task = sched_unuseTask(task);

	pthread_attr_destroy(&attr);
	return task;
#endif	/* HAVE_LIBPTHREAD */
}

/*
 * schedRTC() - Add RTC task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ts = timeout argument structure, minimum alarm timer resolution is 1msec!
 * @opt_data = Optional RTC ID
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedRTC(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, 
		void *opt_data, size_t opt_dlen)
{
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	void *hres = NULL;
	sched_task_t *task;

	if (!root || !func)
		return NULL;

	/* grab a fresh task slot */
	task = sched_useTask(root);
	if (!task)
		return NULL;

	/* describe the RTC task; the timeout is stored as-is in the
	 * task's timespec field */
	TASK_FUNC(task) = func;
	TASK_TYPE(task) = taskRTC;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_TS(task) = ts;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result signals failure */
	if (root->root_hooks.hook_add.rtc)
		hres = root->root_hooks.hook_add.rtc(task, NULL);

	if (hres)
		return sched_unuseTask(task);

	insert_task_to(task, &root->root_rtc);
	return task;
#else
	sched_SetErr(ENOTSUP, "Not supported realtime clock extensions");
	return NULL;
#endif
}

FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>