File:  [ELWIX - Embedded LightWeight unIX -] / libaitsched / src / tasks.c
Revision 1.13.2.1: download - view: text, annotated - select for diffs - revision graph
Wed Aug 22 10:33:45 2012 UTC (11 years, 9 months ago) by misho
Branches: sched3_3
Diff to: branchpoint 1.13: preferred, colored
do some extension for thread tasks

/*************************************************************************
* (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
*  by Michael Pounov <misho@openbsd-bg.org>
*
* $Author: misho $
* $Id: tasks.c,v 1.13.2.1 2012/08/22 10:33:45 misho Exp $
*
**************************************************************************
The ELWIX and AITNET software is distributed under the following
terms:

All of the documentation and software included in the ELWIX and AITNET
Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>

Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
	by Michael Pounov <misho@elwix.org>.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
   must display the following acknowledgement:
This product includes software developed by Michael Pounov <misho@elwix.org>
ELWIX - Embedded LightWeight unIX and its contributors.
4. Neither the name of AITNET nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
#include "global.h"


/*
 * sched_useTask() - Get and init new task
 *
 * @root = root task
 * return: NULL error or !=NULL prepared task
 */
inline sched_task_t *
sched_useTask(sched_root_task_t * __restrict root)
{
	sched_task_t *task, *tmp;

	/*
	 * Recycle a task from the unuse queue if one is available and not
	 * locked.  The whole scan must run under root_mtx[taskUNUSE]:
	 * sched_unuseTask() may insert into this queue concurrently, and
	 * iterating an unprotected TAILQ while it is being modified is a
	 * race (the original code locked only around TAILQ_REMOVE).
	 */
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskUNUSE]);
#endif
	TAILQ_FOREACH_SAFE(task, &root->root_unuse, task_node, tmp) {
		if (!TASK_ISLOCKED(task)) {
			TAILQ_REMOVE(&root->root_unuse, task, task_node);
			break;
		}
	}
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskUNUSE]);
#endif

	/* queue empty (or every entry locked): task is NULL here, allocate fresh */
	if (!task) {
		task = malloc(sizeof(sched_task_t));
		if (!task) {
			LOGERR;
			return NULL;
		}
	}

	/* hand back a zeroed task; task_id doubles as its own address */
	memset(task, 0, sizeof(sched_task_t));
	task->task_id = (uintptr_t) task;
	return task;
}

/*
 * sched_unuseTask() - Unlock and put task to unuse queue
 *
 * @task = task
 * return: always is NULL
 */
inline sched_task_t *
sched_unuseTask(sched_task_t * __restrict task)
{
	sched_root_task_t *root = TASK_ROOT(task);

	/* release the task and retire it to the recycle (unuse) queue */
	TASK_UNLOCK(task);
	TASK_TYPE(task) = taskUNUSE;

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskUNUSE]);
#endif
	TAILQ_INSERT_TAIL(&root->root_unuse, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskUNUSE]);
#endif

	/* always NULL, so callers can write `task = sched_unuseTask(task);` */
	return NULL;
}

void *
_sched_threadJoin(sched_task_t *task)
{
	void *ret = NULL;

	if (!task)
		return NULL;

#ifdef HAVE_LIBPTHREAD
	/* TASK_VAL carries the pthread_t of the thread to reap; its exit
	 * value is published on the root so the scheduler can observe it */
	pthread_join((pthread_t) TASK_VAL(task), &ret);
	TASK_ROOT(task)->root_ret = ret;
#endif

	/* helper is scheduled as a regular task; its own return value is unused */
	return NULL;
}

/*
 * sched_taskExit() - Exit routine for scheduler task, explicit required for thread tasks
 *
 * @task = current task
 * @retcode = return code
 * return: return code
 */
inline void *
sched_taskExit(sched_task_t *task, intptr_t retcode)
{
	if (!task || !TASK_ROOT(task))
		return (void*) -1;

	/* give the exit hook first crack at the return code */
	if (TASK_ROOT(task)->root_hooks.hook_exec.exit)
		TASK_ROOT(task)->root_hooks.hook_exec.exit(task, (void*) retcode);

	/* publish the return code on the scheduler root */
	TASK_ROOT(task)->root_ret = (void*) retcode;

#ifdef HAVE_LIBPTHREAD
	if (TASK_TYPE(task) == taskTHREAD) {
		/* joinable threads must be reaped: queue a regular task that
		 * pthread_join()s this thread so its resources are released */
		if (TASK_FLAG(task) == PTHREAD_CREATE_JOINABLE)	/* joinable thread */
			schedTask(TASK_ROOT(task), _sched_threadJoin, TASK_ARG(task),
					TASK_VAL(task), TASK_DATA(task), TASK_DATLEN(task));
		/* recycle the task BEFORE pthread_exit — nothing below runs */
		sched_unuseTask(task);
		pthread_exit((void*) retcode);
	}
#endif

	return (void*) retcode;
}


/*
 * schedRead() - Add READ I/O task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedRead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the READ task */
	task->task_func = func;
	TASK_TYPE(task) = taskREAD;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.read ? 
		root->root_hooks.hook_add.read(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskREAD]);
#endif
	TAILQ_INSERT_TAIL(&root->root_read, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskREAD]);
#endif

	return task;
}

/*
 * schedWrite() - Add WRITE I/O task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the WRITE task */
	task->task_func = func;
	TASK_TYPE(task) = taskWRITE;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.write ? 
		root->root_hooks.hook_add.write(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskWRITE]);
#endif
	TAILQ_INSERT_TAIL(&root->root_write, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskWRITE]);
#endif

	return task;
}

/*
 * schedNode() - Add NODE task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = fd handle
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedNode(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the NODE task */
	task->task_func = func;
	TASK_TYPE(task) = taskNODE;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_FD(task) = fd;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.node ? 
		root->root_hooks.hook_add.node(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskNODE]);
#endif
	TAILQ_INSERT_TAIL(&root->root_node, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskNODE]);
#endif

	return task;
}

/*
 * schedProc() - Add PROC task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @pid = PID
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedProc(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long pid, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the PROC task; monitored PID travels in TASK_VAL */
	task->task_func = func;
	TASK_TYPE(task) = taskPROC;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = pid;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.proc ? 
		root->root_hooks.hook_add.proc(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskPROC]);
#endif
	TAILQ_INSERT_TAIL(&root->root_proc, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskPROC]);
#endif

	return task;
}

/*
 * schedUser() - Add trigger USER task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @id = Trigger ID
 * @opt_data = Optional data
 * @opt_dlen = Optional user's trigger flags
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedUser(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id, 
		void *opt_data, size_t opt_dlen)
{
#ifndef EVFILT_USER
	/* user-trigger tasks require kqueue's EVFILT_USER filter */
	sched_SetErr(ENOTSUP, "Not supported kevent() filter");
	return NULL;
#else
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the USER task; trigger id travels in TASK_VAL */
	task->task_func = func;
	TASK_TYPE(task) = taskUSER;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = id;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.user ? 
		root->root_hooks.hook_add.user(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskUSER]);
#endif
	TAILQ_INSERT_TAIL(&root->root_user, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskUSER]);
#endif

	return task;
#endif
}

/*
 * schedSignal() - Add SIGNAL task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @sig = Signal
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedSignal(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long sig, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the SIGNAL task; signal number travels in TASK_VAL */
	task->task_func = func;
	TASK_TYPE(task) = taskSIGNAL;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = sig;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.signal ? 
		root->root_hooks.hook_add.signal(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskSIGNAL]);
#endif
	TAILQ_INSERT_TAIL(&root->root_signal, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskSIGNAL]);
#endif

	return task;
}

/*
 * schedAlarm() - Add ALARM task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ts = timeout argument structure, minimum alarm timer resolution is 1msec!
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAlarm(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the ALARM task; timeout travels in TASK_TS */
	task->task_func = func;
	TASK_TYPE(task) = taskALARM;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_TS(task) = ts;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.alarm ? 
		root->root_hooks.hook_add.alarm(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskALARM]);
#endif
	TAILQ_INSERT_TAIL(&root->root_alarm, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskALARM]);
#endif

	return task;
}

#ifdef AIO_SUPPORT
/*
 * schedAIO() - Add AIO task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @acb = AIO cb structure address
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedAIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, 
		struct aiocb * __restrict acb, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func || !acb || !opt_dlen)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the AIO task; the aiocb address travels in TASK_VAL */
	task->task_func = func;
	TASK_TYPE(task) = taskAIO;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = (u_long) acb;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.aio ? 
		root->root_hooks.hook_add.aio(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskAIO]);
#endif
	TAILQ_INSERT_TAIL(&root->root_aio, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskAIO]);
#endif

	return task;
}

/*
 * schedAIORead() - Add AIO read task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @buffer = Buffer
 * @buflen = Buffer length
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
inline sched_task_t *
schedAIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *buffer, size_t buflen, off_t offset)
{
	struct aiocb *acb;
	off_t off;

	if (!root || !func || !buffer || !buflen)
		return NULL;

	/* resolve starting offset; -1 means "current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	/* calloc() zeroes the aiocb; replaces the malloc+memset pair */
	if (!(acb = calloc(1, sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	}

	acb->aio_fildes = fd;
	acb->aio_nbytes = buflen;
	acb->aio_buf = buffer;
	acb->aio_offset = off;
	/* completion is delivered through the scheduler's kqueue */
	acb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb->aio_sigevent.sigev_notify_kqueue = root->root_kq;
	acb->aio_sigevent.sigev_value.sival_ptr = acb;

	if (aio_read(acb)) {
		LOGERR;
		free(acb);
		return NULL;
	}

	/* NOTE(review): if schedAIO() fails here the aiocb is leaked while
	 * the read is already in flight; freeing it now would be worse.
	 * Ownership on the failure path should be confirmed upstream. */
	return schedAIO(root, func, arg, acb, buffer, buflen);
}

/*
 * schedAIOWrite() - Add AIO write task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @buffer = Buffer
 * @buflen = Buffer length
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
inline sched_task_t *
schedAIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		void *buffer, size_t buflen, off_t offset)
{
	struct aiocb *acb;
	off_t off;

	if (!root || !func || !buffer || !buflen)
		return NULL;

	/* resolve starting offset; -1 means "current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	/* calloc() zeroes the aiocb; replaces the malloc+memset pair */
	if (!(acb = calloc(1, sizeof(struct aiocb)))) {
		LOGERR;
		return NULL;
	}

	acb->aio_fildes = fd;
	acb->aio_nbytes = buflen;
	acb->aio_buf = buffer;
	acb->aio_offset = off;
	/* completion is delivered through the scheduler's kqueue */
	acb->aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb->aio_sigevent.sigev_notify_kqueue = root->root_kq;
	acb->aio_sigevent.sigev_value.sival_ptr = acb;

	if (aio_write(acb)) {
		LOGERR;
		free(acb);
		return NULL;
	}

	/* NOTE(review): if schedAIO() fails here the aiocb is leaked while
	 * the write is already in flight; freeing it now would be worse.
	 * Ownership on the failure path should be confirmed upstream. */
	return schedAIO(root, func, arg, acb, buffer, buflen);
}

#ifdef EVFILT_LIO
/*
 * schedLIO() - Add AIO bulk tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @acbs = AIO cb structure addresses
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIO(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, 
		struct aiocb ** __restrict acbs, void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func || !acbs || !opt_dlen)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the LIO task; the aiocb vector address travels in TASK_VAL */
	task->task_func = func;
	TASK_TYPE(task) = taskLIO;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = (u_long) acbs;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.lio ? 
		root->root_hooks.hook_add.lio(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskLIO]);
#endif
	TAILQ_INSERT_TAIL(&root->root_lio, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskLIO]);
#endif

	return task;
}

/*
 * schedLIORead() - Add list of AIO read tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @bufs = Buffer's list
 * @nbufs = Number of Buffers
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedLIORead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		struct iovec *bufs, size_t nbufs, off_t offset)
{
	struct sigevent sig;
	struct aiocb **acb;
	off_t off;
	register int i;

	if (!root || !func || !bufs || !nbufs)
		return NULL;

	/* resolve starting offset; -1 means "current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	/* calloc(nmemb, size) — original had the arguments swapped; it also
	 * zeroes the vector, so the follow-up memset was redundant */
	if (!(acb = calloc(nbufs, sizeof(struct aiocb*)))) {
		LOGERR;
		return NULL;
	}
	/* one aiocb per buffer; offsets advance by each buffer's length */
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		if (!(acb[i] = calloc(1, sizeof(struct aiocb)))) {
			LOGERR;
			/* vector was zeroed, so unfilled slots are NULL */
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					free(acb[i]);
			free(acb);
			return NULL;
		}
		acb[i]->aio_fildes = fd;
		acb[i]->aio_nbytes = bufs[i].iov_len;
		acb[i]->aio_buf = bufs[i].iov_base;
		acb[i]->aio_offset = off;
		acb[i]->aio_lio_opcode = LIO_READ;
	}
	/* single kqueue notification when the whole list completes */
	memset(&sig, 0, sizeof sig);
	sig.sigev_notify = SIGEV_KEVENT;
	sig.sigev_notify_kqueue = root->root_kq;
	sig.sigev_value.sival_ptr = acb;

	if (lio_listio(LIO_NOWAIT, acb, nbufs, &sig)) {
		LOGERR;
		for (i = 0; i < nbufs; i++)
			if (acb[i])
				free(acb[i]);
		free(acb);
		return NULL;
	}

	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
}

/*
 * schedLIOWrite() - Add list of AIO write tasks to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @fd = file descriptor
 * @bufs = Buffer's list
 * @nbufs = Number of Buffers
 * @offset = Offset from start of file, if =-1 from current position
 * return: NULL error or !=NULL new queued task
 */
inline sched_task_t *
schedLIOWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd, 
		struct iovec *bufs, size_t nbufs, off_t offset)
{
	struct sigevent sig;
	struct aiocb **acb;
	off_t off;
	register int i;

	if (!root || !func || !bufs || !nbufs)
		return NULL;

	/* resolve starting offset; -1 means "current file position" */
	if (offset == (off_t) -1) {
		off = lseek(fd, 0, SEEK_CUR);
		if (off == -1) {
			LOGERR;
			return NULL;
		}
	} else
		off = offset;

	/* calloc(nmemb, size) — original had the arguments swapped; it also
	 * zeroes the vector, so the follow-up memset was redundant */
	if (!(acb = calloc(nbufs, sizeof(struct aiocb*)))) {
		LOGERR;
		return NULL;
	}
	/* one aiocb per buffer; offsets advance by each buffer's length */
	for (i = 0; i < nbufs; off += bufs[i++].iov_len) {
		if (!(acb[i] = calloc(1, sizeof(struct aiocb)))) {
			LOGERR;
			/* vector was zeroed, so unfilled slots are NULL */
			for (i = 0; i < nbufs; i++)
				if (acb[i])
					free(acb[i]);
			free(acb);
			return NULL;
		}
		acb[i]->aio_fildes = fd;
		acb[i]->aio_nbytes = bufs[i].iov_len;
		acb[i]->aio_buf = bufs[i].iov_base;
		acb[i]->aio_offset = off;
		acb[i]->aio_lio_opcode = LIO_WRITE;
	}
	/* single kqueue notification when the whole list completes */
	memset(&sig, 0, sizeof sig);
	sig.sigev_notify = SIGEV_KEVENT;
	sig.sigev_notify_kqueue = root->root_kq;
	sig.sigev_value.sival_ptr = acb;

	if (lio_listio(LIO_NOWAIT, acb, nbufs, &sig)) {
		LOGERR;
		for (i = 0; i < nbufs; i++)
			if (acb[i])
				free(acb[i]);
		free(acb);
		return NULL;
	}

	return schedLIO(root, func, arg, (void*) acb, bufs, nbufs);
}
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

/*
 * schedTimer() - Add TIMER task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @ts = timeout argument structure
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedTimer(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *tmp, *t = NULL;
	void *ptr;
	struct timespec now;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskTIMER;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* calculate timeval structure */
	/* convert the relative timeout to an absolute monotonic deadline,
	 * normalizing tv_nsec into [0, 1e9) after the addition */
	clock_gettime(CLOCK_MONOTONIC, &now);
	now.tv_sec += ts.tv_sec;
	now.tv_nsec += ts.tv_nsec;
	if (now.tv_nsec >= 1000000000L) {
		now.tv_sec++;
		now.tv_nsec -= 1000000000L;
	} else if (now.tv_nsec < 0) {
		/* defensive: only reachable if the caller passed a negative tv_nsec */
		now.tv_sec--;
		now.tv_nsec += 1000000000L;
	}
	TASK_TS(task) = now;

	if (root->root_hooks.hook_add.timer)
		ptr = root->root_hooks.hook_add.timer(task, NULL);
	else
		ptr = NULL;

	/* non-NULL hook result means the add failed: recycle the task */
	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTIMER]);
#endif
#ifdef TIMER_WITHOUT_SORT
		TAILQ_INSERT_TAIL(&root->root_timer, TASK_ID(task), task_node);
#else
		/* keep root_timer sorted by deadline: insert before the first
		 * queued timer that does not expire earlier than this one
		 * (sched_timespeccmp takes the comparison operator as an argument) */
		TAILQ_FOREACH_SAFE(t, &root->root_timer, task_node, tmp)
			if (sched_timespeccmp(&TASK_TS(task), &TASK_TS(t), -) < 1)
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_timer, TASK_ID(task), task_node);
		else
			TAILQ_INSERT_BEFORE(t, TASK_ID(task), task_node);
#endif
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTIMER]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedEvent() - Add EVENT task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @val = additional func argument
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedEvent(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the EVENT task; the extra argument travels in TASK_VAL */
	task->task_func = func;
	TASK_TYPE(task) = taskEVENT;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = val;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.event ? 
		root->root_hooks.hook_add.event(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskEVENT]);
#endif
	TAILQ_INSERT_TAIL(&root->root_event, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskEVENT]);
#endif

	return task;
}


/*
 * schedTask() - Add regular task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @prio = regular task priority, 0 is hi priority for regular tasks
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedTask(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long prio, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *tmp, *t = NULL;
	void *ptr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskTASK;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	/* priority travels in TASK_VAL; 0 is the highest regular-task priority */
	TASK_VAL(task) = prio;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	if (root->root_hooks.hook_add.task)
		ptr = root->root_hooks.hook_add.task(task, NULL);
	else
		ptr = NULL;

	/* non-NULL hook result means the add failed: recycle the task */
	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTASK]);
#endif
		/* keep root_task sorted ascending by priority: insert before the
		 * first queued task with a strictly larger TASK_VAL */
		TAILQ_FOREACH_SAFE(t, &root->root_task, task_node, tmp)
			if (TASK_VAL(task) < TASK_VAL(t))
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_task, TASK_ID(task), task_node);
		else
			TAILQ_INSERT_BEFORE(t, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTASK]);
#endif
	} else
		task = sched_unuseTask(task);

	return task;
}

/*
 * schedSuspend() - Add Suspended task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @id = Trigger ID
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedSuspend(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long id, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *hres;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* describe the SUSPEND task; trigger id travels in TASK_VAL */
	task->task_func = func;
	TASK_TYPE(task) = taskSUSPEND;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = id;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* register with the backend; a non-NULL result reports an error */
	hres = root->root_hooks.hook_add.suspend ? 
		root->root_hooks.hook_add.suspend(task, NULL) : NULL;
	if (hres)
		return sched_unuseTask(task);

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&root->root_mtx[taskSUSPEND]);
#endif
	TAILQ_INSERT_TAIL(&root->root_suspend, TASK_ID(task), task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&root->root_mtx[taskSUSPEND]);
#endif

	return task;
}

/*
 * schedCallOnce() - Call once from scheduler
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @val = additional func argument
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: return value from called func
 */
sched_task_t *
schedCallOnce(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val, 
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task;
	void *res;

	if (!root || !func)
		return NULL;

	/* obtain a fresh (or recycled) task */
	if (!(task = sched_useTask(root)))
		return NULL;

	/* build a transient EVENT-type task, run it synchronously, recycle it */
	task->task_func = func;
	TASK_TYPE(task) = taskEVENT;
	TASK_ROOT(task) = root;
	TASK_ARG(task) = arg;
	TASK_VAL(task) = val;
	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	res = schedCall(task);
	sched_unuseTask(task);

	/* returns whatever the called function returned, not a queued task */
	return res;
}

/*
 * schedThread() - Add thread task to scheduler queue
 *
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @detach = Detach thread from scheduler, if !=0
 * @opt_data = Optional data
 * @opt_dlen = Optional data length
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedThread(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int detach, 
		void *opt_data, size_t opt_dlen)
{
#ifndef HAVE_LIBPTHREAD
	/* without pthreads, thread tasks cannot exist at all */
	sched_SetErr(ENOTSUP, "Not supported thread tasks");
	return NULL;
#else
	/*
	 * BUG FIX: the original used "#ifndef ... return NULL; #endif" and
	 * then fell through to pthread-dependent code, which cannot compile
	 * when HAVE_LIBPTHREAD is undefined; the tail must live in #else.
	 */
	sched_task_t *task;
	void *ptr;
	pthread_attr_t attr;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = sched_useTask(root)))
		return NULL;

	task->task_func = func;
	TASK_TYPE(task) = taskTHREAD;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;
	/* TASK_FLAG remembers detach state for sched_taskExit()'s join logic */
	TASK_FLAG(task) = detach ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* hand the thread attributes to the hook that actually spawns the thread */
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, TASK_FLAG(task));
	if (root->root_hooks.hook_add.thread)
		ptr = root->root_hooks.hook_add.thread(task, &attr);
	else
		ptr = NULL;
	pthread_attr_destroy(&attr);

	/* non-NULL hook result means the add failed: recycle the task */
	if (!ptr) {
		pthread_mutex_lock(&root->root_mtx[taskTHREAD]);
		TAILQ_INSERT_TAIL(&root->root_thread, TASK_ID(task), task_node);
		pthread_mutex_unlock(&root->root_mtx[taskTHREAD]);
	} else
		task = sched_unuseTask(task);

	return task;
#endif	/* HAVE_LIBPTHREAD */
}


FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>