1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: tasks.c,v 1.5.2.1 2012/03/13 10:00:37 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47:
48:
49: #pragma GCC visibility push(hidden)
50:
51: inline sched_task_t *
52: _sched_useTask(sched_root_task_t * __restrict root)
53: {
54: sched_task_t *task;
55:
56: TAILQ_FOREACH(task, &root->root_unuse, task_node) {
57: if (!TASK_ISLOCKED(task)) {
58: #ifdef HAVE_LIBPTHREAD
59: pthread_mutex_lock(&root->root_mtx[taskUNUSE]);
60: #endif
61: TAILQ_REMOVE(&root->root_unuse, task, task_node);
62: #ifdef HAVE_LIBPTHREAD
63: pthread_mutex_unlock(&root->root_mtx[taskUNUSE]);
64: #endif
65: break;
66: }
67: }
68:
69: if (!task) {
70: task = malloc(sizeof(sched_task_t));
71: if (!task) {
72: LOGERR;
73: return NULL;
74: }
75: }
76:
77: return task;
78: }
79:
80: inline sched_task_t *
81: _sched_unuseTask(sched_task_t * __restrict task)
82: {
83: TASK_UNLOCK(task);
84: TASK_TYPE(task) = taskUNUSE;
85: #ifdef HAVE_LIBPTHREAD
86: pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
87: #endif
88: TAILQ_INSERT_TAIL(&TASK_ROOT(task)->root_unuse, task, task_node);
89: #ifdef HAVE_LIBPTHREAD
90: pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
91: #endif
92: task = NULL;
93:
94: return task;
95: }
96:
97: #pragma GCC visibility pop
98:
99:
100: /*
101: * schedRead() - Add READ I/O task to scheduler queue
102: *
103: * @root = root task
104: * @func = task execution function
105: * @arg = 1st func argument
106: * @fd = fd handle
107: * @opt_data = Optional data
108: * @opt_dlen = Optional data length
109: * return: NULL error or !=NULL new queued task
110: */
111: sched_task_t *
112: schedRead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
113: void *opt_data, size_t opt_dlen)
114: {
115: sched_task_t *task;
116: void *ptr;
117:
118: if (!root || !func)
119: return NULL;
120:
121: /* get new task */
122: if (!(task = _sched_useTask(root)))
123: return NULL;
124:
125: memset(task, 0, sizeof(sched_task_t));
126: task->task_id = 0;
127: task->task_lock = 0;
128: task->task_func = func;
129: TASK_TYPE(task) = taskREAD;
130: TASK_ROOT(task) = root;
131:
132: TASK_ARG(task) = arg;
133: TASK_FD(task) = fd;
134:
135: TASK_DATA(task) = opt_data;
136: TASK_DATLEN(task) = opt_dlen;
137:
138: if (root->root_hooks.hook_add.read)
139: ptr = root->root_hooks.hook_add.read(task, NULL);
140: else
141: ptr = NULL;
142:
143: if (!ptr) {
144: #ifdef HAVE_LIBPTHREAD
145: pthread_mutex_lock(&root->root_mtx[taskREAD]);
146: #endif
147: TAILQ_INSERT_TAIL(&root->root_read, task, task_node);
148: #ifdef HAVE_LIBPTHREAD
149: pthread_mutex_unlock(&root->root_mtx[taskREAD]);
150: #endif
151: } else
152: task = _sched_unuseTask(task);
153:
154: return task;
155: }
156:
157: /*
158: * schedWrite() - Add WRITE I/O task to scheduler queue
159: *
160: * @root = root task
161: * @func = task execution function
162: * @arg = 1st func argument
163: * @fd = fd handle
164: * @opt_data = Optional data
165: * @opt_dlen = Optional data length
166: * return: NULL error or !=NULL new queued task
167: */
168: sched_task_t *
169: schedWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd,
170: void *opt_data, size_t opt_dlen)
171: {
172: sched_task_t *task;
173: void *ptr;
174:
175: if (!root || !func)
176: return NULL;
177:
178: /* get new task */
179: if (!(task = _sched_useTask(root)))
180: return NULL;
181:
182: memset(task, 0, sizeof(sched_task_t));
183: task->task_id = 0;
184: task->task_lock = 0;
185: task->task_func = func;
186: TASK_TYPE(task) = taskWRITE;
187: TASK_ROOT(task) = root;
188:
189: TASK_ARG(task) = arg;
190: TASK_FD(task) = fd;
191:
192: TASK_DATA(task) = opt_data;
193: TASK_DATLEN(task) = opt_dlen;
194:
195: if (root->root_hooks.hook_add.write)
196: ptr = root->root_hooks.hook_add.write(task, NULL);
197: else
198: ptr = NULL;
199:
200: if (!ptr) {
201: #ifdef HAVE_LIBPTHREAD
202: pthread_mutex_lock(&root->root_mtx[taskWRITE]);
203: #endif
204: TAILQ_INSERT_TAIL(&root->root_write, task, task_node);
205: #ifdef HAVE_LIBPTHREAD
206: pthread_mutex_unlock(&root->root_mtx[taskWRITE]);
207: #endif
208: } else
209: task = _sched_unuseTask(task);
210:
211: return task;
212: }
213:
214: /*
215: * schedTimer() - Add TIMER task to scheduler queue
216: *
217: * @root = root task
218: * @func = task execution function
219: * @arg = 1st func argument
220: * @ts = timeout argument structure
221: * @opt_data = Optional data
222: * @opt_dlen = Optional data length
223: * return: NULL error or !=NULL new queued task
224: */
sched_task_t *
schedTimer(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timespec ts,
		void *opt_data, size_t opt_dlen)
{
	sched_task_t *task, *t = NULL;
	void *ptr;
	struct timespec now;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = _sched_useTask(root)))
		return NULL;

	memset(task, 0, sizeof(sched_task_t));
	task->task_id = 0;
	task->task_lock = 0;
	task->task_func = func;
	TASK_TYPE(task) = taskTIMER;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;

	TASK_DATA(task) = opt_data;
	TASK_DATLEN(task) = opt_dlen;

	/* calculate timeval structure */
	/* convert relative timeout @ts into an absolute monotonic deadline,
	 * then normalize tv_nsec back into [0, 1e9) after the addition
	 * (the < 0 branch covers a negative ts.tv_nsec input) */
	clock_gettime(CLOCK_MONOTONIC, &now);
	now.tv_sec += ts.tv_sec;
	now.tv_nsec += ts.tv_nsec;
	if (now.tv_nsec >= 1000000000L) {
		now.tv_sec++;
		now.tv_nsec -= 1000000000L;
	} else if (now.tv_nsec < 0) {
		now.tv_sec--;
		now.tv_nsec += 1000000000L;
	}
	TASK_TS(task) = now;

	/* non-NULL hook result rejects the task back to the unuse queue */
	if (root->root_hooks.hook_add.timer)
		ptr = root->root_hooks.hook_add.timer(task, NULL);
	else
		ptr = NULL;

	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTIMER]);
#endif
#ifdef TIMER_WITHOUT_SORT
		TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
#else
		/* keep the timer queue sorted by deadline: find the first
		 * entry expiring at-or-after ours (the macro's third argument
		 * is the operator applied to the timespec difference) */
		TAILQ_FOREACH(t, &root->root_timer, task_node)
			if (sched_timespeccmp(&TASK_TS(task), &TASK_TS(t), -) < 1)
				break;
		if (!t)		/* deadline beyond every queued timer -> append */
			TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
		else
			TAILQ_INSERT_BEFORE(t, task, task_node);
#endif
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTIMER]);
#endif
	} else
		task = _sched_unuseTask(task);

	return task;
}
293:
294: /*
295: * schedEvent() - Add EVENT task to scheduler queue
296: *
297: * @root = root task
298: * @func = task execution function
299: * @arg = 1st func argument
300: * @val = additional func argument
301: * @opt_data = Optional data
302: * @opt_dlen = Optional data length
303: * return: NULL error or !=NULL new queued task
304: */
305: sched_task_t *
306: schedEvent(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val,
307: void *opt_data, size_t opt_dlen)
308: {
309: sched_task_t *task;
310: void *ptr;
311:
312: if (!root || !func)
313: return NULL;
314:
315: /* get new task */
316: if (!(task = _sched_useTask(root)))
317: return NULL;
318:
319: memset(task, 0, sizeof(sched_task_t));
320: task->task_id = 0;
321: task->task_lock = 0;
322: task->task_func = func;
323: TASK_TYPE(task) = taskEVENT;
324: TASK_ROOT(task) = root;
325:
326: TASK_ARG(task) = arg;
327: TASK_VAL(task) = val;
328:
329: TASK_DATA(task) = opt_data;
330: TASK_DATLEN(task) = opt_dlen;
331:
332: if (root->root_hooks.hook_add.event)
333: ptr = root->root_hooks.hook_add.event(task, NULL);
334: else
335: ptr = NULL;
336:
337: if (!ptr) {
338: #ifdef HAVE_LIBPTHREAD
339: pthread_mutex_lock(&root->root_mtx[taskEVENT]);
340: #endif
341: TAILQ_INSERT_TAIL(&root->root_event, task, task_node);
342: #ifdef HAVE_LIBPTHREAD
343: pthread_mutex_unlock(&root->root_mtx[taskEVENT]);
344: #endif
345: } else
346: task = _sched_unuseTask(task);
347:
348: return task;
349: }
350:
351:
352: /*
353: * schedEventLo() - Add EVENT_Lo task to scheduler queue
354: *
355: * @root = root task
356: * @func = task execution function
357: * @arg = 1st func argument
358: * @val = additional func argument
359: * @opt_data = Optional data
360: * @opt_dlen = Optional data length
361: * return: NULL error or !=NULL new queued task
362: */
363: sched_task_t *
364: schedEventLo(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val,
365: void *opt_data, size_t opt_dlen)
366: {
367: sched_task_t *task;
368: void *ptr;
369:
370: if (!root || !func)
371: return NULL;
372:
373: /* get new task */
374: if (!(task = _sched_useTask(root)))
375: return NULL;
376:
377: memset(task, 0, sizeof(sched_task_t));
378: task->task_id = 0;
379: task->task_lock = 0;
380: task->task_func = func;
381: TASK_TYPE(task) = taskEVENT;
382: TASK_ROOT(task) = root;
383:
384: TASK_ARG(task) = arg;
385: TASK_VAL(task) = val;
386:
387: TASK_DATA(task) = opt_data;
388: TASK_DATLEN(task) = opt_dlen;
389:
390: if (root->root_hooks.hook_add.eventlo)
391: ptr = root->root_hooks.hook_add.eventlo(task, NULL);
392: else
393: ptr = NULL;
394:
395: if (!ptr) {
396: #ifdef HAVE_LIBPTHREAD
397: pthread_mutex_lock(&root->root_mtx[taskEVENTLO]);
398: #endif
399: TAILQ_INSERT_TAIL(&root->root_eventlo, task, task_node);
400: #ifdef HAVE_LIBPTHREAD
401: pthread_mutex_unlock(&root->root_mtx[taskEVENTLO]);
402: #endif
403: } else
404: task = _sched_unuseTask(task);
405:
406: return task;
407: }
408:
409: /*
410: * schedCallOnce() - Call once from scheduler
411: *
412: * @root = root task
413: * @func = task execution function
414: * @arg = 1st func argument
415: * @val = additional func argument
416: * @opt_data = Optional data
417: * @opt_dlen = Optional data length
418: * return: return value from called func
419: */
420: sched_task_t *
421: schedCallOnce(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val,
422: void *opt_data, size_t opt_dlen)
423: {
424: sched_task_t *task;
425: void *ret;
426:
427: if (!root || !func)
428: return NULL;
429:
430: /* get new task */
431: if (!(task = _sched_useTask(root)))
432: return NULL;
433:
434: memset(task, 0, sizeof(sched_task_t));
435: task->task_id = 0;
436: task->task_lock = 0;
437: task->task_func = func;
438: TASK_TYPE(task) = taskEVENT;
439: TASK_ROOT(task) = root;
440:
441: TASK_ARG(task) = arg;
442: TASK_VAL(task) = val;
443:
444: TASK_DATA(task) = opt_data;
445: TASK_DATLEN(task) = opt_dlen;
446:
447: ret = schedCall(task);
448:
449: _sched_unuseTask(task);
450: return ret;
451: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */