1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: tasks.c,v 1.4.2.1 2012/01/08 03:50:11 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47:
48:
49: #pragma GCC visibility push(hidden)
50:
51: inline sched_task_t *
52: _sched_useTask(sched_root_task_t * __restrict root)
53: {
54: sched_task_t *task;
55:
56: TAILQ_FOREACH(task, &root->root_unuse, task_node) {
57: if (!TASK_ISLOCKED(task)) {
58: #ifdef HAVE_LIBPTHREAD
59: pthread_mutex_lock(&root->root_mtx[taskUNUSE]);
60: #endif
61: TAILQ_REMOVE(&root->root_unuse, task, task_node);
62: #ifdef HAVE_LIBPTHREAD
63: pthread_mutex_unlock(&root->root_mtx[taskUNUSE]);
64: #endif
65: break;
66: }
67: }
68:
69: if (!task) {
70: task = malloc(sizeof(sched_task_t));
71: if (!task) {
72: LOGERR;
73: return NULL;
74: }
75: }
76:
77: return task;
78: }
79:
80: inline sched_task_t *
81: _sched_unuseTask(sched_task_t * __restrict task)
82: {
83: TASK_UNLOCK(task);
84: TASK_TYPE(task) = taskUNUSE;
85: #ifdef HAVE_LIBPTHREAD
86: pthread_mutex_lock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
87: #endif
88: TAILQ_INSERT_TAIL(&TASK_ROOT(task)->root_unuse, task, task_node);
89: #ifdef HAVE_LIBPTHREAD
90: pthread_mutex_unlock(&TASK_ROOT(task)->root_mtx[taskUNUSE]);
91: #endif
92: task = NULL;
93:
94: return task;
95: }
96:
97: #pragma GCC visibility pop
98:
99:
100: /*
101: * schedRead() - Add READ I/O task to scheduler queue
102: * @root = root task
103: * @func = task execution function
104: * @arg = 1st func argument
105: * @fd = fd handle
106: * return: NULL error or !=NULL new queued task
107: */
108: sched_task_t *
109: schedRead(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd)
110: {
111: sched_task_t *task;
112: void *ptr;
113:
114: if (!root || !func)
115: return NULL;
116:
117: /* get new task */
118: if (!(task = _sched_useTask(root)))
119: return NULL;
120:
121: memset(task, 0, sizeof(sched_task_t));
122: task->task_id = 0;
123: task->task_lock = 0;
124: task->task_func = func;
125: TASK_TYPE(task) = taskREAD;
126: TASK_ROOT(task) = root;
127:
128: TASK_ARG(task) = arg;
129: TASK_FD(task) = fd;
130:
131: if (root->root_hooks.hook_add.read)
132: ptr = root->root_hooks.hook_add.read(task, NULL);
133: else
134: ptr = NULL;
135:
136: if (!ptr) {
137: #ifdef HAVE_LIBPTHREAD
138: pthread_mutex_lock(&root->root_mtx[taskREAD]);
139: #endif
140: TAILQ_INSERT_TAIL(&root->root_read, task, task_node);
141: #ifdef HAVE_LIBPTHREAD
142: pthread_mutex_unlock(&root->root_mtx[taskREAD]);
143: #endif
144: } else
145: task = _sched_unuseTask(task);
146:
147: return task;
148: }
149:
150: /*
151: * schedWrite() - Add WRITE I/O task to scheduler queue
152: * @root = root task
153: * @func = task execution function
154: * @arg = 1st func argument
155: * @fd = fd handle
156: * return: NULL error or !=NULL new queued task
157: */
158: sched_task_t *
159: schedWrite(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, int fd)
160: {
161: sched_task_t *task;
162: void *ptr;
163:
164: if (!root || !func)
165: return NULL;
166:
167: /* get new task */
168: if (!(task = _sched_useTask(root)))
169: return NULL;
170:
171: memset(task, 0, sizeof(sched_task_t));
172: task->task_id = 0;
173: task->task_lock = 0;
174: task->task_func = func;
175: TASK_TYPE(task) = taskWRITE;
176: TASK_ROOT(task) = root;
177:
178: TASK_ARG(task) = arg;
179: TASK_FD(task) = fd;
180:
181: if (root->root_hooks.hook_add.write)
182: ptr = root->root_hooks.hook_add.write(task, NULL);
183: else
184: ptr = NULL;
185:
186: if (!ptr) {
187: #ifdef HAVE_LIBPTHREAD
188: pthread_mutex_lock(&root->root_mtx[taskWRITE]);
189: #endif
190: TAILQ_INSERT_TAIL(&root->root_write, task, task_node);
191: #ifdef HAVE_LIBPTHREAD
192: pthread_mutex_unlock(&root->root_mtx[taskWRITE]);
193: #endif
194: } else
195: task = _sched_unuseTask(task);
196:
197: return task;
198: }
199:
200: /*
201: * schedTimer() - Add TIMER task to scheduler queue
202: * @root = root task
203: * @func = task execution function
204: * @arg = 1st func argument
205: * @tv = timeout argument structure
206: * return: NULL error or !=NULL new queued task
207: */
/*
 * schedTimer() - Add TIMER task to scheduler queue
 * @root = root task
 * @func = task execution function
 * @arg = 1st func argument
 * @tv = timeout argument structure (relative timeout, added to "now")
 * return: NULL error or !=NULL new queued task
 */
sched_task_t *
schedTimer(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, struct timeval tv)
{
	sched_task_t *task, *t = NULL;
	void *ptr;
	struct timeval now;
	struct timespec nw;

	if (!root || !func)
		return NULL;

	/* get new task */
	if (!(task = _sched_useTask(root)))
		return NULL;

	memset(task, 0, sizeof(sched_task_t));
	task->task_id = 0;
	task->task_lock = 0;
	task->task_func = func;
	TASK_TYPE(task) = taskTIMER;
	TASK_ROOT(task) = root;

	TASK_ARG(task) = arg;

	/* calculate timeval structure: convert the relative timeout @tv into
	 * an absolute deadline on the CLOCK_MONOTONIC timeline, normalizing
	 * tv_usec into [0, 1000000). The < 0 branch covers callers passing a
	 * negative tv_usec. */
	clock_gettime(CLOCK_MONOTONIC, &nw);
	now.tv_sec = nw.tv_sec + tv.tv_sec;
	now.tv_usec = nw.tv_nsec / 1000 + tv.tv_usec;
	if (now.tv_usec >= 1000000) {
		now.tv_sec++;
		now.tv_usec -= 1000000;
	} else if (now.tv_usec < 0) {
		now.tv_sec--;
		now.tv_usec += 1000000;
	}
	TASK_TV(task) = now;

	if (root->root_hooks.hook_add.timer)
		ptr = root->root_hooks.hook_add.timer(task, NULL);
	else
		ptr = NULL;

	/* non-NULL hook result means the task was rejected */
	if (!ptr) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&root->root_mtx[taskTIMER]);
#endif
#ifdef TIMER_WITHOUT_SORT
		TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
#else
		/* keep root_timer sorted by deadline: timercmp with the `-`
		 * operator yields the (sec or usec) difference, so `< 1`
		 * means task's deadline <= t's; insert before the first such
		 * t, or at the tail if every queued deadline is earlier */
		TAILQ_FOREACH(t, &root->root_timer, task_node)
			if (timercmp(&TASK_TV(task), &TASK_TV(t), -) < 1)
				break;
		if (!t)
			TAILQ_INSERT_TAIL(&root->root_timer, task, task_node);
		else
			TAILQ_INSERT_BEFORE(t, task, task_node);
#endif
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&root->root_mtx[taskTIMER]);
#endif
	} else
		task = _sched_unuseTask(task);

	return task;
}
273:
274: /*
275: * schedEvent() - Add EVENT task to scheduler queue
276: * @root = root task
277: * @func = task execution function
278: * @arg = 1st func argument
279: * @val = additional func argument
280: * return: NULL error or !=NULL new queued task
281: */
282: sched_task_t *
283: schedEvent(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val)
284: {
285: sched_task_t *task;
286: void *ptr;
287:
288: if (!root || !func)
289: return NULL;
290:
291: /* get new task */
292: if (!(task = _sched_useTask(root)))
293: return NULL;
294:
295: memset(task, 0, sizeof(sched_task_t));
296: task->task_id = 0;
297: task->task_lock = 0;
298: task->task_func = func;
299: TASK_TYPE(task) = taskEVENT;
300: TASK_ROOT(task) = root;
301:
302: TASK_ARG(task) = arg;
303: TASK_VAL(task) = val;
304:
305: if (root->root_hooks.hook_add.event)
306: ptr = root->root_hooks.hook_add.event(task, NULL);
307: else
308: ptr = NULL;
309:
310: if (!ptr) {
311: #ifdef HAVE_LIBPTHREAD
312: pthread_mutex_lock(&root->root_mtx[taskEVENT]);
313: #endif
314: TAILQ_INSERT_TAIL(&root->root_event, task, task_node);
315: #ifdef HAVE_LIBPTHREAD
316: pthread_mutex_unlock(&root->root_mtx[taskEVENT]);
317: #endif
318: } else
319: task = _sched_unuseTask(task);
320:
321: return task;
322: }
323:
324:
325: /*
326: * schedEventLo() - Add EVENT_Lo task to scheduler queue
327: * @root = root task
328: * @func = task execution function
329: * @arg = 1st func argument
330: * @val = additional func argument
331: * return: NULL error or !=NULL new queued task
332: */
333: sched_task_t *
334: schedEventLo(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val)
335: {
336: sched_task_t *task;
337: void *ptr;
338:
339: if (!root || !func)
340: return NULL;
341:
342: /* get new task */
343: if (!(task = _sched_useTask(root)))
344: return NULL;
345:
346: memset(task, 0, sizeof(sched_task_t));
347: task->task_id = 0;
348: task->task_lock = 0;
349: task->task_func = func;
350: TASK_TYPE(task) = taskEVENT;
351: TASK_ROOT(task) = root;
352:
353: TASK_ARG(task) = arg;
354: TASK_VAL(task) = val;
355:
356: if (root->root_hooks.hook_add.eventlo)
357: ptr = root->root_hooks.hook_add.eventlo(task, NULL);
358: else
359: ptr = NULL;
360:
361: if (!ptr) {
362: #ifdef HAVE_LIBPTHREAD
363: pthread_mutex_lock(&root->root_mtx[taskEVENTLO]);
364: #endif
365: TAILQ_INSERT_TAIL(&root->root_eventlo, task, task_node);
366: #ifdef HAVE_LIBPTHREAD
367: pthread_mutex_unlock(&root->root_mtx[taskEVENTLO]);
368: #endif
369: } else
370: task = _sched_unuseTask(task);
371:
372: return task;
373: }
374:
375: /*
376: * schedCallOnce() - Call once from scheduler
377: * @root = root task
378: * @func = task execution function
379: * @arg = 1st func argument
380: * @val = additional func argument
381: * return: return value from called func
382: */
383: sched_task_t *
384: schedCallOnce(sched_root_task_t * __restrict root, sched_task_func_t func, void *arg, u_long val)
385: {
386: sched_task_t *task;
387: void *ret;
388:
389: if (!root || !func)
390: return NULL;
391:
392: /* get new task */
393: if (!(task = _sched_useTask(root)))
394: return NULL;
395:
396: memset(task, 0, sizeof(sched_task_t));
397: task->task_id = 0;
398: task->task_lock = 0;
399: task->task_func = func;
400: TASK_TYPE(task) = taskEVENT;
401: TASK_ROOT(task) = root;
402:
403: TASK_ARG(task) = arg;
404: TASK_VAL(task) = val;
405:
406: ret = schedCall(task);
407:
408: _sched_unuseTask(task);
409: return ret;
410: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */