1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.3.4.1 2012/01/08 03:28:26 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: * @root = root task
53: * @data = optional data if !=NULL
54: * return: <0 errors and 0 ok
55: */
56: void *
57: sched_hook_init(void *root, void *data)
58: {
59: sched_root_task_t *r = root;
60:
61: if (!r || r->root_data.iov_base || r->root_data.iov_len)
62: return (void*) -1;
63:
64: r->root_data.iov_base = malloc(sizeof(struct sched_IO));
65: if (!r->root_data.iov_base) {
66: LOGERR;
67: return (void*) -1;
68: } else {
69: r->root_data.iov_len = sizeof(struct sched_IO);
70: memset(r->root_data.iov_base, 0, r->root_data.iov_len);
71: }
72:
73: r->root_kq = kqueue();
74: if (r->root_kq == -1) {
75: LOGERR;
76: return (void*) -1;
77: }
78:
79: return NULL;
80: }
81:
82: /*
83: * sched_hook_fini() - Default FINI hook
84: * @root = root task
85: * @arg = unused
86: * return: <0 errors and 0 ok
87: */
88: void *
89: sched_hook_fini(void *root, void *arg __unused)
90: {
91: sched_root_task_t *r = root;
92:
93: if (!r)
94: return (void*) -1;
95:
96: if (r->root_kq > 2) {
97: close(r->root_kq);
98: r->root_kq = 0;
99: }
100:
101: if (r->root_data.iov_base && r->root_data.iov_len) {
102: free(r->root_data.iov_base);
103: r->root_data.iov_base = NULL;
104: r->root_data.iov_len = 0;
105: }
106:
107: return NULL;
108: }
109:
110: /*
111: * sched_hook_cancel() - Default CANCEL hook
112: * @task = current task
113: * @arg = unused
114: * return: <0 errors and 0 ok
115: */
116: void *
117: sched_hook_cancel(void *task, void *arg __unused)
118: {
119: struct sched_IO *io;
120: sched_task_t *t = task;
121: struct kevent chg[1];
122: struct timespec timeout = { 0, 0 };
123:
124: if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
125: return (void*) -1;
126: else
127: io = ROOT_DATA(t->task_root);
128:
129: switch (TASK_TYPE(t)) {
130: case taskREAD:
131: #ifdef __NetBSD__
132: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
133: #else
134: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
135: #endif
136: kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
137: FD_CLR(TASK_FD(t), &io->rfd);
138: break;
139: case taskWRITE:
140: #ifdef __NetBSD__
141: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
142: #else
143: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
144: #endif
145: kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
146: FD_CLR(TASK_FD(t), &io->wfd);
147: break;
148: default:
149: break;
150: }
151:
152: return NULL;
153: }
154:
155: /*
156: * sched_hook_read() - Default READ hook
157: * @task = current task
158: * @arg = unused
159: * return: <0 errors and 0 ok
160: */
161: void *
162: sched_hook_read(void *task, void *arg __unused)
163: {
164: struct sched_IO *io;
165: sched_task_t *t = task;
166: struct kevent chg[1];
167: struct timespec timeout = { 0, 0 };
168:
169: if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
170: return (void*) -1;
171: else
172: io = ROOT_DATA(t->task_root);
173:
174: if (FD_ISSET(TASK_FD(t), &io->rfd))
175: return NULL;
176: else
177: FD_SET(TASK_FD(t), &io->rfd);
178:
179: #ifdef __NetBSD__
180: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
181: #else
182: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (void*) TASK_FD(t));
183: #endif
184: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
185: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
186: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
187: else
188: LOGERR;
189: return (void*) -1;
190: }
191:
192: return NULL;
193: }
194:
195: /*
196: * sched_hook_write() - Default WRITE hook
197: * @task = current task
198: * @arg = unused
199: * return: <0 errors and 0 ok
200: */
201: void *
202: sched_hook_write(void *task, void *arg __unused)
203: {
204: struct sched_IO *io;
205: sched_task_t *t = task;
206: struct kevent chg[1];
207: struct timespec timeout = { 0, 0 };
208:
209: if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
210: return (void*) -1;
211: else
212: io = ROOT_DATA(t->task_root);
213:
214: if (FD_ISSET(TASK_FD(t), &io->wfd))
215: return NULL;
216: else
217: FD_SET(TASK_FD(t), &io->wfd);
218:
219: #ifdef __NetBSD__
220: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
221: #else
222: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (void*) TASK_FD(t));
223: #endif
224: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
225: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
226: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
227: else
228: LOGERR;
229: return (void*) -1;
230: }
231:
232: return NULL;
233: }
234:
235: /*
236: * sched_hook_fetch() - Default FETCH hook
237: * @root = root task
238: * @arg = unused
239: * return: NULL error or !=NULL fetched task
240: */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	struct sched_IO *io;
	sched_root_task_t *r = root;
	sched_task_t *task;
	struct timeval now, m, mtmp;
	struct timespec nw, *timeout;
	struct kevent evt[1], res[KQ_EVENTS];
	register int i;
	int en;

	if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
		return NULL;

	/* get new task by queue priority */
retry:
	/* taskEVENT has top priority: hand one straight back (moved to
	 * the unuse queue so the caller's completion path can recycle it) */
	while ((task = TAILQ_FIRST(&r->root_event))) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
#endif
		TAILQ_REMOVE(&r->root_event, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
#endif
		task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
		return task;
	}
	/* next: anything already sitting in the ready queue */
	while ((task = TAILQ_FIRST(&r->root_ready))) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
		TAILQ_REMOVE(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
		return task;
	}

	/* compute the kevent wait interval from the timer queue */
#ifdef TIMER_WITHOUT_SORT
	clock_gettime(CLOCK_MONOTONIC, &nw);
	now.tv_sec = nw.tv_sec;
	now.tv_usec = nw.tv_nsec / 1000;

	/* unsorted timer queue: scan for the earliest absolute deadline.
	 * NOTE: timercmp is used with `-` as the CMP operator, turning the
	 * macro into a signed difference that is then compared against 0 */
	timerclear(&r->root_wait);
	TAILQ_FOREACH(task, &r->root_timer, task_node) {
		if (!timerisset(&r->root_wait))
			r->root_wait = TASK_TV(task);
		else if (timercmp(&TASK_TV(task), &r->root_wait, -) < 0)
			r->root_wait = TASK_TV(task);
	}

	if (TAILQ_FIRST(&r->root_timer)) {
		/* convert absolute deadline to a relative wait */
		m = r->root_wait;
		timersub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		r->root_wait.tv_sec = r->root_wait.tv_usec = -1;
	}
#else
	/* sorted timer queue: head is the earliest deadline; only wait on
	 * it when no low-priority events are pending */
	if (!TAILQ_FIRST(&r->root_eventlo) && (task = TAILQ_FIRST(&r->root_timer))) {
		clock_gettime(CLOCK_MONOTONIC, &nw);
		now.tv_sec = nw.tv_sec;
		now.tv_usec = nw.tv_nsec / 1000;

		m = TASK_TV(task);
		timersub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		r->root_wait.tv_sec = r->root_wait.tv_usec = -1;
	}
#endif
	/* if present member of eventLo, set NOWAIT */
	if (TAILQ_FIRST(&r->root_eventlo))
		timerclear(&r->root_wait);

	/* NOTE(review): `&&` means a wait like {-1, 0} is still treated as a
	 * finite timeout rather than INFTIM — confirm against kqueue(2);
	 * expired timers can also yield a negative mtmp here */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_usec != -1) {
		nw.tv_sec = r->root_wait.tv_sec;
		nw.tv_nsec = r->root_wait.tv_usec * 1000;
		timeout = &nw;
	} else /* wait INFTIM */
		timeout = NULL;
	/* block (or poll) in the kernel for up to KQ_EVENTS events */
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else
			LOGERR;
#ifdef NDEBUG
		/* kevent no exit by error, if non-debug version */
		goto retry;
#else
		/* diagnostic exit from scheduler if kevent error occur */
		return NULL;
#endif
	}

	nw.tv_sec = nw.tv_nsec = 0;
	/* Go and catch the cat into pipes ... */
	for (i = 0; i < en; i++) {
		/* prepare a matching EV_DELETE change for this event; flushed
		 * through the second kevent() call at the bottom of the loop */
		memcpy(evt, &res[i], sizeof evt);
		evt->flags = EV_DELETE;
		/* Put read/write task to ready queue */
		switch (res[i].filter) {
			case EVFILT_READ:
				/* find the read task registered for this fd
				 * (fd was stashed in udata at registration) */
				TAILQ_FOREACH(task, &r->root_read, task_node) {
					if (TASK_FD(task) != ((intptr_t) res[i].udata))
						continue;
					/* remove read handle */
					io = ROOT_DATA(task->task_root);
					FD_CLR(TASK_FD(task), &io->rfd);

#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREAD]);
#endif
					TAILQ_REMOVE(&r->root_read, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
#endif
					/* on EOF, let the exception hook decide whether
					 * the task is dropped (unuse) or still dispatched */
					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
							task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
						} else {
							task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
						}
					} else {
						task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					}
					break;
				}
				break;
			case EVFILT_WRITE:
				/* symmetric handling for write tasks */
				TAILQ_FOREACH(task, &r->root_write, task_node) {
					if (TASK_FD(task) != ((intptr_t) res[i].udata))
						continue;
					/* remove write handle */
					io = ROOT_DATA(task->task_root);
					FD_CLR(TASK_FD(task), &io->wfd);

#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
#endif
					TAILQ_REMOVE(&r->root_write, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
#endif
					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
							task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
						} else {
							task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
						}
					} else {
						task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					}
					break;
				}
				break;
		}
		/* flush the EV_DELETE change; nw is {0,0} so this never blocks */
		if (kevent(r->root_kq, evt, 1, NULL, 0, &nw) == -1) {
			if (r->root_hooks.hook_exec.exception) {
				if (r->root_hooks.hook_exec.exception(r, NULL))
					return NULL;
			} else
				LOGERR;
		}
	}

	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &nw);
	now.tv_sec = nw.tv_sec;
	now.tv_usec = nw.tv_nsec / 1000;

	/* move every expired timer task onto the ready queue */
	TAILQ_FOREACH(task, &r->root_timer, task_node)
		if (timercmp(&now, &TASK_TV(task), -) >= 0) {
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
#endif
			TAILQ_REMOVE(&r->root_timer, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
#endif
			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		}

	/* put eventlo priority task to ready queue, if there is no ready task or
		reach max missed fetch-rotate */
	if ((task = TAILQ_FIRST(&r->root_eventlo))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_eventlo_miss > MAX_EVENTLO_MISS) {
			r->root_eventlo_miss = 0;

#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskEVENTLO]);
#endif
			TAILQ_REMOVE(&r->root_eventlo, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskEVENTLO]);
#endif
			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		} else
			r->root_eventlo_miss++;
	} else
		r->root_eventlo_miss = 0;

	/* OK, lets get ready task !!! */
	if (!(task = TAILQ_FIRST(&r->root_ready)))
		goto retry;
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
	TAILQ_REMOVE(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
	task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
	return task;
}
538:
539: /*
540: * sched_hook_exception() - Default EXCEPTION hook
541: * @root = root task
542: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
543: * return: <0 errors and 0 ok
544: */
545: void *
546: sched_hook_exception(void *root, void *arg)
547: {
548: sched_root_task_t *r = root;
549:
550: if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
551: return NULL;
552:
553: /* custom exception handling ... */
554: if (arg) {
555: if (arg == (void*) EV_EOF)
556: return NULL;
557: return (void*) -1; /* raise scheduler error!!! */
558: }
559:
560: /* if error hook exists */
561: if (r->root_hooks.hook_root.error)
562: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
563:
564: /* default case! */
565: LOGERR;
566: return NULL;
567: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */