1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.27.2.5 2014/05/21 23:05:43 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004 - 2014
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: *
53: * @root = root task
54: * @arg = unused
55: * return: <0 errors and 0 ok
56: */
57: void *
58: sched_hook_init(void *root, void *arg __unused)
59: {
60: sched_root_task_t *r = root;
61:
62: if (!r)
63: return (void*) -1;
64:
65: #if SUP_ENABLE == KQ_SUPPORT
66: r->root_kq = kqueue();
67: if (r->root_kq == -1) {
68: LOGERR;
69: return (void*) -1;
70: }
71: #elif SUP_ENABLE == EP_SUPPORT
72: r->root_kq = epoll_create(KQ_EVENTS);
73: if (r->root_kq == -1) {
74: LOGERR;
75: return (void*) -1;
76: }
77: #else
78: r->root_kq ^= r->root_kq;
79: FD_ZERO(&r->root_fds[0]);
80: FD_ZERO(&r->root_fds[1]);
81: #endif
82:
83: return NULL;
84: }
85:
86: /*
87: * sched_hook_fini() - Default FINI hook
88: *
89: * @root = root task
90: * @arg = unused
91: * return: <0 errors and 0 ok
92: */
93: void *
94: sched_hook_fini(void *root, void *arg __unused)
95: {
96: sched_root_task_t *r = root;
97:
98: if (!r)
99: return (void*) -1;
100:
101: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
102: if (r->root_kq > 2) {
103: close(r->root_kq);
104: r->root_kq = 0;
105: }
106: #else
107: FD_ZERO(&r->root_fds[1]);
108: FD_ZERO(&r->root_fds[0]);
109: r->root_kq ^= r->root_kq;
110: #endif
111:
112: return NULL;
113: }
114:
115: /*
116: * sched_hook_cancel() - Default CANCEL hook
117: *
118: * @task = current task
119: * @arg = unused
120: * return: <0 errors and 0 ok
121: */
122: void *
123: sched_hook_cancel(void *task, void *arg __unused)
124: {
125: sched_task_t *t = task;
126: #if SUP_ENABLE == KQ_SUPPORT
127: struct kevent chg[1];
128: struct timespec timeout = { 0, 0 };
129: #else
130: sched_root_task_t *r = NULL;
131: register int i = -1;
132: #endif
133: #ifdef AIO_SUPPORT
134: struct aiocb *acb;
135: #ifdef EVFILT_LIO
136: register int i = 0;
137: struct aiocb **acbs;
138: #endif /* EVFILT_LIO */
139: #endif /* AIO_SUPPORT */
140:
141: if (!t || !TASK_ROOT(t))
142: return (void*) -1;
143: #if SUP_ENABLE != KQ_SUPPORT
144: r = TASK_ROOT(t);
145: #endif
146:
147: switch (TASK_TYPE(t)) {
148: case taskREAD:
149: #if SUP_ENABLE == KQ_SUPPORT
150: #ifdef __NetBSD__
151: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
152: #else
153: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
154: #endif
155: #elif SUP_ENABLE == EP_SUPPORT
156: i = TASK_FD(t);
157: #else
158: FD_CLR(TASK_FD(t), &r->root_fds[0]);
159:
160: /* optimize select */
161: for (i = r->root_kq - 1; i > 2; i--)
162: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
163: break;
164: if (i > 2)
165: r->root_kq = i + 1;
166: #endif
167: break;
168: case taskWRITE:
169: #if SUP_ENABLE == KQ_SUPPORT
170: #ifdef __NetBSD__
171: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
172: #else
173: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
174: #endif
175: #elif SUP_ENABLE == EP_SUPPORT
176: i = TASK_FD(t);
177: #else
178: FD_CLR(TASK_FD(t), &r->root_fds[1]);
179:
180: /* optimize select */
181: for (i = r->root_kq - 1; i > 2; i--)
182: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
183: break;
184: if (i > 2)
185: r->root_kq = i + 1;
186: #endif
187: break;
188: case taskALARM:
189: #if SUP_ENABLE == KQ_SUPPORT
190: #ifdef __NetBSD__
191: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
192: 0, 0, (intptr_t) TASK_DATA(t));
193: #else
194: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
195: 0, 0, (void*) TASK_DATA(t));
196: #endif
197: #endif
198: break;
199: case taskNODE:
200: #if SUP_ENABLE == KQ_SUPPORT
201: #ifdef __NetBSD__
202: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
203: #else
204: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
205: #endif
206: #endif
207: break;
208: case taskPROC:
209: #if SUP_ENABLE == KQ_SUPPORT
210: #ifdef __NetBSD__
211: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
212: #else
213: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
214: #endif
215: #endif
216: break;
217: case taskSIGNAL:
218: #if SUP_ENABLE == KQ_SUPPORT
219: #ifdef __NetBSD__
220: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
221: #else
222: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
223: #endif
224: /* restore signal */
225: signal(TASK_VAL(t), SIG_DFL);
226: #endif
227: break;
228: #ifdef AIO_SUPPORT
229: case taskAIO:
230: #if SUP_ENABLE == KQ_SUPPORT
231: #ifdef __NetBSD__
232: EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
233: #else
234: EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
235: #endif
236: acb = (struct aiocb*) TASK_VAL(t);
237: if (acb) {
238: if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
239: aio_return(acb);
240: free(acb);
241: TASK_VAL(t) = 0;
242: }
243: #endif
244: break;
245: #ifdef EVFILT_LIO
246: case taskLIO:
247: #if SUP_ENABLE == KQ_SUPPORT
248: #ifdef __NetBSD__
249: EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
250: #else
251: EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
252: #endif
253: acbs = (struct aiocb**) TASK_VAL(t);
254: if (acbs) {
255: for (i = 0; i < TASK_DATLEN(t); i++) {
256: if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
257: aio_return(acbs[i]);
258: free(acbs[i]);
259: }
260: free(acbs);
261: TASK_VAL(t) = 0;
262: }
263: #endif
264: break;
265: #endif /* EVFILT_LIO */
266: #endif /* AIO_SUPPORT */
267: #ifdef EVFILT_USER
268: case taskUSER:
269: #if SUP_ENABLE == KQ_SUPPORT
270: #ifdef __NetBSD__
271: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
272: #else
273: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
274: #endif
275: #endif
276: break;
277: #endif /* EVFILT_USER */
278: case taskTHREAD:
279: #ifdef HAVE_LIBPTHREAD
280: if (TASK_VAL(t))
281: pthread_cancel((pthread_t) TASK_VAL(t));
282: #endif
283: return NULL;
284: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
285: case taskRTC:
286: timer_delete((timer_t) TASK_FLAG(t));
287: schedCancel((sched_task_t*) TASK_RET(t));
288: return NULL;
289: #endif /* HAVE_TIMER_CREATE */
290: default:
291: return NULL;
292: }
293:
294: #if SUP_ENABLE == KQ_SUPPORT
295: kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
296: #elif SUP_ENABLE == EP_SUPPORT
297: if (i != -1)
298: epoll_ctl(TASK_ROOT(t)->root_kq, EPOLL_CTL_DEL, i, NULL);
299: #endif
300: return NULL;
301: }
302:
#ifdef HAVE_LIBPTHREAD
/*
 * sched_hook_thread() - Default THREAD hook
 *
 * Spawns a worker thread for the task with all signals blocked during
 * creation, stores the thread id in TASK_VAL and locks the task.
 *
 * @task = current task
 * @arg = pthread attributes
 * return: <0 errors and 0 ok
 */
void *
sched_hook_thread(void *task, void *arg)
{
	sched_task_t *t = task;
	pthread_t tid;
	sigset_t blockall, saved;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* keep every signal away from the child thread while it starts up */
	sigfillset(&blockall);
	pthread_sigmask(SIG_BLOCK, &blockall, &saved);

	errno = pthread_create(&tid, (pthread_attr_t*) arg,
			(void *(*)(void*)) _sched_threadWrapper, t);
	if (errno) {
		LOGERR;
		pthread_sigmask(SIG_SETMASK, &saved, NULL);
		return (void*) -1;
	}
	TASK_VAL(t) = (u_long) tid;

	if (!TASK_ISLOCKED(t))
		TASK_LOCK(t);

	/* restore the caller's signal mask before returning */
	pthread_sigmask(SIG_SETMASK, &saved, NULL);
	return NULL;
}
#endif
338:
339: /*
340: * sched_hook_read() - Default READ hook
341: *
342: * @task = current task
343: * @arg = unused
344: * return: <0 errors and 0 ok
345: */
346: void *
347: sched_hook_read(void *task, void *arg __unused)
348: {
349: sched_task_t *t = task;
350: #if SUP_ENABLE == KQ_SUPPORT
351: struct kevent chg[1];
352: struct timespec timeout = { 0, 0 };
353: #elif SUP_ENABLE == EP_SUPPORT
354: struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI | EPOLLRDHUP, .data.fd = 0 };
355: #else
356: sched_root_task_t *r = NULL;
357: #endif
358:
359: if (!t || !TASK_ROOT(t))
360: return (void*) -1;
361:
362: #if SUP_ENABLE == KQ_SUPPORT
363: #ifdef __NetBSD__
364: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
365: #else
366: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
367: #endif
368: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
369: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
370: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
371: else
372: LOGERR;
373: return (void*) -1;
374: }
375: #elif SUP_ENABLE == EP_SUPPORT
376: ee.data.fd = TASK_FD(t);
377: if (epoll_ctl(TASK_ROOT(t)->root_kq, EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
378: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
379: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
380: else
381: LOGERR;
382: return (void*) -1;
383: }
384: #else
385: r = TASK_ROOT(t);
386: FD_SET(TASK_FD(t), &r->root_fds[0]);
387: if (TASK_FD(t) >= r->root_kq)
388: r->root_kq = TASK_FD(t) + 1;
389: #endif
390:
391: return NULL;
392: }
393:
394: /*
395: * sched_hook_write() - Default WRITE hook
396: *
397: * @task = current task
398: * @arg = unused
399: * return: <0 errors and 0 ok
400: */
401: void *
402: sched_hook_write(void *task, void *arg __unused)
403: {
404: sched_task_t *t = task;
405: #if SUP_ENABLE == KQ_SUPPORT
406: struct kevent chg[1];
407: struct timespec timeout = { 0, 0 };
408: #elif SUP_ENABLE == EP_SUPPORT
409: struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
410: #else
411: sched_root_task_t *r = NULL;
412: #endif
413:
414: if (!t || !TASK_ROOT(t))
415: return (void*) -1;
416:
417: #if SUP_ENABLE == KQ_SUPPORT
418: #ifdef __NetBSD__
419: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
420: #else
421: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
422: #endif
423: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
424: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
425: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
426: else
427: LOGERR;
428: return (void*) -1;
429: }
430: #elif SUP_ENABLE == EP_SUPPORT
431: ee.data.fd = TASK_FD(t);
432: if (epoll_ctl(TASK_ROOT(t)->root_kq, EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
433: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
434: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
435: else
436: LOGERR;
437: return (void*) -1;
438: }
439: #else
440: r = TASK_ROOT(t);
441: FD_SET(TASK_FD(t), &r->root_fds[1]);
442: if (TASK_FD(t) >= r->root_kq)
443: r->root_kq = TASK_FD(t) + 1;
444: #endif
445:
446: return NULL;
447: }
448:
449: /*
450: * sched_hook_alarm() - Default ALARM hook
451: *
452: * @task = current task
453: * @arg = unused
454: * return: <0 errors and 0 ok
455: */
456: void *
457: sched_hook_alarm(void *task, void *arg __unused)
458: {
459: #if SUP_ENABLE == KQ_SUPPORT
460: sched_task_t *t = task;
461: struct kevent chg[1];
462: struct timespec timeout = { 0, 0 };
463:
464: if (!t || !TASK_ROOT(t))
465: return (void*) -1;
466:
467: #ifdef __NetBSD__
468: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
469: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
470: (intptr_t) TASK_DATA(t));
471: #else
472: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
473: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
474: (void*) TASK_DATA(t));
475: #endif
476: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
477: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
478: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
479: else
480: LOGERR;
481: return (void*) -1;
482: }
483:
484: #endif
485: return NULL;
486: }
487:
488: /*
489: * sched_hook_node() - Default NODE hook
490: *
491: * @task = current task
492: * @arg = unused
493: * return: <0 errors and 0 ok
494: */
495: void *
496: sched_hook_node(void *task, void *arg __unused)
497: {
498: #if SUP_ENABLE == KQ_SUPPORT
499: sched_task_t *t = task;
500: struct kevent chg[1];
501: struct timespec timeout = { 0, 0 };
502:
503: if (!t || !TASK_ROOT(t))
504: return (void*) -1;
505:
506: #ifdef __NetBSD__
507: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
508: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
509: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
510: #else
511: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
512: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
513: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
514: #endif
515: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
516: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
517: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
518: else
519: LOGERR;
520: return (void*) -1;
521: }
522:
523: #endif
524: return NULL;
525: }
526:
527: /*
528: * sched_hook_proc() - Default PROC hook
529: *
530: * @task = current task
531: * @arg = unused
532: * return: <0 errors and 0 ok
533: */
534: void *
535: sched_hook_proc(void *task, void *arg __unused)
536: {
537: #if SUP_ENABLE == KQ_SUPPORT
538: sched_task_t *t = task;
539: struct kevent chg[1];
540: struct timespec timeout = { 0, 0 };
541:
542: if (!t || !TASK_ROOT(t))
543: return (void*) -1;
544:
545: #ifdef __NetBSD__
546: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
547: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
548: #else
549: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
550: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
551: #endif
552: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
553: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
554: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
555: else
556: LOGERR;
557: return (void*) -1;
558: }
559:
560: #endif
561: return NULL;
562: }
563:
564: /*
565: * sched_hook_signal() - Default SIGNAL hook
566: *
567: * @task = current task
568: * @arg = unused
569: * return: <0 errors and 0 ok
570: */
571: void *
572: sched_hook_signal(void *task, void *arg __unused)
573: {
574: #if SUP_ENABLE == KQ_SUPPORT
575: sched_task_t *t = task;
576: struct kevent chg[1];
577: struct timespec timeout = { 0, 0 };
578:
579: if (!t || !TASK_ROOT(t))
580: return (void*) -1;
581:
582: /* ignore signal */
583: signal(TASK_VAL(t), SIG_IGN);
584:
585: #ifdef __NetBSD__
586: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
587: #else
588: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
589: #endif
590: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
591: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
592: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
593: else
594: LOGERR;
595: return (void*) -1;
596: }
597: #else
598: #if 0
599: sched_task_t *t = task;
600: struct sigaction sa;
601:
602: memset(&sa, 0, sizeof sa);
603: sigemptyset(&sa.sa_mask);
604: sa.sa_handler = _sched_sigHandler;
605: sa.sa_flags = SA_RESETHAND | SA_RESTART;
606:
607: if (sigaction(TASK_VAL(t), &sa, NULL) == -1) {
608: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
609: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
610: else
611: LOGERR;
612: return (void*) -1;
613: }
614: #endif /* 0 */
615: #endif
616: return NULL;
617: }
618:
619: /*
620: * sched_hook_user() - Default USER hook
621: *
622: * @task = current task
623: * @arg = unused
624: * return: <0 errors and 0 ok
625: */
626: #ifdef EVFILT_USER
627: void *
628: sched_hook_user(void *task, void *arg __unused)
629: {
630: #if SUP_ENABLE == KQ_SUPPORT
631: sched_task_t *t = task;
632: struct kevent chg[1];
633: struct timespec timeout = { 0, 0 };
634:
635: if (!t || !TASK_ROOT(t))
636: return (void*) -1;
637:
638: #ifdef __NetBSD__
639: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
640: 0, (intptr_t) TASK_VAL(t));
641: #else
642: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
643: 0, (void*) TASK_VAL(t));
644: #endif
645: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
646: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
647: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
648: else
649: LOGERR;
650: return (void*) -1;
651: }
652:
653: #endif
654: return NULL;
655: }
656: #endif
657:
658: /*
659: * sched_hook_fetch() - Default FETCH hook
660: *
661: * @root = root task
662: * @arg = unused
663: * return: NULL error or !=NULL fetched task
664: */
665: void *
666: sched_hook_fetch(void *root, void *arg __unused)
667: {
668: sched_root_task_t *r = root;
669: sched_task_t *task, *tmp;
670: struct timespec now, m, mtmp;
671: #if SUP_ENABLE == KQ_SUPPORT
672: struct kevent evt[1], res[KQ_EVENTS];
673: struct timespec *timeout;
674: #elif SUP_ENABLE == EP_SUPPORT
675: struct epoll_event res[KQ_EVENTS];
676: u_long timeout = 0;
677: #else
678: struct timeval *timeout, tv;
679: fd_set rfd, wfd, xfd;
680: #endif
681: register int i, flg;
682: int en;
683: #ifdef AIO_SUPPORT
684: int len, fd;
685: struct aiocb *acb;
686: #ifdef EVFILT_LIO
687: int l;
688: register int j;
689: off_t off;
690: struct aiocb **acbs;
691: struct iovec *iv;
692: #endif /* EVFILT_LIO */
693: #endif /* AIO_SUPPORT */
694:
695: if (!r)
696: return NULL;
697:
698: /* get new task by queue priority */
699: while ((task = TAILQ_FIRST(&r->root_event))) {
700: #ifdef HAVE_LIBPTHREAD
701: pthread_mutex_lock(&r->root_mtx[taskEVENT]);
702: #endif
703: TAILQ_REMOVE(&r->root_event, task, task_node);
704: #ifdef HAVE_LIBPTHREAD
705: pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
706: #endif
707: task->task_type = taskUNUSE;
708: #ifdef HAVE_LIBPTHREAD
709: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
710: #endif
711: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
712: #ifdef HAVE_LIBPTHREAD
713: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
714: #endif
715: return task;
716: }
717: while ((task = TAILQ_FIRST(&r->root_ready))) {
718: #ifdef HAVE_LIBPTHREAD
719: pthread_mutex_lock(&r->root_mtx[taskREADY]);
720: #endif
721: TAILQ_REMOVE(&r->root_ready, task, task_node);
722: #ifdef HAVE_LIBPTHREAD
723: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
724: #endif
725: task->task_type = taskUNUSE;
726: #ifdef HAVE_LIBPTHREAD
727: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
728: #endif
729: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
730: #ifdef HAVE_LIBPTHREAD
731: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
732: #endif
733: return task;
734: }
735:
736: #ifdef TIMER_WITHOUT_SORT
737: clock_gettime(CLOCK_MONOTONIC, &now);
738:
739: sched_timespecclear(&r->root_wait);
740: TAILQ_FOREACH(task, &r->root_timer, task_node) {
741: if (!sched_timespecisset(&r->root_wait))
742: r->root_wait = TASK_TS(task);
743: else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
744: r->root_wait = TASK_TS(task);
745: }
746:
747: if (TAILQ_FIRST(&r->root_timer)) {
748: m = r->root_wait;
749: sched_timespecsub(&m, &now, &mtmp);
750: r->root_wait = mtmp;
751: } else {
752: /* set wait INFTIM */
753: sched_timespecinf(&r->root_wait);
754: }
755: #else /* ! TIMER_WITHOUT_SORT */
756: if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
757: clock_gettime(CLOCK_MONOTONIC, &now);
758:
759: m = TASK_TS(task);
760: sched_timespecsub(&m, &now, &mtmp);
761: r->root_wait = mtmp;
762: } else {
763: /* set wait INFTIM */
764: sched_timespecinf(&r->root_wait);
765: }
766: #endif /* TIMER_WITHOUT_SORT */
767: /* if present member of task, set NOWAIT */
768: if (TAILQ_FIRST(&r->root_task))
769: sched_timespecclear(&r->root_wait);
770:
771: if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
772: #if SUP_ENABLE == KQ_SUPPORT
773: timeout = &r->root_wait;
774: #elif SUP_ENABLE == EP_SUPPORT
775: timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
776: #else
777: sched_timespec2val(&r->root_wait, &tv);
778: timeout = &tv;
779: #endif /* KQ_SUPPORT */
780: } else if (sched_timespecisinf(&r->root_poll))
781: #if SUP_ENABLE == EP_SUPPORT
782: timeout = -1;
783: #else
784: timeout = NULL;
785: #endif
786: else {
787: #if SUP_ENABLE == KQ_SUPPORT
788: timeout = &r->root_poll;
789: #elif SUP_ENABLE == EP_SUPPORT
790: timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
791: #else
792: sched_timespec2val(&r->root_poll, &tv);
793: timeout = &tv;
794: #endif /* KQ_SUPPORT */
795: }
796:
797: #if SUP_ENABLE == KQ_SUPPORT
798: if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
799: #elif SUP_ENABLE == EP_SUPPORT
800: if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
801: #else
802: rfd = xfd = r->root_fds[0];
803: wfd = r->root_fds[1];
804: if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
805: #endif /* KQ_SUPPORT */
806: if (r->root_hooks.hook_exec.exception) {
807: if (r->root_hooks.hook_exec.exception(r, NULL))
808: return NULL;
809: } else if (errno != EINTR)
810: LOGERR;
811: goto skip_event;
812: }
813:
814: /* kevent dispatcher */
815: now.tv_sec = now.tv_nsec = 0;
816: /* Go and catch the cat into pipes ... */
817: #if SUP_ENABLE == KQ_SUPPORT
818: for (i = 0; i < en; i++) {
819: memcpy(evt, &res[i], sizeof evt);
820: evt->flags = EV_DELETE;
821: /* Put read/write task to ready queue */
822: switch (res[i].filter) {
823: case EVFILT_READ:
824: flg = 0;
825: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
826: if (TASK_FD(task) != ((intptr_t) res[i].udata))
827: continue;
828: else {
829: flg++;
830: TASK_RET(task) = res[i].data;
831: TASK_FLAG(task) = (u_long) res[i].fflags;
832: }
833: /* remove read handle */
834: #ifdef HAVE_LIBPTHREAD
835: pthread_mutex_lock(&r->root_mtx[taskREAD]);
836: #endif
837: TAILQ_REMOVE(&r->root_read, task, task_node);
838: #ifdef HAVE_LIBPTHREAD
839: pthread_mutex_unlock(&r->root_mtx[taskREAD]);
840: #endif
841: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
842: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
843: task->task_type = taskUNUSE;
844: #ifdef HAVE_LIBPTHREAD
845: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
846: #endif
847: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
848: #ifdef HAVE_LIBPTHREAD
849: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
850: #endif
851: } else {
852: task->task_type = taskREADY;
853: #ifdef HAVE_LIBPTHREAD
854: pthread_mutex_lock(&r->root_mtx[taskREADY]);
855: #endif
856: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
857: #ifdef HAVE_LIBPTHREAD
858: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
859: #endif
860: }
861: } else {
862: task->task_type = taskREADY;
863: #ifdef HAVE_LIBPTHREAD
864: pthread_mutex_lock(&r->root_mtx[taskREADY]);
865: #endif
866: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
867: #ifdef HAVE_LIBPTHREAD
868: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
869: #endif
870: }
871: }
872: /* if match at least 2, don't remove resouce of event */
873: if (flg > 1)
874: evt->flags ^= evt->flags;
875: break;
876: case EVFILT_WRITE:
877: flg = 0;
878: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
879: if (TASK_FD(task) != ((intptr_t) res[i].udata))
880: continue;
881: else {
882: flg++;
883: TASK_RET(task) = res[i].data;
884: TASK_FLAG(task) = (u_long) res[i].fflags;
885: }
886: /* remove write handle */
887: #ifdef HAVE_LIBPTHREAD
888: pthread_mutex_lock(&r->root_mtx[taskWRITE]);
889: #endif
890: TAILQ_REMOVE(&r->root_write, task, task_node);
891: #ifdef HAVE_LIBPTHREAD
892: pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
893: #endif
894: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
895: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
896: task->task_type = taskUNUSE;
897: #ifdef HAVE_LIBPTHREAD
898: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
899: #endif
900: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
901: #ifdef HAVE_LIBPTHREAD
902: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
903: #endif
904: } else {
905: task->task_type = taskREADY;
906: #ifdef HAVE_LIBPTHREAD
907: pthread_mutex_lock(&r->root_mtx[taskREADY]);
908: #endif
909: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
910: #ifdef HAVE_LIBPTHREAD
911: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
912: #endif
913: }
914: } else {
915: task->task_type = taskREADY;
916: #ifdef HAVE_LIBPTHREAD
917: pthread_mutex_lock(&r->root_mtx[taskREADY]);
918: #endif
919: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
920: #ifdef HAVE_LIBPTHREAD
921: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
922: #endif
923: }
924: }
925: /* if match at least 2, don't remove resouce of event */
926: if (flg > 1)
927: evt->flags ^= evt->flags;
928: break;
929: case EVFILT_TIMER:
930: flg = 0;
931: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
932: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
933: continue;
934: else {
935: flg++;
936: TASK_RET(task) = res[i].data;
937: TASK_FLAG(task) = (u_long) res[i].fflags;
938: }
939: /* remove alarm handle */
940: #ifdef HAVE_LIBPTHREAD
941: pthread_mutex_lock(&r->root_mtx[taskALARM]);
942: #endif
943: TAILQ_REMOVE(&r->root_alarm, task, task_node);
944: #ifdef HAVE_LIBPTHREAD
945: pthread_mutex_unlock(&r->root_mtx[taskALARM]);
946: #endif
947: task->task_type = taskREADY;
948: #ifdef HAVE_LIBPTHREAD
949: pthread_mutex_lock(&r->root_mtx[taskREADY]);
950: #endif
951: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
952: #ifdef HAVE_LIBPTHREAD
953: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
954: #endif
955: }
956: /* if match at least 2, don't remove resouce of event */
957: if (flg > 1)
958: evt->flags ^= evt->flags;
959: break;
960: case EVFILT_VNODE:
961: flg = 0;
962: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
963: if (TASK_FD(task) != ((intptr_t) res[i].udata))
964: continue;
965: else {
966: flg++;
967: TASK_RET(task) = res[i].data;
968: TASK_FLAG(task) = (u_long) res[i].fflags;
969: }
970: /* remove node handle */
971: #ifdef HAVE_LIBPTHREAD
972: pthread_mutex_lock(&r->root_mtx[taskNODE]);
973: #endif
974: TAILQ_REMOVE(&r->root_node, task, task_node);
975: #ifdef HAVE_LIBPTHREAD
976: pthread_mutex_unlock(&r->root_mtx[taskNODE]);
977: #endif
978: task->task_type = taskREADY;
979: #ifdef HAVE_LIBPTHREAD
980: pthread_mutex_lock(&r->root_mtx[taskREADY]);
981: #endif
982: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
983: #ifdef HAVE_LIBPTHREAD
984: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
985: #endif
986: }
987: /* if match at least 2, don't remove resouce of event */
988: if (flg > 1)
989: evt->flags ^= evt->flags;
990: break;
991: case EVFILT_PROC:
992: flg = 0;
993: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
994: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
995: continue;
996: else {
997: flg++;
998: TASK_RET(task) = res[i].data;
999: TASK_FLAG(task) = (u_long) res[i].fflags;
1000: }
1001: /* remove proc handle */
1002: #ifdef HAVE_LIBPTHREAD
1003: pthread_mutex_lock(&r->root_mtx[taskPROC]);
1004: #endif
1005: TAILQ_REMOVE(&r->root_proc, task, task_node);
1006: #ifdef HAVE_LIBPTHREAD
1007: pthread_mutex_unlock(&r->root_mtx[taskPROC]);
1008: #endif
1009: task->task_type = taskREADY;
1010: #ifdef HAVE_LIBPTHREAD
1011: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1012: #endif
1013: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1014: #ifdef HAVE_LIBPTHREAD
1015: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1016: #endif
1017: }
1018: /* if match at least 2, don't remove resouce of event */
1019: if (flg > 1)
1020: evt->flags ^= evt->flags;
1021: break;
1022: case EVFILT_SIGNAL:
1023: flg = 0;
1024: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
1025: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
1026: continue;
1027: else {
1028: flg++;
1029: TASK_RET(task) = res[i].data;
1030: TASK_FLAG(task) = (u_long) res[i].fflags;
1031: }
1032: /* remove signal handle */
1033: #ifdef HAVE_LIBPTHREAD
1034: pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
1035: #endif
1036: TAILQ_REMOVE(&r->root_signal, task, task_node);
1037: #ifdef HAVE_LIBPTHREAD
1038: pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
1039: #endif
1040: task->task_type = taskREADY;
1041: #ifdef HAVE_LIBPTHREAD
1042: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1043: #endif
1044: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1045: #ifdef HAVE_LIBPTHREAD
1046: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1047: #endif
1048: }
1049: /* if match at least 2, don't remove resouce of event */
1050: if (flg > 1)
1051: evt->flags ^= evt->flags;
1052: break;
1053: #ifdef AIO_SUPPORT
1054: case EVFILT_AIO:
1055: flg = 0;
1056: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
1057: acb = (struct aiocb*) TASK_VAL(task);
1058: if (acb != ((struct aiocb*) res[i].udata))
1059: continue;
1060: else {
1061: flg++;
1062: TASK_RET(task) = res[i].data;
1063: TASK_FLAG(task) = (u_long) res[i].fflags;
1064: }
1065: /* remove user handle */
1066: #ifdef HAVE_LIBPTHREAD
1067: pthread_mutex_lock(&r->root_mtx[taskAIO]);
1068: #endif
1069: TAILQ_REMOVE(&r->root_aio, task, task_node);
1070: #ifdef HAVE_LIBPTHREAD
1071: pthread_mutex_unlock(&r->root_mtx[taskAIO]);
1072: #endif
1073: task->task_type = taskREADY;
1074: #ifdef HAVE_LIBPTHREAD
1075: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1076: #endif
1077: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1078: #ifdef HAVE_LIBPTHREAD
1079: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1080: #endif
1081: fd = acb->aio_fildes;
1082: if ((len = aio_return(acb)) != -1) {
1083: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
1084: LOGERR;
1085: } else
1086: LOGERR;
1087: free(acb);
1088: TASK_DATLEN(task) = (u_long) len;
1089: TASK_FD(task) = fd;
1090: }
1091: /* if match at least 2, don't remove resouce of event */
1092: if (flg > 1)
1093: evt->flags ^= evt->flags;
1094: break;
#ifdef EVFILT_LIO
		case EVFILT_LIO:
			flg = 0;
			TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
				acbs = (struct aiocb**) TASK_VAL(task);
				if (acbs != ((struct aiocb**) res[i].udata))
					continue;
				else {
					flg++;
					TASK_RET(task) = res[i].data;
					TASK_FLAG(task) = (u_long) res[i].fflags;
				}
				/* remove user handle */
#ifdef HAVE_LIBPTHREAD
				pthread_mutex_lock(&r->root_mtx[taskLIO]);
#endif
				TAILQ_REMOVE(&r->root_lio, task, task_node);
#ifdef HAVE_LIBPTHREAD
				pthread_mutex_unlock(&r->root_mtx[taskLIO]);
#endif
				task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
				pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
				TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
				pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				iv = (struct iovec*) TASK_DATA(task);
				fd = acbs[0]->aio_fildes;
				off = acbs[0]->aio_offset;
				/* BUGFIX: iterate the aiocb vector with j, not the outer
				 * kevent index i — the old loop walked the wrong range,
				 * freed the wrong acbs[] entries and clobbered the
				 * enclosing result-array loop counter */
				for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
					if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
						l = 0;
					else
						l = iv[j].iov_len;
					free(acbs[j]);
				}
				free(acbs);
				TASK_DATLEN(task) = (u_long) len;
				TASK_FD(task) = fd;

				/* advance the shared file offset past the vectored I/O */
				if (lseek(fd, off + len, SEEK_CUR) == -1)
					LOGERR;
			}
			/* if match at least 2, don't remove resouce of event */
			if (flg > 1)
				evt->flags ^= evt->flags;
			break;
#endif	/* EVFILT_LIO */
1145: #endif /* AIO_SUPPORT */
1146: #ifdef EVFILT_USER
1147: case EVFILT_USER:
1148: flg = 0;
1149: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
1150: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
1151: continue;
1152: else {
1153: flg++;
1154: TASK_RET(task) = res[i].data;
1155: TASK_FLAG(task) = (u_long) res[i].fflags;
1156: }
1157: /* remove user handle */
1158: #ifdef HAVE_LIBPTHREAD
1159: pthread_mutex_lock(&r->root_mtx[taskUSER]);
1160: #endif
1161: TAILQ_REMOVE(&r->root_user, task, task_node);
1162: #ifdef HAVE_LIBPTHREAD
1163: pthread_mutex_unlock(&r->root_mtx[taskUSER]);
1164: #endif
1165: task->task_type = taskREADY;
1166: #ifdef HAVE_LIBPTHREAD
1167: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1168: #endif
1169: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1170: #ifdef HAVE_LIBPTHREAD
1171: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1172: #endif
1173: }
1174: /* if match at least 2, don't remove resouce of event */
1175: if (flg > 1)
1176: evt->flags ^= evt->flags;
1177: break;
1178: #endif /* EVFILT_USER */
1179: }
1180: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
1181: if (r->root_hooks.hook_exec.exception) {
1182: if (r->root_hooks.hook_exec.exception(r, NULL))
1183: return NULL;
1184: } else
1185: LOGERR;
1186: }
1187: }
1188: #else /* end of kevent dispatcher */
1189: for (i = 0; i < r->root_kq; i++) {
1190: if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
1191: flg = 0;
1192: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
1193: if (TASK_FD(task) != i)
1194: continue;
1195: else {
1196: flg++;
1197: TASK_FLAG(task) = ioctl(TASK_FD(task),
1198: FIONREAD, &TASK_RET(task));
1199: }
1200: /* remove read handle */
1201: #ifdef HAVE_LIBPTHREAD
1202: pthread_mutex_lock(&r->root_mtx[taskREAD]);
1203: #endif
1204: TAILQ_REMOVE(&r->root_read, task, task_node);
1205: #ifdef HAVE_LIBPTHREAD
1206: pthread_mutex_unlock(&r->root_mtx[taskREAD]);
1207: #endif
1208: if (r->root_hooks.hook_exec.exception) {
1209: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1210: task->task_type = taskUNUSE;
1211: #ifdef HAVE_LIBPTHREAD
1212: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1213: #endif
1214: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1215: #ifdef HAVE_LIBPTHREAD
1216: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1217: #endif
1218: } else {
1219: task->task_type = taskREADY;
1220: #ifdef HAVE_LIBPTHREAD
1221: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1222: #endif
1223: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1224: #ifdef HAVE_LIBPTHREAD
1225: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1226: #endif
1227: }
1228: } else {
1229: task->task_type = taskREADY;
1230: #ifdef HAVE_LIBPTHREAD
1231: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1232: #endif
1233: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1234: #ifdef HAVE_LIBPTHREAD
1235: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1236: #endif
1237: }
1238: }
1239: /* if match equal to 1, remove resouce */
1240: if (flg == 1)
1241: FD_CLR(i, &r->root_fds[0]);
1242: }
1243:
1244: if (FD_ISSET(i, &wfd)) {
1245: flg = 0;
1246: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
1247: if (TASK_FD(task) != i)
1248: continue;
1249: else {
1250: flg++;
1251: TASK_FLAG(task) = ioctl(TASK_FD(task),
1252: FIONWRITE, &TASK_RET(task));
1253: }
1254: /* remove write handle */
1255: #ifdef HAVE_LIBPTHREAD
1256: pthread_mutex_lock(&r->root_mtx[taskWRITE]);
1257: #endif
1258: TAILQ_REMOVE(&r->root_write, task, task_node);
1259: #ifdef HAVE_LIBPTHREAD
1260: pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
1261: #endif
1262: if (r->root_hooks.hook_exec.exception) {
1263: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1264: task->task_type = taskUNUSE;
1265: #ifdef HAVE_LIBPTHREAD
1266: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1267: #endif
1268: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1269: #ifdef HAVE_LIBPTHREAD
1270: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1271: #endif
1272: } else {
1273: task->task_type = taskREADY;
1274: #ifdef HAVE_LIBPTHREAD
1275: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1276: #endif
1277: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1278: #ifdef HAVE_LIBPTHREAD
1279: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1280: #endif
1281: }
1282: } else {
1283: task->task_type = taskREADY;
1284: #ifdef HAVE_LIBPTHREAD
1285: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1286: #endif
1287: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1288: #ifdef HAVE_LIBPTHREAD
1289: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1290: #endif
1291: }
1292: }
1293: /* if match equal to 1, remove resouce */
1294: if (flg == 1)
1295: FD_CLR(i, &r->root_fds[1]);
1296: }
1297: }
1298:
1299: /* optimize select */
1300: for (i = r->root_kq - 1; i > 2; i--)
1301: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
1302: break;
1303: if (i > 2)
1304: r->root_kq = i + 1;
1305: #endif /* KQ_SUPPORT */
1306:
1307: skip_event:
1308: /* timer update & put in ready queue */
1309: clock_gettime(CLOCK_MONOTONIC, &now);
1310:
1311: TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
1312: if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
1313: #ifdef HAVE_LIBPTHREAD
1314: pthread_mutex_lock(&r->root_mtx[taskTIMER]);
1315: #endif
1316: TAILQ_REMOVE(&r->root_timer, task, task_node);
1317: #ifdef HAVE_LIBPTHREAD
1318: pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
1319: #endif
1320: task->task_type = taskREADY;
1321: #ifdef HAVE_LIBPTHREAD
1322: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1323: #endif
1324: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1325: #ifdef HAVE_LIBPTHREAD
1326: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1327: #endif
1328: }
1329:
1330: /* put regular task priority task to ready queue,
1331: if there is no ready task or reach max missing hit for regular task */
1332: if ((task = TAILQ_FIRST(&r->root_task))) {
1333: if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
1334: r->root_miss ^= r->root_miss;
1335:
1336: #ifdef HAVE_LIBPTHREAD
1337: pthread_mutex_lock(&r->root_mtx[taskTASK]);
1338: #endif
1339: TAILQ_REMOVE(&r->root_task, task, task_node);
1340: #ifdef HAVE_LIBPTHREAD
1341: pthread_mutex_unlock(&r->root_mtx[taskTASK]);
1342: #endif
1343: task->task_type = taskREADY;
1344: #ifdef HAVE_LIBPTHREAD
1345: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1346: #endif
1347: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1348: #ifdef HAVE_LIBPTHREAD
1349: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1350: #endif
1351: } else
1352: r->root_miss++;
1353: } else
1354: r->root_miss ^= r->root_miss;
1355:
1356: /* OK, lets get ready task !!! */
1357: task = TAILQ_FIRST(&r->root_ready);
1358: if (!(task))
1359: return NULL;
1360:
1361: #ifdef HAVE_LIBPTHREAD
1362: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1363: #endif
1364: TAILQ_REMOVE(&r->root_ready, task, task_node);
1365: #ifdef HAVE_LIBPTHREAD
1366: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1367: #endif
1368: task->task_type = taskUNUSE;
1369: #ifdef HAVE_LIBPTHREAD
1370: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1371: #endif
1372: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1373: #ifdef HAVE_LIBPTHREAD
1374: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1375: #endif
1376: return task;
1377: }
1378:
1379: /*
1380: * sched_hook_exception() - Default EXCEPTION hook
1381: *
1382: * @root = root task
1383: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1384: * return: <0 errors and 0 ok
1385: */
1386: void *
1387: sched_hook_exception(void *root, void *arg)
1388: {
1389: sched_root_task_t *r = root;
1390:
1391: if (!r)
1392: return NULL;
1393:
1394: /* custom exception handling ... */
1395: if (arg) {
1396: if (arg == (void*) EV_EOF)
1397: return NULL;
1398: return (void*) -1; /* raise scheduler error!!! */
1399: }
1400:
1401: /* if error hook exists */
1402: if (r->root_hooks.hook_root.error)
1403: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1404:
1405: /* default case! */
1406: LOGERR;
1407: return NULL;
1408: }
1409:
1410: /*
1411: * sched_hook_condition() - Default CONDITION hook
1412: *
1413: * @root = root task
1414: * @arg = killState from schedRun()
1415: * return: NULL kill scheduler loop or !=NULL ok
1416: */
1417: void *
1418: sched_hook_condition(void *root, void *arg)
1419: {
1420: sched_root_task_t *r = root;
1421:
1422: if (!r)
1423: return NULL;
1424:
1425: return (void*) (r->root_cond - *(intptr_t*) arg);
1426: }
1427:
1428: /*
1429: * sched_hook_rtc() - Default RTC hook
1430: *
1431: * @task = current task
1432: * @arg = unused
1433: * return: <0 errors and 0 ok
1434: */
1435: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
void *
sched_hook_rtc(void *task, void *arg __unused)
{
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* arm a POSIX per-process timer that notifies via a real-time
	 * signal; signal number is SIGRTMIN offset by the task's data value */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = TASK_DATA(t);

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* stash the timer id on the task so it can be torn down later */
		TASK_FLAG(t) = (u_long) tmr;

	/* register a signal task that dispatches the RTC callback through
	 * _sched_rtcWrapper; on failure, roll back the timer created above */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo, 
				t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		/* remember the signal task so callers can cancel it */
		TASK_RET(t) = (uintptr_t) sigt;

	/* one-shot relative expiry taken from the task's timespec value
	 * (it_interval stays zeroed, so the timer does not reload) */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* roll back both the signal task and the timer */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}

	return NULL;
}
1488: #endif /* HAVE_TIMER_CREATE */
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */