1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.31 2016/09/29 13:44:23 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004 - 2016
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: static inline void
51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
52: {
53: remove_task_from(t, q);
54:
55: t->task_type = taskREADY;
56: insert_task_to(t, &(TASK_ROOT(t))->root_ready);
57: }
58:
59: #ifdef HAVE_LIBPTHREAD
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	/* a task without a root cannot be dispatched - terminate the thread */
	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	/* run the task callback and publish its result on the root */
	ret = schedCall(t);
	r->root_ret = ret;

	/* TASK_VAL holds the pthread id set by sched_hook_thread();
	 * non-zero means the task is still queued on root_thread */
	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
89: #endif
90:
91: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
92: defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
93: #if SUP_ENABLE == KQ_SUPPORT
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	/* TASK_DATA carries the real RTC task; TASK_DATLEN the POSIX timer id */
	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		/* one-shot timer: destroy it before running the callback */
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	/* fire the user callback, then retire the task from the RTC queue */
	ret = schedCall(task);

	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
112: #else
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	/* sival_ptr carries the RTC task attached via the timer's sigevent */
	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		/* one-shot timer: destroy it before running the callback;
		 * TASK_FLAG holds the POSIX timer id */
		timer_delete((timer_t) TASK_FLAG(task));

		/* NOTE(review): schedCall() runs in signal-handler context
		 * here - callbacks must be async-signal-safe; confirm */
		TASK_RET(task) = (intptr_t) schedCall(task);

		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
127: #endif
128: #endif
129:
130: /*
131: * sched_hook_init() - Default INIT hook
132: *
133: * @root = root task
134: * @arg = unused
135: * return: <0 errors and 0 ok
136: */
137: void *
138: sched_hook_init(void *root, void *arg __unused)
139: {
140: sched_root_task_t *r = root;
141:
142: if (!r)
143: return (void*) -1;
144:
145: #if SUP_ENABLE == KQ_SUPPORT
146: r->root_kq = kqueue();
147: if (r->root_kq == -1) {
148: LOGERR;
149: return (void*) -1;
150: }
151: #elif SUP_ENABLE == EP_SUPPORT
152: r->root_kq = epoll_create(KQ_EVENTS);
153: if (r->root_kq == -1) {
154: LOGERR;
155: return (void*) -1;
156: }
157: #else
158: r->root_kq ^= r->root_kq;
159: FD_ZERO(&r->root_fds[0]);
160: FD_ZERO(&r->root_fds[1]);
161: #endif
162:
163: return NULL;
164: }
165:
166: /*
167: * sched_hook_fini() - Default FINI hook
168: *
169: * @root = root task
170: * @arg = unused
171: * return: <0 errors and 0 ok
172: */
173: void *
174: sched_hook_fini(void *root, void *arg __unused)
175: {
176: sched_root_task_t *r = root;
177:
178: if (!r)
179: return (void*) -1;
180:
181: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
182: if (r->root_kq > 2) {
183: close(r->root_kq);
184: r->root_kq = 0;
185: }
186: #else
187: FD_ZERO(&r->root_fds[1]);
188: FD_ZERO(&r->root_fds[0]);
189: r->root_kq ^= r->root_kq;
190: #endif
191:
192: return NULL;
193: }
194:
195: /*
196: * sched_hook_cancel() - Default CANCEL hook
197: *
198: * @task = current task
199: * @arg = unused
200: * return: <0 errors and 0 ok
201: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	int flg;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.fd = 0 };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif /* EVFILT_LIO */
#endif /* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	/* Per task type: flg counts subscribers sharing the same fd/id on the
	 * queue; only when this task is the last one (flg < 2) is the kernel
	 * filter actually removed / the signal disposition restored. */
	switch (TASK_TYPE(t)) {
	case taskREAD:
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
			if (TASK_FD(tt) != TASK_FD(t))
				continue;
			else
				flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
		ee.data.fd = TASK_FD(t);
		/* keep write interest if the fd is also watched for output */
		if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
			ee.events = EPOLLOUT;

		if (flg < 2)
			FD_CLR(TASK_FD(t), &r->root_fds[0]);
		else
			ee.events |= (EPOLLIN | EPOLLPRI | EPOLLRDHUP);
#else
		if (flg < 2) {
			FD_CLR(TASK_FD(t), &r->root_fds[0]);

			/* optimize select */
			for (i = r->root_kq - 1; i > 2; i--)
				if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
					break;
			if (i > 2)
				r->root_kq = i + 1;
		}
#endif
		break;
	case taskWRITE:
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
			if (TASK_FD(tt) != TASK_FD(t))
				continue;
			else
				flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
		ee.data.fd = TASK_FD(t);
		/* keep read interest if the fd is also watched for input */
		if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
			ee.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP;

		if (flg < 2)
			FD_CLR(TASK_FD(t), &r->root_fds[1]);
		else
			ee.events |= EPOLLOUT;
#else
		if (flg < 2) {
			FD_CLR(TASK_FD(t), &r->root_fds[1]);

			/* optimize select */
			for (i = r->root_kq - 1; i > 2; i--)
				if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
					break;
			if (i > 2)
				r->root_kq = i + 1;
		}
#endif
		break;
	case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
			if (TASK_DATA(tt) != TASK_DATA(t))
				continue;
			else
				flg++;
#ifdef __NetBSD__
		EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_DATA(t));
#else
		EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_DATA(t));
#endif
#endif
		break;
	case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
			if (TASK_FD(tt) != TASK_FD(t))
				continue;
			else
				flg++;
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_FD(t));
#endif
#endif
		break;
	case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
			if (TASK_VAL(tt) != TASK_VAL(t))
				continue;
			else
				flg++;
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_VAL(t));
#endif
#endif
		break;
	case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
			if (TASK_VAL(tt) != TASK_VAL(t))
				continue;
			else
				flg++;
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_VAL(t));
#endif
		/* restore signal */
		if (flg < 2)
			signal(TASK_VAL(t), SIG_DFL);
#endif
		break;
#ifdef AIO_SUPPORT
	case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
			if (TASK_VAL(tt) != TASK_VAL(t))
				continue;
			else
				flg++;
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_VAL(t));
#endif
		/* abort the in-flight request and free its control block */
		acb = (struct aiocb*) TASK_VAL(t);
		if (acb) {
			if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
				aio_return(acb);
			free(acb);
			TASK_VAL(t) = 0;
		}
#endif
		break;
#ifdef EVFILT_LIO
	case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
			if (TASK_VAL(tt) != TASK_VAL(t))
				continue;
			else
				flg++;
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_VAL(t));
#endif
		/* abort every request of the list job and free the array */
		acbs = (struct aiocb**) TASK_VAL(t);
		if (acbs) {
			for (i = 0; i < TASK_DATLEN(t); i++) {
				if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
					aio_return(acbs[i]);
				free(acbs[i]);
			}
			free(acbs);
			TASK_VAL(t) = 0;
		}
#endif
		break;
#endif /* EVFILT_LIO */
#endif /* AIO_SUPPORT */
#ifdef EVFILT_USER
	case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
			if (TASK_VAL(tt) != TASK_VAL(t))
				continue;
			else
				flg++;
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
				0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
				0, 0, (void*) TASK_VAL(t));
#endif
#endif
		break;
#endif /* EVFILT_USER */
	case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
		/* TASK_VAL holds the pthread id; cancel and reap the thread */
		if (TASK_VAL(t)) {
			pthread_cancel((pthread_t) TASK_VAL(t));
			pthread_join((pthread_t) TASK_VAL(t), NULL);
			/* NOTE(review): the thread wrapper may already have
			 * zeroed TASK_VAL and requeued the task - re-check */
			if (TASK_VAL(t)) {
				transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
				TASK_VAL(t) = 0;
			}
		}
#endif
		return NULL;
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	case taskRTC:
		/* TASK_FLAG holds the POSIX timer id */
		timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
		schedCancel((sched_task_t*) TASK_RET(t));
#else
		/* check for multi subscribers */
		flg = 0;
		TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
			if (TASK_DATA(tt) != TASK_DATA(t))
				continue;
			else
				flg++;

		/* restore signal */
		if (flg < 2)
			signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
		return NULL;
#endif /* HAVE_TIMER_CREATE */
	default:
		return NULL;
	}

	/* push the prepared filter change / registration update to the kernel */
#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
#endif
	return NULL;
}
502:
503: #ifdef HAVE_LIBPTHREAD
504: /*
505: * sched_hook_thread() - Default THREAD hook
506: *
507: * @task = current task
508: * @arg = pthread attributes
509: * return: <0 errors and 0 ok
510: */
511: void *
512: sched_hook_thread(void *task, void *arg)
513: {
514: sched_task_t *t = task;
515: pthread_t tid;
516: sigset_t s, o;
517:
518: if (!t || !TASK_ROOT(t))
519: return (void*) -1;
520:
521: sigfillset(&s);
522: pthread_sigmask(SIG_BLOCK, &s, &o);
523: errno = pthread_create(&tid, (pthread_attr_t*) arg,
524: (void *(*)(void*)) _sched_threadWrapper, t);
525: pthread_sigmask(SIG_SETMASK, &o, NULL);
526:
527: if (errno) {
528: LOGERR;
529: return (void*) -1;
530: } else
531: TASK_VAL(t) = (u_long) tid;
532:
533: if (!TASK_ISLOCKED(t))
534: TASK_LOCK(t);
535:
536: return NULL;
537: }
538: #endif
539:
540: /*
541: * sched_hook_read() - Default READ hook
542: *
543: * @task = current task
544: * @arg = unused
545: * return: <0 errors and 0 ok
546: */
void *
sched_hook_read(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI | EPOLLRDHUP, .data.fd = 0 };
	int flg = 0;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
	/* register the fd for read events; udata keeps the fd so the
	 * fetch hook can match the event back to its tasks */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	/* root_fds[0]/[1] track fds already registered for read/write:
	 * if the fd is known, MOD the registration instead of ADD */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
		flg |= 1;
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;	/* keep existing write interest */
	}

	ee.data.fd = TASK_FD(t);
	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		FD_SET(TASK_FD(t), &r->root_fds[0]);
#else
	/* select() backend: mark fd and grow the highest-fd watermark */
	FD_SET(TASK_FD(t), &r->root_fds[0]);
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
603:
604: /*
605: * sched_hook_write() - Default WRITE hook
606: *
607: * @task = current task
608: * @arg = unused
609: * return: <0 errors and 0 ok
610: */
void *
sched_hook_write(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
	int flg = 0;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
	/* register the fd for write events; udata keeps the fd so the
	 * fetch hook can match the event back to its tasks */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	/* root_fds[0]/[1] track fds already registered for read/write:
	 * if the fd is known, MOD the registration instead of ADD */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;	/* keep read interest */
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
		flg |= 2;

	ee.data.fd = TASK_FD(t);
	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
	/* select() backend: mark fd and grow the highest-fd watermark */
	FD_SET(TASK_FD(t), &r->root_fds[1]);
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
667:
668: /*
669: * sched_hook_alarm() - Default ALARM hook
670: *
671: * @task = current task
672: * @arg = unused
673: * return: <0 errors and 0 ok
674: */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* arm a kqueue timer: period is the task's timespec converted to
	 * milliseconds, TASK_DATA doubles as the timer identifier */
#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
706:
707: /*
708: * sched_hook_node() - Default NODE hook
709: *
710: * @task = current task
711: * @arg = unused
712: * return: <0 errors and 0 ok
713: */
void *
sched_hook_node(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the vnode behind TASK_FD for every supported fflag */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
745:
746: /*
747: * sched_hook_proc() - Default PROC hook
748: *
749: * @task = current task
750: * @arg = unused
751: * return: <0 errors and 0 ok
752: */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the process whose pid is in TASK_VAL for exit/fork/exec */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
782:
783: /*
784: * sched_hook_signal() - Default SIGNAL hook
785: *
786: * @task = current task
787: * @arg = unused
788: * return: <0 errors and 0 ok
789: */
void *
sched_hook_signal(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* ignore signal */
	/* EVFILT_SIGNAL fires even for ignored signals, so switch the
	 * default disposition off to keep the process alive */
	signal(TASK_VAL(t), SIG_IGN);

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif
	return NULL;
}
819:
820: /*
821: * sched_hook_user() - Default USER hook
822: *
823: * @task = current task
824: * @arg = unused
825: * return: <0 errors and 0 ok
826: */
827: #ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register a user-triggered event; TASK_VAL is the ident and
	 * TASK_DATLEN carries the NOTE_* fflags for the filter */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
857: #endif
858:
859: #if SUP_ENABLE == KQ_SUPPORT
860: static inline void
861: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
862: {
863: struct kevent evt[1];
864: register int i;
865: sched_task_t *task, *tmp;
866: struct timespec now = { 0, 0 };
867: #ifdef AIO_SUPPORT
868: int len, fd;
869: struct aiocb *acb;
870: #ifdef EVFILT_LIO
871: int l;
872: off_t off;
873: struct aiocb **acbs;
874: struct iovec *iv;
875: #endif /* EVFILT_LIO */
876: #endif /* AIO_SUPPORT */
877:
878: for (i = 0; i < en; i++) {
879: memcpy(evt, &res[i], sizeof evt);
880: evt->flags = EV_DELETE;
881: /* Put read/write task to ready queue */
882: switch (res[i].filter) {
883: case EVFILT_READ:
884: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
885: if (TASK_FD(task) != ((intptr_t) res[i].udata))
886: continue;
887: else {
888: TASK_RET(task) = res[i].data;
889: TASK_FLAG(task) = (u_long) res[i].fflags;
890: }
891: /* remove read handle */
892: remove_task_from(task, &r->root_read);
893:
894: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
895: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
896: task->task_type = taskUNUSE;
897: insert_task_to(task, &r->root_unuse);
898: } else {
899: task->task_type = taskREADY;
900: insert_task_to(task, &r->root_ready);
901: }
902: } else {
903: task->task_type = taskREADY;
904: insert_task_to(task, &r->root_ready);
905: }
906: }
907: break;
908: case EVFILT_WRITE:
909: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
910: if (TASK_FD(task) != ((intptr_t) res[i].udata))
911: continue;
912: else {
913: TASK_RET(task) = res[i].data;
914: TASK_FLAG(task) = (u_long) res[i].fflags;
915: }
916: /* remove write handle */
917: remove_task_from(task, &r->root_write);
918:
919: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
920: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
921: task->task_type = taskUNUSE;
922: insert_task_to(task, &r->root_unuse);
923: } else {
924: task->task_type = taskREADY;
925: insert_task_to(task, &r->root_ready);
926: }
927: } else {
928: task->task_type = taskREADY;
929: insert_task_to(task, &r->root_ready);
930: }
931: }
932: break;
933: case EVFILT_TIMER:
934: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
935: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
936: continue;
937: else {
938: TASK_RET(task) = res[i].data;
939: TASK_FLAG(task) = (u_long) res[i].fflags;
940: }
941: /* remove alarm handle */
942: transit_task2ready(task, &r->root_alarm);
943: }
944: break;
945: case EVFILT_VNODE:
946: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
947: if (TASK_FD(task) != ((intptr_t) res[i].udata))
948: continue;
949: else {
950: TASK_RET(task) = res[i].data;
951: TASK_FLAG(task) = (u_long) res[i].fflags;
952: }
953: /* remove node handle */
954: transit_task2ready(task, &r->root_node);
955: }
956: break;
957: case EVFILT_PROC:
958: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
959: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
960: continue;
961: else {
962: TASK_RET(task) = res[i].data;
963: TASK_FLAG(task) = (u_long) res[i].fflags;
964: }
965: /* remove proc handle */
966: transit_task2ready(task, &r->root_proc);
967: }
968: break;
969: case EVFILT_SIGNAL:
970: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
971: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
972: continue;
973: else {
974: TASK_RET(task) = res[i].data;
975: TASK_FLAG(task) = (u_long) res[i].fflags;
976: }
977: /* remove signal handle */
978: transit_task2ready(task, &r->root_signal);
979: }
980: break;
981: #ifdef AIO_SUPPORT
982: case EVFILT_AIO:
983: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
984: acb = (struct aiocb*) TASK_VAL(task);
985: if (acb != ((struct aiocb*) res[i].udata))
986: continue;
987: else {
988: TASK_RET(task) = res[i].data;
989: TASK_FLAG(task) = (u_long) res[i].fflags;
990: }
991: /* remove user handle */
992: transit_task2ready(task, &r->root_aio);
993:
994: fd = acb->aio_fildes;
995: if ((len = aio_return(acb)) != -1) {
996: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
997: LOGERR;
998: } else
999: LOGERR;
1000: free(acb);
1001: TASK_DATLEN(task) = (u_long) len;
1002: TASK_FD(task) = fd;
1003: }
1004: break;
1005: #ifdef EVFILT_LIO
1006: case EVFILT_LIO:
1007: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
1008: acbs = (struct aiocb**) TASK_VAL(task);
1009: if (acbs != ((struct aiocb**) res[i].udata))
1010: continue;
1011: else {
1012: TASK_RET(task) = res[i].data;
1013: TASK_FLAG(task) = (u_long) res[i].fflags;
1014: }
1015: /* remove user handle */
1016: transit_task2ready(task, &r->root_lio);
1017:
1018: iv = (struct iovec*) TASK_DATA(task);
1019: fd = acbs[0]->aio_fildes;
1020: off = acbs[0]->aio_offset;
1021: for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
1022: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
1023: l = 0;
1024: else
1025: l = iv[i].iov_len;
1026: free(acbs[i]);
1027: }
1028: free(acbs);
1029: TASK_DATLEN(task) = (u_long) len;
1030: TASK_FD(task) = fd;
1031:
1032: if (lseek(fd, off + len, SEEK_CUR) == -1)
1033: LOGERR;
1034: }
1035: break;
1036: #endif /* EVFILT_LIO */
1037: #endif /* AIO_SUPPORT */
1038: #ifdef EVFILT_USER
1039: case EVFILT_USER:
1040: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
1041: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
1042: continue;
1043: else {
1044: TASK_RET(task) = res[i].data;
1045: TASK_FLAG(task) = (u_long) res[i].fflags;
1046: }
1047: /* remove user handle */
1048: transit_task2ready(task, &r->root_user);
1049: }
1050: break;
1051: #endif /* EVFILT_USER */
1052: }
1053:
1054: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
1055: if (r->root_hooks.hook_exec.exception)
1056: r->root_hooks.hook_exec.exception(r, NULL);
1057: else
1058: LOGERR;
1059: }
1060: }
1061: }
1062: #endif
1063:
1064: #if SUP_ENABLE == EP_SUPPORT
1065: static inline void
1066: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
1067: {
1068: register int i, flg;
1069: int ops = EPOLL_CTL_DEL;
1070: sched_task_t *task, *tmp;
1071: struct epoll_event evt[1];
1072:
1073: for (i = 0; i < en; i++) {
1074: memcpy(evt, &res[i], sizeof evt);
1075:
1076: if (evt->events & (EPOLLIN | EPOLLPRI | EPOLLET)) {
1077: flg = 0;
1078: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
1079: if (TASK_FD(task) != evt->data.fd)
1080: continue;
1081: else {
1082: flg++;
1083: FD_CLR(TASK_FD(task), &r->root_fds[0]);
1084: TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
1085:
1086: evt->events &= ~(EPOLLIN | EPOLLPRI | EPOLLET | EPOLLRDHUP);
1087: if (FD_ISSET(TASK_FD(task), &r->root_fds[1])) {
1088: ops = EPOLL_CTL_MOD;
1089: evt->events |= EPOLLOUT;
1090: }
1091: }
1092: /* remove read handle */
1093: remove_task_from(task, &r->root_read);
1094:
1095: if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
1096: if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
1097: (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
1098: task->task_type = taskUNUSE;
1099: insert_task_to(task, &r->root_unuse);
1100: } else {
1101: task->task_type = taskREADY;
1102: insert_task_to(task, &r->root_ready);
1103: }
1104: } else {
1105: task->task_type = taskREADY;
1106: insert_task_to(task, &r->root_ready);
1107: }
1108: }
1109: if (flg > 1)
1110: ops = EPOLL_CTL_MOD;
1111: }
1112:
1113: if (evt->events & EPOLLOUT) {
1114: flg = 0;
1115: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
1116: if (TASK_FD(task) != evt->data.fd)
1117: continue;
1118: else {
1119: flg++;
1120: FD_CLR(TASK_FD(task), &r->root_fds[1]);
1121: TASK_FLAG(task) = ioctl(TASK_FD(task),
1122: FIONWRITE, &TASK_RET(task));
1123:
1124: evt->events &= ~EPOLLOUT;
1125: if (FD_ISSET(TASK_FD(task), &r->root_fds[0])) {
1126: ops = EPOLL_CTL_MOD;
1127: evt->events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
1128: }
1129: }
1130: /* remove write handle */
1131: remove_task_from(task, &r->root_write);
1132:
1133: if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
1134: if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
1135: (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
1136: task->task_type = taskUNUSE;
1137: insert_task_to(task, &r->root_unuse);
1138: } else {
1139: task->task_type = taskREADY;
1140: insert_task_to(task, &r->root_ready);
1141: }
1142: } else {
1143: task->task_type = taskREADY;
1144: insert_task_to(task, &r->root_ready);
1145: }
1146: }
1147: if (flg > 1)
1148: ops = EPOLL_CTL_MOD;
1149: }
1150:
1151: if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
1152: if (r->root_hooks.hook_exec.exception) {
1153: r->root_hooks.hook_exec.exception(r, NULL);
1154: } else
1155: LOGERR;
1156: }
1157: }
1158: }
1159: #endif
1160:
1161: #if SUP_ENABLE == NO_SUPPORT
1162: static inline void
1163: fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
1164: {
1165: register int i, flg;
1166: sched_task_t *task, *tmp;
1167:
1168: /* skip select check if return value from select is zero */
1169: if (!en)
1170: return;
1171:
1172: for (i = 0; i < r->root_kq; i++) {
1173: if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
1174: flg = 0;
1175: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
1176: if (TASK_FD(task) != i)
1177: continue;
1178: else {
1179: flg++;
1180: TASK_FLAG(task) = ioctl(TASK_FD(task),
1181: FIONREAD, &TASK_RET(task));
1182: }
1183: /* remove read handle */
1184: remove_task_from(task, &r->root_read);
1185:
1186: if (r->root_hooks.hook_exec.exception) {
1187: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1188: task->task_type = taskUNUSE;
1189: insert_task_to(task, &r->root_unuse);
1190: } else {
1191: task->task_type = taskREADY;
1192: insert_task_to(task, &r->root_ready);
1193: }
1194: } else {
1195: task->task_type = taskREADY;
1196: insert_task_to(task, &r->root_ready);
1197: }
1198: }
1199: /* remove resouce */
1200: if (flg)
1201: FD_CLR(i, &r->root_fds[0]);
1202: }
1203:
1204: if (FD_ISSET(i, &wfd)) {
1205: flg = 0;
1206: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
1207: if (TASK_FD(task) != i)
1208: continue;
1209: else {
1210: flg++;
1211: TASK_FLAG(task) = ioctl(TASK_FD(task),
1212: FIONWRITE, &TASK_RET(task));
1213: }
1214: /* remove write handle */
1215: remove_task_from(task, &r->root_write);
1216:
1217: if (r->root_hooks.hook_exec.exception) {
1218: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1219: task->task_type = taskUNUSE;
1220: insert_task_to(task, &r->root_unuse);
1221: } else {
1222: task->task_type = taskREADY;
1223: insert_task_to(task, &r->root_ready);
1224: }
1225: } else {
1226: task->task_type = taskREADY;
1227: insert_task_to(task, &r->root_ready);
1228: }
1229: }
1230: /* remove resouce */
1231: if (flg)
1232: FD_CLR(i, &r->root_fds[1]);
1233: }
1234: }
1235:
1236: /* optimize select */
1237: for (i = r->root_kq - 1; i > 2; i--)
1238: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
1239: break;
1240: if (i > 2)
1241: r->root_kq = i + 1;
1242: }
1243: #endif
1244:
1245: /*
1246: * sched_hook_fetch() - Default FETCH hook
1247: *
1248: * @root = root task
1249: * @arg = unused
1250: * return: NULL error or !=NULL fetched task
1251: */
1252: void *
1253: sched_hook_fetch(void *root, void *arg __unused)
1254: {
1255: sched_root_task_t *r = root;
1256: sched_task_t *task, *tmp;
1257: struct timespec now, m, mtmp;
1258: #if SUP_ENABLE == KQ_SUPPORT
1259: struct kevent res[KQ_EVENTS];
1260: struct timespec *timeout;
1261: #elif SUP_ENABLE == EP_SUPPORT
1262: struct epoll_event res[KQ_EVENTS];
1263: u_long timeout = 0;
1264: #else
1265: struct timeval *timeout, tv;
1266: fd_set rfd, wfd, xfd;
1267: #endif
1268: int en;
1269:
1270: if (!r)
1271: return NULL;
1272:
1273: /* get new task by queue priority */
1274: while ((task = TAILQ_FIRST(&r->root_event))) {
1275: transit_task2unuse(task, &r->root_event);
1276: return task;
1277: }
1278: while ((task = TAILQ_FIRST(&r->root_ready))) {
1279: transit_task2unuse(task, &r->root_ready);
1280: return task;
1281: }
1282:
1283: #ifdef TIMER_WITHOUT_SORT
1284: clock_gettime(CLOCK_MONOTONIC, &now);
1285:
1286: sched_timespecclear(&r->root_wait);
1287: TAILQ_FOREACH(task, &r->root_timer, task_node) {
1288: if (!sched_timespecisset(&r->root_wait))
1289: r->root_wait = TASK_TS(task);
1290: else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
1291: r->root_wait = TASK_TS(task);
1292: }
1293:
1294: if (TAILQ_FIRST(&r->root_timer)) {
1295: m = r->root_wait;
1296: sched_timespecsub(&m, &now, &mtmp);
1297: r->root_wait = mtmp;
1298: } else {
1299: /* set wait INFTIM */
1300: sched_timespecinf(&r->root_wait);
1301: }
1302: #else /* ! TIMER_WITHOUT_SORT */
1303: if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
1304: clock_gettime(CLOCK_MONOTONIC, &now);
1305:
1306: m = TASK_TS(task);
1307: sched_timespecsub(&m, &now, &mtmp);
1308: r->root_wait = mtmp;
1309: } else {
1310: /* set wait INFTIM */
1311: sched_timespecinf(&r->root_wait);
1312: }
1313: #endif /* TIMER_WITHOUT_SORT */
1314: /* if present member of task, set NOWAIT */
1315: if (TAILQ_FIRST(&r->root_task))
1316: sched_timespecclear(&r->root_wait);
1317:
1318: if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
1319: #if SUP_ENABLE == KQ_SUPPORT
1320: timeout = &r->root_wait;
1321: #elif SUP_ENABLE == EP_SUPPORT
1322: timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
1323: #else
1324: sched_timespec2val(&r->root_wait, &tv);
1325: timeout = &tv;
1326: #endif /* KQ_SUPPORT */
1327: } else if (sched_timespecisinf(&r->root_poll))
1328: #if SUP_ENABLE == EP_SUPPORT
1329: timeout = -1;
1330: #else
1331: timeout = NULL;
1332: #endif
1333: else {
1334: #if SUP_ENABLE == KQ_SUPPORT
1335: timeout = &r->root_poll;
1336: #elif SUP_ENABLE == EP_SUPPORT
1337: timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
1338: #else
1339: sched_timespec2val(&r->root_poll, &tv);
1340: timeout = &tv;
1341: #endif /* KQ_SUPPORT */
1342: }
1343:
1344: #if SUP_ENABLE == KQ_SUPPORT
1345: if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
1346: #elif SUP_ENABLE == EP_SUPPORT
1347: if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
1348: #else
1349: rfd = xfd = r->root_fds[0];
1350: wfd = r->root_fds[1];
1351: if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
1352: #endif /* KQ_SUPPORT */
1353: if (r->root_hooks.hook_exec.exception) {
1354: if (r->root_hooks.hook_exec.exception(r, NULL))
1355: return NULL;
1356: } else if (errno != EINTR)
1357: LOGERR;
1358: goto skip_event;
1359: }
1360:
1361: /* Go and catch the cat into pipes ... */
1362: #if SUP_ENABLE == KQ_SUPPORT
1363: /* kevent dispatcher */
1364: fetch_hook_kevent_proceed(en, res, r);
1365: #elif SUP_ENABLE == EP_SUPPORT
1366: /* epoll dispatcher */
1367: fetch_hook_epoll_proceed(en, res, r);
1368: #else
1369: /* select dispatcher */
1370: fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
1371: #endif /* KQ_SUPPORT */
1372:
1373: skip_event:
1374: /* timer update & put in ready queue */
1375: clock_gettime(CLOCK_MONOTONIC, &now);
1376:
1377: TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
1378: if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
1379: transit_task2ready(task, &r->root_timer);
1380:
1381: /* put regular task priority task to ready queue,
1382: if there is no ready task or reach max missing hit for regular task */
1383: if ((task = TAILQ_FIRST(&r->root_task))) {
1384: if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
1385: r->root_miss ^= r->root_miss;
1386:
1387: transit_task2ready(task, &r->root_task);
1388: } else
1389: r->root_miss++;
1390: } else
1391: r->root_miss ^= r->root_miss;
1392:
1393: /* OK, lets get ready task !!! */
1394: task = TAILQ_FIRST(&r->root_ready);
1395: if (task)
1396: transit_task2unuse(task, &r->root_ready);
1397: return task;
1398: }
1399:
1400: /*
1401: * sched_hook_exception() - Default EXCEPTION hook
1402: *
1403: * @root = root task
1404: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1405: * return: <0 errors and 0 ok
1406: */
1407: void *
1408: sched_hook_exception(void *root, void *arg)
1409: {
1410: sched_root_task_t *r = root;
1411:
1412: if (!r)
1413: return NULL;
1414:
1415: /* custom exception handling ... */
1416: if (arg) {
1417: if (arg == (void*) EV_EOF)
1418: return NULL;
1419: return (void*) -1; /* raise scheduler error!!! */
1420: }
1421:
1422: /* if error hook exists */
1423: if (r->root_hooks.hook_root.error)
1424: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1425:
1426: /* default case! */
1427: LOGERR;
1428: return NULL;
1429: }
1430:
1431: /*
1432: * sched_hook_condition() - Default CONDITION hook
1433: *
1434: * @root = root task
1435: * @arg = killState from schedRun()
1436: * return: NULL kill scheduler loop or !=NULL ok
1437: */
1438: void *
1439: sched_hook_condition(void *root, void *arg)
1440: {
1441: sched_root_task_t *r = root;
1442:
1443: if (!r)
1444: return NULL;
1445:
1446: return (void*) (*r->root_cond - *(intptr_t*) arg);
1447: }
1448:
1449: /*
1450: * sched_hook_rtc() - Default RTC hook
1451: *
1452: * @task = current task
1453: * @arg = unused
1454: * return: <0 errors and 0 ok
1455: */
1456: void *
1457: sched_hook_rtc(void *task, void *arg __unused)
1458: {
1459: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
1460: defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
1461: sched_task_t *sigt = NULL, *t = task;
1462: struct itimerspec its;
1463: struct sigevent evt;
1464: timer_t tmr;
1465: #if SUP_ENABLE != KQ_SUPPORT
1466: struct sigaction sa;
1467: #endif
1468:
1469: if (!t || !TASK_ROOT(t))
1470: return (void*) -1;
1471:
1472: memset(&evt, 0, sizeof evt);
1473: evt.sigev_notify = SIGEV_SIGNAL;
1474: evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
1475: evt.sigev_value.sival_ptr = t;
1476:
1477: if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
1478: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1479: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1480: else
1481: LOGERR;
1482: return (void*) -1;
1483: } else
1484: TASK_FLAG(t) = (u_long) tmr;
1485:
1486: #if SUP_ENABLE == KQ_SUPPORT
1487: if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
1488: t, (size_t) tmr))) {
1489: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1490: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1491: else
1492: LOGERR;
1493: timer_delete(tmr);
1494: return (void*) -1;
1495: } else
1496: TASK_RET(t) = (uintptr_t) sigt;
1497: #else
1498: memset(&sa, 0, sizeof sa);
1499: sigemptyset(&sa.sa_mask);
1500: sa.sa_sigaction = _sched_rtcSigWrapper;
1501: sa.sa_flags = SA_SIGINFO | SA_RESTART;
1502:
1503: if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
1504: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1505: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1506: else
1507: LOGERR;
1508: timer_delete(tmr);
1509: return (void*) -1;
1510: }
1511: #endif
1512:
1513: memset(&its, 0, sizeof its);
1514: its.it_value.tv_sec = t->task_val.ts.tv_sec;
1515: its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
1516:
1517: if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
1518: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1519: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1520: else
1521: LOGERR;
1522: schedCancel(sigt);
1523: timer_delete(tmr);
1524: return (void*) -1;
1525: }
1526: #endif /* HAVE_TIMER_CREATE */
1527: return NULL;
1528: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */