1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.27.2.9 2014/06/05 22:22:46 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004 - 2014
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: static inline void
51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
52: {
53: remove_task_from(t, q);
54:
55: t->task_type = taskREADY;
56: insert_task_to(t, &(TASK_ROOT(t))->root_ready);
57: }
58:
59: #ifdef HAVE_LIBPTHREAD
/*
 * _sched_threadWrapper() - start routine for taskTHREAD tasks
 *
 * @t = task to execute inside the new thread
 * return: never returns normally; exits the thread with the task's result
 */
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	/* nothing to run without a task and its root */
	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	/* run the task and publish its result on the root */
	ret = schedCall(t);
	r->root_ret = ret;

	/* TASK_VAL carries the thread id (set by sched_hook_thread);
	 * non-zero means the task is still on the root_thread queue */
	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
89: #endif
90:
91: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
92: #if SUP_ENABLE == KQ_SUPPORT
/*
 * _sched_rtcWrapper() - kevent-driven RTC trampoline
 *
 * @t = proxy task; TASK_DATA() points at the real RTC task and
 *      TASK_DATLEN() carries the POSIX timer id to dispose of
 * return: value returned by the real task, or NULL on bad arguments
 */
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		/* one-shot timer: destroy it before running the task */
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	/* RTC task finished - recycle it onto the unuse queue */
	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
111: #else
/*
 * _sched_rtcSigWrapper() - realtime-signal handler for RTC tasks
 * (non-kqueue builds)
 *
 * @sig = delivered signal number (unused)
 * @si = siginfo; si_value.sival_ptr carries the RTC task
 * @uc = signal context (unused)
 */
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		/* one-shot timer: TASK_FLAG holds the POSIX timer id */
		timer_delete((timer_t) TASK_FLAG(task));

		TASK_RET(task) = (intptr_t) schedCall(task);

		/* recycle the finished RTC task */
		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
126: #endif
127: #endif
128:
129: /*
130: * sched_hook_init() - Default INIT hook
131: *
132: * @root = root task
133: * @arg = unused
134: * return: <0 errors and 0 ok
135: */
136: void *
137: sched_hook_init(void *root, void *arg __unused)
138: {
139: sched_root_task_t *r = root;
140:
141: if (!r)
142: return (void*) -1;
143:
144: #if SUP_ENABLE == KQ_SUPPORT
145: r->root_kq = kqueue();
146: if (r->root_kq == -1) {
147: LOGERR;
148: return (void*) -1;
149: }
150: #elif SUP_ENABLE == EP_SUPPORT
151: r->root_kq = epoll_create(KQ_EVENTS);
152: if (r->root_kq == -1) {
153: LOGERR;
154: return (void*) -1;
155: }
156: #else
157: r->root_kq ^= r->root_kq;
158: FD_ZERO(&r->root_fds[0]);
159: FD_ZERO(&r->root_fds[1]);
160: #endif
161:
162: return NULL;
163: }
164:
165: /*
166: * sched_hook_fini() - Default FINI hook
167: *
168: * @root = root task
169: * @arg = unused
170: * return: <0 errors and 0 ok
171: */
172: void *
173: sched_hook_fini(void *root, void *arg __unused)
174: {
175: sched_root_task_t *r = root;
176:
177: if (!r)
178: return (void*) -1;
179:
180: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
181: if (r->root_kq > 2) {
182: close(r->root_kq);
183: r->root_kq = 0;
184: }
185: #else
186: FD_ZERO(&r->root_fds[1]);
187: FD_ZERO(&r->root_fds[0]);
188: r->root_kq ^= r->root_kq;
189: #endif
190:
191: return NULL;
192: }
193:
194: /*
195: * sched_hook_cancel() - Default CANCEL hook
196: *
197: * @task = current task
198: * @arg = unused
199: * return: <0 errors and 0 ok
200: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	int flg;	/* subscriber count for the task's fd/ident */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.fd = 0 };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	/* each case builds a kernel de-registration request (chg/ee) which is
	 * submitted once after the switch, unless the case returns early */
	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
			/* EV_DELETE only when this is the last read subscriber */
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			ee.data.fd = TASK_FD(t);
			/* preserve any write interest still armed on this fd */
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events = EPOLLOUT;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
			else
				ee.events |= (EPOLLIN | EPOLLPRI | EPOLLRDHUP);
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);

				/* optimize select: shrink root_kq to highest set fd + 1 */
				for (i = r->root_kq - 1; i > 2; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
						break;
				if (i > 2)
					r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			ee.data.fd = TASK_FD(t);
			/* preserve any read interest still armed on this fd */
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select */
				for (i = r->root_kq - 1; i > 2; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
						break;
				if (i > 2)
					r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers; alarms are keyed by TASK_DATA */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) != TASK_DATA(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers; procs are keyed by TASK_VAL (pid) */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers; signals keyed by TASK_VAL (signo) */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* restore signal to default when the last subscriber leaves */
			if (flg < 2)
				signal(TASK_VAL(t), SIG_DFL);
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort the in-flight request and free its control block */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort every request in the list-I/O batch */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					free(acbs[i]);
				}
				free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
			/* TASK_VAL holds the pthread id (set in sched_hook_thread) */
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			/* no kernel event registration to update */
			return NULL;
#if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			/* TASK_FLAG holds the POSIX timer id */
			timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
			/* TASK_RET stores a companion sched_task_t - cancel it too */
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) != TASK_DATA(t))
					continue;
				else
					flg++;

			/* restore the realtime signal's default handler */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

#if SUP_ENABLE == KQ_SUPPORT
	/* apply the prepared change; zero timeout = do not wait for events */
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	/* MOD keeps the remaining interest, DEL drops the fd entirely */
	epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
#endif
	return NULL;
}
500:
501: #ifdef HAVE_LIBPTHREAD
502: /*
503: * sched_hook_thread() - Default THREAD hook
504: *
505: * @task = current task
506: * @arg = pthread attributes
507: * return: <0 errors and 0 ok
508: */
void *
sched_hook_thread(void *task, void *arg)
{
	sched_task_t *t = task;
	pthread_t tid;
	sigset_t s, o;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* block all signals around pthread_create so the new thread
	 * starts with a full signal mask (inherited from this thread) */
	sigfillset(&s);
	pthread_sigmask(SIG_BLOCK, &s, &o);
	errno = pthread_create(&tid, (pthread_attr_t*) arg,
			(void *(*)(void*)) _sched_threadWrapper, t);
	pthread_sigmask(SIG_SETMASK, &o, NULL);

	if (errno) {
		/* pthread_create() returns its error code directly;
		 * stashing it in errno lets LOGERR report it */
		LOGERR;
		return (void*) -1;
	} else
		TASK_VAL(t) = (u_long) tid;	/* remember the thread id */

	if (!TASK_ISLOCKED(t))
		TASK_LOCK(t);

	return NULL;
}
536: #endif
537:
538: /*
539: * sched_hook_read() - Default READ hook
540: *
541: * @task = current task
542: * @arg = unused
543: * return: <0 errors and 0 ok
544: */
void *
sched_hook_read(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI | EPOLLRDHUP, .data.fd = 0 };
	int flg = 0;	/* bit0 = fd already watched for read, bit1 = for write */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
	/* register an EV_CLEAR read filter; udata carries the fd so the
	 * dispatcher can match the event back to its task(s) */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
		flg |= 1;
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;	/* keep existing write interest */
	}

	ee.data.fd = TASK_FD(t);
	/* MOD when the fd is already registered, ADD otherwise */
	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		FD_SET(TASK_FD(t), &r->root_fds[0]);	/* record read interest */
#else
	/* select backend: set the read bit and grow the max-fd counter */
	FD_SET(TASK_FD(t), &r->root_fds[0]);
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
601:
602: /*
603: * sched_hook_write() - Default WRITE hook
604: *
605: * @task = current task
606: * @arg = unused
607: * return: <0 errors and 0 ok
608: */
void *
sched_hook_write(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
	int flg = 0;	/* bit0 = fd already watched for read, bit1 = for write */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
	/* register an EV_CLEAR write filter; udata carries the fd */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;	/* keep read interest */
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
		flg |= 2;

	ee.data.fd = TASK_FD(t);
	/* MOD when the fd is already registered, ADD otherwise */
	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		FD_SET(TASK_FD(t), &r->root_fds[1]);	/* record write interest */
#else
	/* select backend: set the write bit and grow the max-fd counter */
	FD_SET(TASK_FD(t), &r->root_fds[1]);
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
665:
666: /*
667: * sched_hook_alarm() - Default ALARM hook
668: *
669: * @task = current task
670: * @arg = unused
671: * return: <0 errors and 0 ok
672: */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* timer ident is TASK_DATA(); period converted from the task's
	 * timespec value to milliseconds for EVFILT_TIMER */
#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
704:
705: /*
706: * sched_hook_node() - Default NODE hook
707: *
708: * @task = current task
709: * @arg = unused
710: * return: <0 errors and 0 ok
711: */
void *
sched_hook_node(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the vnode behind TASK_FD() for all supported change events */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
743:
744: /*
745: * sched_hook_proc() - Default PROC hook
746: *
747: * @task = current task
748: * @arg = unused
749: * return: <0 errors and 0 ok
750: */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the process whose pid is in TASK_VAL() for exit/fork/exec */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
780:
781: /*
782: * sched_hook_signal() - Default SIGNAL hook
783: *
784: * @task = current task
785: * @arg = unused
786: * return: <0 errors and 0 ok
787: */
void *
sched_hook_signal(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* ignore signal so delivery happens only via the kqueue filter */
	signal(TASK_VAL(t), SIG_IGN);

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif
	return NULL;
}
817:
818: /*
819: * sched_hook_user() - Default USER hook
820: *
821: * @task = current task
822: * @arg = unused
823: * return: <0 errors and 0 ok
824: */
825: #ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register a user-triggered event; ident = TASK_VAL(),
	 * fflags taken from TASK_DATLEN() */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
855: #endif
856:
857: #if SUP_ENABLE == KQ_SUPPORT
858: static inline void
859: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
860: {
861: struct kevent evt[1];
862: register int i;
863: sched_task_t *task, *tmp;
864: struct timespec now = { 0, 0 };
865: #ifdef AIO_SUPPORT
866: int len, fd;
867: struct aiocb *acb;
868: #ifdef EVFILT_LIO
869: int l;
870: register int j;
871: off_t off;
872: struct aiocb **acbs;
873: struct iovec *iv;
874: #endif /* EVFILT_LIO */
875: #endif /* AIO_SUPPORT */
876:
877: for (i = 0; i < en; i++) {
878: memcpy(evt, &res[i], sizeof evt);
879: evt->flags = EV_DELETE;
880: /* Put read/write task to ready queue */
881: switch (res[i].filter) {
882: case EVFILT_READ:
883: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
884: if (TASK_FD(task) != ((intptr_t) res[i].udata))
885: continue;
886: else {
887: TASK_RET(task) = res[i].data;
888: TASK_FLAG(task) = (u_long) res[i].fflags;
889: }
890: /* remove read handle */
891: remove_task_from(task, &r->root_read);
892:
893: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
894: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
895: task->task_type = taskUNUSE;
896: insert_task_to(task, &r->root_unuse);
897: } else {
898: task->task_type = taskREADY;
899: insert_task_to(task, &r->root_ready);
900: }
901: } else {
902: task->task_type = taskREADY;
903: insert_task_to(task, &r->root_ready);
904: }
905: }
906: break;
907: case EVFILT_WRITE:
908: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
909: if (TASK_FD(task) != ((intptr_t) res[i].udata))
910: continue;
911: else {
912: TASK_RET(task) = res[i].data;
913: TASK_FLAG(task) = (u_long) res[i].fflags;
914: }
915: /* remove write handle */
916: remove_task_from(task, &r->root_write);
917:
918: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
919: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
920: task->task_type = taskUNUSE;
921: insert_task_to(task, &r->root_unuse);
922: } else {
923: task->task_type = taskREADY;
924: insert_task_to(task, &r->root_ready);
925: }
926: } else {
927: task->task_type = taskREADY;
928: insert_task_to(task, &r->root_ready);
929: }
930: }
931: break;
932: case EVFILT_TIMER:
933: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
934: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
935: continue;
936: else {
937: TASK_RET(task) = res[i].data;
938: TASK_FLAG(task) = (u_long) res[i].fflags;
939: }
940: /* remove alarm handle */
941: transit_task2ready(task, &r->root_alarm);
942: }
943: break;
944: case EVFILT_VNODE:
945: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
946: if (TASK_FD(task) != ((intptr_t) res[i].udata))
947: continue;
948: else {
949: TASK_RET(task) = res[i].data;
950: TASK_FLAG(task) = (u_long) res[i].fflags;
951: }
952: /* remove node handle */
953: transit_task2ready(task, &r->root_node);
954: }
955: break;
956: case EVFILT_PROC:
957: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
958: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
959: continue;
960: else {
961: TASK_RET(task) = res[i].data;
962: TASK_FLAG(task) = (u_long) res[i].fflags;
963: }
964: /* remove proc handle */
965: transit_task2ready(task, &r->root_proc);
966: }
967: break;
968: case EVFILT_SIGNAL:
969: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
970: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
971: continue;
972: else {
973: TASK_RET(task) = res[i].data;
974: TASK_FLAG(task) = (u_long) res[i].fflags;
975: }
976: /* remove signal handle */
977: transit_task2ready(task, &r->root_signal);
978: }
979: break;
980: #ifdef AIO_SUPPORT
981: case EVFILT_AIO:
982: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
983: acb = (struct aiocb*) TASK_VAL(task);
984: if (acb != ((struct aiocb*) res[i].udata))
985: continue;
986: else {
987: TASK_RET(task) = res[i].data;
988: TASK_FLAG(task) = (u_long) res[i].fflags;
989: }
990: /* remove user handle */
991: transit_task2ready(task, &r->root_aio);
992:
993: fd = acb->aio_fildes;
994: if ((len = aio_return(acb)) != -1) {
995: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
996: LOGERR;
997: } else
998: LOGERR;
999: free(acb);
1000: TASK_DATLEN(task) = (u_long) len;
1001: TASK_FD(task) = fd;
1002: }
1003: break;
1004: #ifdef EVFILT_LIO
1005: case EVFILT_LIO:
1006: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
1007: acbs = (struct aiocb**) TASK_VAL(task);
1008: if (acbs != ((struct aiocb**) res[i].udata))
1009: continue;
1010: else {
1011: TASK_RET(task) = res[i].data;
1012: TASK_FLAG(task) = (u_long) res[i].fflags;
1013: }
1014: /* remove user handle */
1015: transit_task2ready(task, &r->root_lio);
1016:
1017: iv = (struct iovec*) TASK_DATA(task);
1018: fd = acbs[0]->aio_fildes;
1019: off = acbs[0]->aio_offset;
1020: for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
1021: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
1022: l = 0;
1023: else
1024: l = iv[i].iov_len;
1025: free(acbs[i]);
1026: }
1027: free(acbs);
1028: TASK_DATLEN(task) = (u_long) len;
1029: TASK_FD(task) = fd;
1030:
1031: if (lseek(fd, off + len, SEEK_CUR) == -1)
1032: LOGERR;
1033: }
1034: break;
1035: #endif /* EVFILT_LIO */
1036: #endif /* AIO_SUPPORT */
1037: #ifdef EVFILT_USER
1038: case EVFILT_USER:
1039: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
1040: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
1041: continue;
1042: else {
1043: TASK_RET(task) = res[i].data;
1044: TASK_FLAG(task) = (u_long) res[i].fflags;
1045: }
1046: /* remove user handle */
1047: transit_task2ready(task, &r->root_user);
1048: }
1049: break;
1050: #endif /* EVFILT_USER */
1051: }
1052:
1053: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
1054: if (r->root_hooks.hook_exec.exception)
1055: r->root_hooks.hook_exec.exception(r, NULL);
1056: else
1057: LOGERR;
1058: }
1059: }
1060: }
1061: #endif
1062:
1063: #if SUP_ENABLE == EP_SUPPORT
/*
 * fetch_hook_epoll_proceed() - dispatch fired epoll events to their tasks
 *
 * @en = number of received events in res[]
 * @res = events returned by epoll_wait(2)
 * @r = root task
 */
static inline void
fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
{
	register int i, flg;
	int ops = EPOLL_CTL_DEL;
	/* NOTE(review): ops is never reset inside the loop, so a value set
	 * while handling one event carries over to later events - confirm
	 * this is intentional */
	sched_task_t *task, *tmp;
	struct epoll_event evt[1];

	for (i = 0; i < en; i++) {
		memcpy(evt, &res[i], sizeof evt);

		/* readable side */
		if (evt->events & (EPOLLIN | EPOLLPRI | EPOLLET)) {
			flg = 0;
			TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
				if (TASK_FD(task) != evt->data.fd)
					continue;
				else {
					flg++;
					FD_CLR(TASK_FD(task), &r->root_fds[0]);
					/* FIONREAD stores the readable byte count in TASK_RET */
					TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));

					/* drop read interest; keep write interest if armed */
					evt->events &= ~(EPOLLIN | EPOLLPRI | EPOLLET | EPOLLRDHUP);
					if (FD_ISSET(TASK_FD(task), &r->root_fds[1])) {
						ops = EPOLL_CTL_MOD;
						evt->events |= EPOLLOUT;
					}
				}
				/* remove read handle */
				remove_task_from(task, &r->root_read);

				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
					/* exception hook decides: non-zero discards the task */
					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
							(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}
			}
			if (flg > 1)
				ops = EPOLL_CTL_MOD;	/* other readers remain on this fd */
		}

		/* writable side */
		if (evt->events & EPOLLOUT) {
			flg = 0;
			TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
				if (TASK_FD(task) != evt->data.fd)
					continue;
				else {
					flg++;
					FD_CLR(TASK_FD(task), &r->root_fds[1]);
					TASK_FLAG(task) = ioctl(TASK_FD(task),
							FIONWRITE, &TASK_RET(task));

					/* drop write interest; keep read interest if armed */
					evt->events &= ~EPOLLOUT;
					if (FD_ISSET(TASK_FD(task), &r->root_fds[0])) {
						ops = EPOLL_CTL_MOD;
						evt->events |= EPOLLIN | EPOLLPRI | EPOLLRDHUP;
					}
				}
				/* remove write handle */
				remove_task_from(task, &r->root_write);

				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
							(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}
			}
			if (flg > 1)
				ops = EPOLL_CTL_MOD;	/* other writers remain on this fd */
		}

		/* apply the adjusted interest set back to the kernel */
		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
			if (r->root_hooks.hook_exec.exception) {
				r->root_hooks.hook_exec.exception(r, NULL);
			} else
				LOGERR;
		}
	}
}
1158: #endif
1159:
1160: #if SUP_ENABLE == NO_SUPPORT
1161: static inline void
1162: fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
1163: {
1164: register int i, flg;
1165: sched_task_t *task, *tmp;
1166:
1167: /* skip select check if return value from select is zero */
1168: if (!en)
1169: return;
1170:
1171: for (i = 0; i < r->root_kq; i++) {
1172: if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
1173: flg = 0;
1174: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
1175: if (TASK_FD(task) != i)
1176: continue;
1177: else {
1178: flg++;
1179: TASK_FLAG(task) = ioctl(TASK_FD(task),
1180: FIONREAD, &TASK_RET(task));
1181: }
1182: /* remove read handle */
1183: remove_task_from(task, &r->root_read);
1184:
1185: if (r->root_hooks.hook_exec.exception) {
1186: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1187: task->task_type = taskUNUSE;
1188: insert_task_to(task, &r->root_unuse);
1189: } else {
1190: task->task_type = taskREADY;
1191: insert_task_to(task, &r->root_ready);
1192: }
1193: } else {
1194: task->task_type = taskREADY;
1195: insert_task_to(task, &r->root_ready);
1196: }
1197: }
1198: /* if match equal to 1, remove resouce */
1199: if (flg == 1)
1200: FD_CLR(i, &r->root_fds[0]);
1201: }
1202:
1203: if (FD_ISSET(i, &wfd)) {
1204: flg = 0;
1205: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
1206: if (TASK_FD(task) != i)
1207: continue;
1208: else {
1209: flg++;
1210: TASK_FLAG(task) = ioctl(TASK_FD(task),
1211: FIONWRITE, &TASK_RET(task));
1212: }
1213: /* remove write handle */
1214: remove_task_from(task, &r->root_write);
1215:
1216: if (r->root_hooks.hook_exec.exception) {
1217: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1218: task->task_type = taskUNUSE;
1219: insert_task_to(task, &r->root_unuse);
1220: } else {
1221: task->task_type = taskREADY;
1222: insert_task_to(task, &r->root_ready);
1223: }
1224: } else {
1225: task->task_type = taskREADY;
1226: insert_task_to(task, &r->root_ready);
1227: }
1228: }
1229: /* if match equal to 1, remove resouce */
1230: if (flg == 1)
1231: FD_CLR(i, &r->root_fds[1]);
1232: }
1233: }
1234:
1235: /* optimize select */
1236: for (i = r->root_kq - 1; i > 2; i--)
1237: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
1238: break;
1239: if (i > 2)
1240: r->root_kq = i + 1;
1241: }
1242: #endif
1243:
1244: /*
1245: * sched_hook_fetch() - Default FETCH hook
1246: *
1247: * @root = root task
1248: * @arg = unused
1249: * return: NULL error or !=NULL fetched task
1250: */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	u_long timeout = 0;
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* get new task by queue priority: event queue first, then ready queue */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

#ifdef TIMER_WITHOUT_SORT
	/* unsorted timer queue: scan every timer to find the nearest deadline */
	clock_gettime(CLOCK_MONOTONIC, &now);

	sched_timespecclear(&r->root_wait);
	TAILQ_FOREACH(task, &r->root_timer, task_node) {
		if (!sched_timespecisset(&r->root_wait))
			r->root_wait = TASK_TS(task);
		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
			r->root_wait = TASK_TS(task);
	}

	if (TAILQ_FIRST(&r->root_timer)) {
		/* convert the absolute deadline into a relative wait time */
		m = r->root_wait;
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#else /* ! TIMER_WITHOUT_SORT */
	/* sorted timer queue: the head timer holds the nearest deadline */
	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
		clock_gettime(CLOCK_MONOTONIC, &now);

		m = TASK_TS(task);
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#endif /* TIMER_WITHOUT_SORT */
	/* if present member of task, set NOWAIT */
	if (TAILQ_FIRST(&r->root_task))
		sched_timespecclear(&r->root_wait);

	/* translate the computed wait into the backend's timeout type;
	 * tv_sec == -1 / tv_nsec == -1 encodes INFTIM (wait forever) */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_wait;
#elif SUP_ENABLE == EP_SUPPORT
		/* epoll takes milliseconds */
		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_wait, &tv);
		timeout = &tv;
#endif /* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
		/* fall back to the configured poll interval */
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif /* KQ_SUPPORT */
	}

	/* block until an event fires or the timeout expires */
#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	rfd = xfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif /* KQ_SUPPORT */
		/* poll failure: give the exception hook a chance, else log
		 * anything that is not a plain signal interruption */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif /* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put regular task priority task to ready queue,
		if there is no ready task or reach max missing hit for regular task */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			/* reset the miss counter (x ^= x is x = 0) */
			r->root_miss ^= r->root_miss;

			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);
	return task;
}
1398:
1399: /*
1400: * sched_hook_exception() - Default EXCEPTION hook
1401: *
1402: * @root = root task
1403: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1404: * return: <0 errors and 0 ok
1405: */
1406: void *
1407: sched_hook_exception(void *root, void *arg)
1408: {
1409: sched_root_task_t *r = root;
1410:
1411: if (!r)
1412: return NULL;
1413:
1414: /* custom exception handling ... */
1415: if (arg) {
1416: if (arg == (void*) EV_EOF)
1417: return NULL;
1418: return (void*) -1; /* raise scheduler error!!! */
1419: }
1420:
1421: /* if error hook exists */
1422: if (r->root_hooks.hook_root.error)
1423: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1424:
1425: /* default case! */
1426: LOGERR;
1427: return NULL;
1428: }
1429:
1430: /*
1431: * sched_hook_condition() - Default CONDITION hook
1432: *
1433: * @root = root task
1434: * @arg = killState from schedRun()
1435: * return: NULL kill scheduler loop or !=NULL ok
1436: */
1437: void *
1438: sched_hook_condition(void *root, void *arg)
1439: {
1440: sched_root_task_t *r = root;
1441:
1442: if (!r)
1443: return NULL;
1444:
1445: return (void*) (*r->root_cond - *(intptr_t*) arg);
1446: }
1447:
1448: /*
1449: * sched_hook_rtc() - Default RTC hook
1450: *
1451: * @task = current task
1452: * @arg = unused
1453: * return: <0 errors and 0 ok
1454: */
1455: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
void *
sched_hook_rtc(void *task, void *arg __unused)
{
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* notify via real-time signal SIGRTMIN + task data offset,
	 * carrying the task pointer as the signal value */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* stash the POSIX timer handle on the task for later teardown */
		TASK_FLAG(t) = (u_long) tmr;

#if SUP_ENABLE == KQ_SUPPORT
	/* kqueue backend: receive the signal through a scheduler signal task */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
			t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	/* signal backend: install a SA_SIGINFO handler for the RT signal */
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	/* arm the timer one-shot with the task's relative timespec */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* NOTE(review): in the non-KQ build sigt is still NULL here —
		 * assumes schedCancel(NULL) is a safe no-op; verify */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}

	return NULL;
}
1527: #endif /* HAVE_TIMER_CREATE */
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>