1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.34 2019/01/14 15:58:50 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004 - 2018
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: static inline void
51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
52: {
53: remove_task_from(t, q);
54:
55: t->task_type = taskREADY;
56: insert_task_to(t, &(TASK_ROOT(t))->root_ready);
57: }
58:
59: #ifdef HAVE_LIBPTHREAD
60: static void *
61: _sched_threadWrapper(sched_task_t *t)
62: {
63: void *ret = NULL;
64: sched_root_task_t *r;
65:
66: if (!t || !TASK_ROOT(t))
67: pthread_exit(ret);
68: else
69: r = (sched_root_task_t*) TASK_ROOT(t);
70:
71: pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
72: /*
73: pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
74: */
75:
76: /* notify parent, thread is ready for execution */
77: pthread_testcancel();
78:
79: ret = schedCall(t);
80: r->root_ret = ret;
81:
82: if (TASK_VAL(t)) {
83: transit_task2unuse(t, &r->root_thread);
84: TASK_VAL(t) = 0;
85: }
86:
87: pthread_exit(ret);
88: }
89: #endif
90:
91: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
92: defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
93: #if SUP_ENABLE == KQ_SUPPORT
/* RTC trampoline (kqueue build): the real task travels in TASK_DATA of
 * the carrier task t, the timer id in TASK_DATLEN.  Runs the task once
 * and retires it from the rtc queue. */
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		task = (sched_task_t*) TASK_DATA(t);
		/* one-shot: dispose of the POSIX timer before dispatch */
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
112: #else
/* RTC dispatch via real-time signal (non-kqueue build): the task pointer
 * arrives in the timer's sigevent value; the timer id is read from
 * TASK_FLAG.  Runs the task and retires it from the rtc queue. */
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		/* one-shot: dispose of the POSIX timer before dispatch */
		timer_delete((timer_t) TASK_FLAG(task));

		TASK_RET(task) = (intptr_t) schedCall(task);

		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
127: #endif
128: #endif
129:
130: /*
131: * sched_hook_init() - Default INIT hook
132: *
133: * @root = root task
134: * @arg = unused
135: * return: <0 errors and 0 ok
136: */
137: void *
138: sched_hook_init(void *root, void *arg __unused)
139: {
140: sched_root_task_t *r = root;
141:
142: if (!r)
143: return (void*) -1;
144:
145: #if SUP_ENABLE == KQ_SUPPORT
146: r->root_kq = kqueue();
147: if (r->root_kq == -1) {
148: LOGERR;
149: return (void*) -1;
150: }
151: #elif SUP_ENABLE == EP_SUPPORT
152: r->root_kq = epoll_create(KQ_EVENTS);
153: if (r->root_kq == -1) {
154: LOGERR;
155: return (void*) -1;
156: }
157: #else
158: r->root_kq ^= r->root_kq;
159: FD_ZERO(&r->root_fds[0]);
160: FD_ZERO(&r->root_fds[1]);
161: #endif
162:
163: return NULL;
164: }
165:
166: /*
167: * sched_hook_fini() - Default FINI hook
168: *
169: * @root = root task
170: * @arg = unused
171: * return: <0 errors and 0 ok
172: */
173: void *
174: sched_hook_fini(void *root, void *arg __unused)
175: {
176: sched_root_task_t *r = root;
177:
178: if (!r)
179: return (void*) -1;
180:
181: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
182: if (r->root_kq > 2) {
183: close(r->root_kq);
184: r->root_kq = 0;
185: }
186: #else
187: FD_ZERO(&r->root_fds[1]);
188: FD_ZERO(&r->root_fds[0]);
189: r->root_kq ^= r->root_kq;
190: #endif
191:
192: return NULL;
193: }
194:
195: /*
196: * sched_hook_cancel() - Default CANCEL hook
197: *
198: * @task = current task
199: * @arg = unused
200: * return: <0 errors and 0 ok
201: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	int flg;	/* number of tasks subscribed to the same event source */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero: apply change, don't wait */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.fd = 0 };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif /* EVFILT_LIO */
#endif /* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
			/* delete the kernel filter only if this is the last subscriber */
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;	/* clear the event mask */
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events = EPOLLOUT;	/* keep write interest if any */

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
			else
				ee.events |= EPOLLIN | EPOLLPRI;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);

				/* optimize select */
				for (i = r->root_kq - 1; i > 2; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
						break;
				if (i > 2)
					r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;	/* clear the event mask */
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events = EPOLLIN | EPOLLPRI;	/* keep read interest if any */

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select */
				for (i = r->root_kq - 1; i > 2; i--)
					if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
						break;
				if (i > 2)
					r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) != TASK_DATA(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) != TASK_FD(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* restore signal */
			if (flg < 2)
				signal(TASK_VAL(t), SIG_DFL);
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort the in-flight request and release its control block */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort every request in the list and free the array */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					free(acbs[i]);
				}
				free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif /* EVFILT_LIO */
#endif /* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) != TASK_VAL(t))
					continue;
				else
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif /* EVFILT_USER */
		case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				/* NOTE(review): TASK_VAL may already have been zeroed by
				 * _sched_threadWrapper during the join, hence the re-check */
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			return NULL;
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			flg = 0;
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) != TASK_DATA(t))
					continue;
				else
					flg++;

			/* restore signal */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif /* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	/* MOD keeps the remaining direction registered; DEL drops the fd */
	epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
#endif
	return NULL;
}
504:
505: #ifdef HAVE_LIBPTHREAD
506: /*
507: * sched_hook_thread() - Default THREAD hook
508: *
509: * @task = current task
510: * @arg = pthread attributes
511: * return: <0 errors and 0 ok
512: */
void *
sched_hook_thread(void *task, void *arg)
{
	sched_task_t *t = task;
	pthread_t tid;
	sigset_t s, o;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* block all signals around pthread_create() so the new thread
	 * inherits a full signal mask; restore the old mask afterwards */
	sigfillset(&s);
	pthread_sigmask(SIG_BLOCK, &s, &o);
	errno = pthread_create(&tid, (pthread_attr_t*) arg,
			(void *(*)(void*)) _sched_threadWrapper, t);
	pthread_sigmask(SIG_SETMASK, &o, NULL);

	if (errno) {
		LOGERR;
		return (void*) -1;
	} else
		TASK_VAL(t) = (u_long) tid;	/* keep thread id for later cancel/join */

	if (!TASK_ISLOCKED(t))
		TASK_LOCK(t);

	return NULL;
}
540: #endif
541:
542: /*
543: * sched_hook_read() - Default READ hook
544: *
545: * @task = current task
546: * @arg = unused
547: * return: <0 errors and 0 ok
548: */
void *
sched_hook_read(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero: register only, don't wait */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee;
	int flg = 0;	/* bit 0: fd already in read set; bit 1: in write set */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	ee.data.fd = TASK_FD(t);
	ee.events = EPOLLIN | EPOLLPRI;
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
		flg |= 1;
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;	/* preserve existing write interest */
	}

	/* MOD if the fd is already registered for either direction, else ADD */
	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		FD_SET(TASK_FD(t), &r->root_fds[0]);
#else
	FD_SET(TASK_FD(t), &r->root_fds[0]);
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;	/* root_kq doubles as nfds for select() */
#endif

	return NULL;
}
606:
607: /*
608: * sched_hook_write() - Default WRITE hook
609: *
610: * @task = current task
611: * @arg = unused
612: * return: <0 errors and 0 ok
613: */
void *
sched_hook_write(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero: register only, don't wait */
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee;
	int flg = 0;	/* bit 0: fd already in read set; bit 1: in write set */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	ee.data.fd = TASK_FD(t);
	ee.events = EPOLLOUT;

	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN | EPOLLPRI;	/* preserve existing read interest */
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
		flg |= 2;

	/* MOD if the fd is already registered for either direction, else ADD */
	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
	FD_SET(TASK_FD(t), &r->root_fds[1]);
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;	/* root_kq doubles as nfds for select() */
#endif

	return NULL;
}
672:
673: /*
674: * sched_hook_alarm() - Default ALARM hook
675: *
676: * @task = current task
677: * @arg = unused
678: * return: <0 errors and 0 ok
679: */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* EVFILT_TIMER takes milliseconds: convert the task's timespec value */
#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
711:
712: /*
713: * sched_hook_node() - Default NODE hook
714: *
715: * @task = current task
716: * @arg = if arg == 42 then waiting for all events
717: * return: <0 errors and 0 ok
718: */
void *
sched_hook_node(void *task, void *arg)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
	/* extra NOTE_* flags are smuggled through the arg pointer;
	 * NOTE(review): pointer-to-u_int cast narrows on LP64 — the flag
	 * bits fit in 32 bits, but confirm this is intentional */
	u_int addflags = (u_int) arg;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
751:
752: /*
753: * sched_hook_proc() - Default PROC hook
754: *
755: * @task = current task
756: * @arg = unused
757: * return: <0 errors and 0 ok
758: */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the pid stored in TASK_VAL for exit/fork/exec, with tracking */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
788:
789: /*
790: * sched_hook_signal() - Default SIGNAL hook
791: *
792: * @task = current task
793: * @arg = unused
794: * return: <0 errors and 0 ok
795: */
void *
sched_hook_signal(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* ignore signal */
	signal(TASK_VAL(t), SIG_IGN);

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif
	return NULL;
}
825:
826: /*
827: * sched_hook_user() - Default USER hook
828: *
829: * @task = current task
830: * @arg = unused
831: * return: <0 errors and 0 ok
832: */
833: #ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* user event identified by TASK_VAL; fflags carried in TASK_DATLEN */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
863: #endif
864:
865: #if SUP_ENABLE == KQ_SUPPORT
/* Dispatch fired kevents: match each returned event against the queue for
 * its filter type, move the matching task to the ready queue, then delete
 * the one-shot kevent from the kernel queue. */
static inline void
fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
{
	struct kevent evt[1];
	register int i;
	sched_task_t *task, *tmp;
	struct timespec now = { 0, 0 };	/* zero: apply deletion without waiting */
#ifdef AIO_SUPPORT
	int len, fd;
	struct aiocb *acb;
#ifdef EVFILT_LIO
	int l;
	off_t off;
	struct aiocb **acbs;
	struct iovec *iv;
#endif /* EVFILT_LIO */
#endif /* AIO_SUPPORT */

	for (i = 0; i < en; i++) {
		memcpy(evt, &res[i], sizeof evt);
		evt->flags = EV_DELETE;	/* fired events are removed after dispatch */
		/* Put read/write task to ready queue */
		switch (res[i].filter) {
			case EVFILT_READ:
				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove read handle */
						remove_task_from(task, &r->root_read);

						/* EOF: let the exception hook decide ready vs. unuse */
						if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
							if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
								task->task_type = taskUNUSE;
								insert_task_to(task, &r->root_unuse);
							} else {
								task->task_type = taskREADY;
								insert_task_to(task, &r->root_ready);
							}
						} else {
							task->task_type = taskREADY;
							insert_task_to(task, &r->root_ready);
						}
						break;
					}
				}
				break;
			case EVFILT_WRITE:
				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove write handle */
						remove_task_from(task, &r->root_write);

						if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
							if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
								task->task_type = taskUNUSE;
								insert_task_to(task, &r->root_unuse);
							} else {
								task->task_type = taskREADY;
								insert_task_to(task, &r->root_ready);
							}
						} else {
							task->task_type = taskREADY;
							insert_task_to(task, &r->root_ready);
						}
						break;
					}
				}
				break;
			case EVFILT_TIMER:
				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
					if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove alarm handle */
						transit_task2ready(task, &r->root_alarm);
						break;
					}
				}
				break;
			case EVFILT_VNODE:
				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove node handle */
						transit_task2ready(task, &r->root_node);
						break;
					}
				}
				break;
			case EVFILT_PROC:
				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove proc handle */
						transit_task2ready(task, &r->root_proc);
						break;
					}
				}
				break;
			case EVFILT_SIGNAL:
				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove signal handle */
						transit_task2ready(task, &r->root_signal);
						break;
					}
				}
				break;
#ifdef AIO_SUPPORT
			case EVFILT_AIO:
				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
					acb = (struct aiocb*) TASK_VAL(task);
					if (acb == ((struct aiocb*) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove user handle */
						transit_task2ready(task, &r->root_aio);

						/* collect completion status, advance file position,
						 * then release the control block */
						fd = acb->aio_fildes;
						if ((len = aio_return(acb)) != -1) {
							if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
								LOGERR;
						} else
							LOGERR;
						free(acb);
						TASK_DATLEN(task) = (u_long) len;
						TASK_FD(task) = fd;
						break;
					}
				}
				break;
#ifdef EVFILT_LIO
			case EVFILT_LIO:
				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
					acbs = (struct aiocb**) TASK_VAL(task);
					if (acbs == ((struct aiocb**) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove user handle */
						transit_task2ready(task, &r->root_lio);

						iv = (struct iovec*) TASK_DATA(task);
						fd = acbs[0]->aio_fildes;
						off = acbs[0]->aio_offset;
						/* NOTE(review): this loop reuses the outer event index
						 * `i` without resetting it to 0 — it starts at the
						 * current event's index and advances the outer loop as
						 * a side effect; looks like a bug, confirm upstream */
						for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
							if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
								l = 0;
							else
								l = iv[i].iov_len;
							free(acbs[i]);
						}
						free(acbs);
						TASK_DATLEN(task) = (u_long) len;
						TASK_FD(task) = fd;

						if (lseek(fd, off + len, SEEK_CUR) == -1)
							LOGERR;
						break;
					}
				}
				break;
#endif /* EVFILT_LIO */
#endif /* AIO_SUPPORT */
#ifdef EVFILT_USER
			case EVFILT_USER:
				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						TASK_RET(task) = res[i].data;
						TASK_FLAG(task) = (u_long) res[i].fflags;

						/* remove user handle */
						transit_task2ready(task, &r->root_user);
						break;
					}
				}
				break;
#endif /* EVFILT_USER */
		}

		/* drop the fired one-shot kevent from the kernel queue */
		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
			if (r->root_hooks.hook_exec.exception)
				r->root_hooks.hook_exec.exception(r, NULL);
			else
				LOGERR;
		}
	}
}
1068: #endif
1069:
1070: #if SUP_ENABLE == EP_SUPPORT
/* Dispatch fired epoll events: match each event's fd against the read or
 * write queue, move the first matching task to the ready queue, and update
 * the fd's epoll registration (MOD to keep remaining interest, DEL to drop). */
static inline void
fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
{
	register int i, flg;
	/* NOTE(review): `ops` is initialized only once, outside the loop; after
	 * an iteration switches it to EPOLL_CTL_MOD it is never reset to
	 * EPOLL_CTL_DEL for later events in the batch — verify intended */
	int ops = EPOLL_CTL_DEL;
	sched_task_t *t, *tmp, *task;
	struct epoll_event evt[1];

	for (i = 0; i < en; i++) {
		memcpy(evt, &res[i], sizeof evt);

		if (evt->events & (EPOLLIN | EPOLLPRI)) {
			/* count subscribers on this fd; remember the first as `task` */
			flg = 0;
			task = NULL;
			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
				if (TASK_FD(t) == evt->data.fd) {
					if (!flg)
						task = t;
					flg++;
				}
			}

			if (flg && task) {
				/* FLAG holds ioctl status; RET gets the pending byte count */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
				/* remove read handle */
				remove_task_from(task, &r->root_read);

				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
							(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* rebuild the registration: keep write interest and, if more
				 * read subscribers remain, read interest too */
				evt->events ^= evt->events;
				if (FD_ISSET(evt->data.fd, &r->root_fds[1])) {
					ops = EPOLL_CTL_MOD;
					evt->events |= EPOLLOUT;
				}
				if (flg > 1) {
					ops = EPOLL_CTL_MOD;
					evt->events |= EPOLLIN | EPOLLPRI;
				} else
					FD_CLR(evt->data.fd, &r->root_fds[0]);
			}
		} else if (evt->events & EPOLLOUT) {
			flg = 0;
			task = NULL;
			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
				if (TASK_FD(t) == evt->data.fd) {
					if (!flg)
						task = t;
					flg++;
				}
			}

			if (flg && task) {
				/* NOTE(review): FIONWRITE exists on FreeBSD but not Linux —
				 * confirm the intended build targets for the epoll backend */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
				/* remove write handle */
				remove_task_from(task, &r->root_write);

				if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
					if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
							(evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				evt->events ^= evt->events;
				if (FD_ISSET(evt->data.fd, &r->root_fds[0])) {
					ops = EPOLL_CTL_MOD;
					evt->events |= EPOLLIN | EPOLLPRI;
				}
				if (flg > 1) {
					ops = EPOLL_CTL_MOD;
					evt->events |= EPOLLOUT;
				} else
					FD_CLR(evt->data.fd, &r->root_fds[1]);
			}
		}

		if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
			if (r->root_hooks.hook_exec.exception) {
				r->root_hooks.hook_exec.exception(r, NULL);
			} else
				LOGERR;
		}
	}
}
1174: #endif
1175:
1176: #if SUP_ENABLE == NO_SUPPORT
/*
 * fetch_hook_select_proceed() - dispatch select() results to scheduler queues
 *
 * @en = return value of select()
 * @rfd/@wfd/@xfd = read/write/exception sets filled by select()
 * @r = scheduler root; in the select build r->root_kq holds nfds (max fd + 1)
 */
static inline void
fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
{
	register int i, flg;
	sched_task_t *t, *tmp, *task = NULL;

	/* skip select check if return value from select is zero */
	if (!en)
		return;

	/* scan all tracked descriptors; exception fds are handled as reads */
	for (i = 0; i < r->root_kq; i++) {
		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
			/* find first read task on this fd; flg counts all matches */
			flg = 0;
			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!flg)
						task = t;
					flg++;
				}
			}

			if (flg && task) {
				/* pending byte count goes to TASK_RET, ioctl status to TASK_FLAG */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));

				/* remove read handle */
				remove_task_from(task, &r->root_read);

				/* non-zero return from the exception hook discards the task */
				if (r->root_hooks.hook_exec.exception) {
					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource only when no other read task uses this fd */
				if (flg == 1)
					FD_CLR(i, &r->root_fds[0]);
			}
		} else if (FD_ISSET(i, &wfd)) {
			/* find first write task on this fd; flg counts all matches */
			flg = 0;
			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!flg)
						task = t;
					flg++;
				}
			}

			if (flg && task) {
				/* writable byte count goes to TASK_RET, ioctl status to TASK_FLAG */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));

				/* remove write handle */
				remove_task_from(task, &r->root_write);

				/* non-zero return from the exception hook discards the task */
				if (r->root_hooks.hook_exec.exception) {
					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource only when no other write task uses this fd */
				if (flg == 1)
					FD_CLR(i, &r->root_fds[1]);
			}
		}
	}

	/* optimize select: shrink nfds to the highest fd still tracked
	 * (never below 3, so std{in,out,err} stay covered) */
	for (i = r->root_kq - 1; i > 2; i--)
		if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
			break;
	if (i > 2)
		r->root_kq = i + 1;
}
1264: #endif
1265:
1266: /*
1267: * sched_hook_fetch() - Default FETCH hook
1268: *
1269: * @root = root task
1270: * @arg = unused
1271: * return: NULL error or !=NULL fetched task
1272: */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	u_long timeout = 0;	/* epoll takes milliseconds, not a timespec */
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* get new task by queue priority: event queue first, then ready queue */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

	/* if present member of task, set NOWAIT */
	if (!TAILQ_FIRST(&r->root_task)) {
		/* timer tasks: compute root_wait = time until nearest timer */
#ifdef TIMER_WITHOUT_SORT
		/* unsorted timer queue: scan for the minimum absolute deadline */
		clock_gettime(CLOCK_MONOTONIC, &now);

		sched_timespecclear(&r->root_wait);
		TAILQ_FOREACH(task, &r->root_timer, task_node) {
			if (!sched_timespecisset(&r->root_wait))
				r->root_wait = TASK_TS(task);
			else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
				r->root_wait = TASK_TS(task);
		}

		if (TAILQ_FIRST(&r->root_timer)) {
			m = r->root_wait;
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#else /* ! TIMER_WITHOUT_SORT */
		/* sorted timer queue: head holds the nearest deadline */
		if ((task = TAILQ_FIRST(&r->root_timer))) {
			clock_gettime(CLOCK_MONOTONIC, &now);

			m = TASK_TS(task);
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#endif /* TIMER_WITHOUT_SORT */
	} else /* no waiting for event, because we have ready task */
		sched_timespecclear(&r->root_wait);

	/* NOTE(review): {-1,-1} is treated as INFTIM; a timespec with only one
	 * field == -1 slips into the finite branch — confirm sched_timespecinf
	 * always sets both fields */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_wait;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_wait, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
		/* fall back to the configured polling interval */
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	}

	/* block on the backend's wait primitive */
#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	rfd = xfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif	/* KQ_SUPPORT */
		/* wait failed: let the exception hook decide; EINTR is benign */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif	/* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put regular task priority task to ready queue,
		if there is no ready task or reach max missing hit for regular task */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			/* `x ^= x` == reset miss counter to 0 */
			r->root_miss ^= r->root_miss;

			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);
	return task;
}
1422:
1423: /*
1424: * sched_hook_exception() - Default EXCEPTION hook
1425: *
1426: * @root = root task
1427: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1428: * return: <0 errors and 0 ok
1429: */
1430: void *
1431: sched_hook_exception(void *root, void *arg)
1432: {
1433: sched_root_task_t *r = root;
1434:
1435: if (!r)
1436: return NULL;
1437:
1438: /* custom exception handling ... */
1439: if (arg) {
1440: if (arg == (void*) EV_EOF)
1441: return NULL;
1442: return (void*) -1; /* raise scheduler error!!! */
1443: }
1444:
1445: /* if error hook exists */
1446: if (r->root_hooks.hook_root.error)
1447: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1448:
1449: /* default case! */
1450: LOGERR;
1451: return NULL;
1452: }
1453:
1454: /*
1455: * sched_hook_condition() - Default CONDITION hook
1456: *
1457: * @root = root task
1458: * @arg = killState from schedRun()
1459: * return: NULL kill scheduler loop or !=NULL ok
1460: */
1461: void *
1462: sched_hook_condition(void *root, void *arg)
1463: {
1464: sched_root_task_t *r = root;
1465:
1466: if (!r)
1467: return NULL;
1468:
1469: return (void*) (*r->root_cond - *(intptr_t*) arg);
1470: }
1471:
1472: /*
1473: * sched_hook_rtc() - Default RTC hook
1474: *
1475: * @task = current task
1476: * @arg = unused
1477: * return: <0 errors and 0 ok
1478: */
void *
sched_hook_rtc(void *task, void *arg __unused)
{
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* deliver expiry as a real-time signal; TASK_DATA selects the
	 * SIGRTMIN offset, sival_ptr carries the task back to the handler */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		TASK_FLAG(t) = (u_long) tmr;	/* stash timer id for later teardown */

#if SUP_ENABLE == KQ_SUPPORT
	/* kqueue build: catch the signal via a scheduled signal task */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
			t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	/* non-kqueue build: install a plain sigaction handler instead */
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	/* one-shot relative expiry taken from the task's timespec value */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* NOTE(review): in the non-KQ build sigt is still NULL here —
		 * confirm schedCancel() tolerates a NULL task */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}
#endif	/* HAVE_TIMER_CREATE */
	return NULL;
}
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>