1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.31.2.1 2017/08/31 12:18:38 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004 - 2016
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: static inline void
51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
52: {
53: remove_task_from(t, q);
54:
55: t->task_type = taskREADY;
56: insert_task_to(t, &(TASK_ROOT(t))->root_ready);
57: }
58:
59: #ifdef HAVE_LIBPTHREAD
/* Thread entry shim: enables cancellation, runs the scheduled task via
 * schedCall(), stores the result in the root, retires the task and exits. */
static void *
_sched_threadWrapper(sched_task_t *t)
{
    void *ret = NULL;
    sched_root_task_t *r;

    /* bail out (exiting with NULL) if no task or root is attached */
    if (!t || !TASK_ROOT(t))
        pthread_exit(ret);
    else
        r = (sched_root_task_t*) TASK_ROOT(t);

    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
    /*
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
    */

    /* notify parent, thread is ready for execution */
    /* acts as a cancellation point before the task body runs */
    pthread_testcancel();

    ret = schedCall(t);
    r->root_ret = ret;       /* publish the task's return value on the root */

    /* TASK_VAL holds the thread id set by sched_hook_thread();
     * non-zero means the task is still registered and must be retired */
    if (TASK_VAL(t)) {
        transit_task2unuse(t, &r->root_thread);
        TASK_VAL(t) = 0;
    }

    pthread_exit(ret);
}
89: #endif
90:
91: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
92: defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
93: #if SUP_ENABLE == KQ_SUPPORT
94: static void *
95: _sched_rtcWrapper(sched_task_t *t)
96: {
97: sched_task_t *task;
98: void *ret;
99:
100: if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
101: return NULL;
102: else {
103: task = (sched_task_t*) TASK_DATA(t);
104: timer_delete((timer_t) TASK_DATLEN(t));
105: }
106:
107: ret = schedCall(task);
108:
109: transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
110: return ret;
111: }
112: #else
113: static void
114: _sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
115: {
116: sched_task_t *task;
117:
118: if (si && si->si_value.sival_ptr) {
119: task = (sched_task_t*) si->si_value.sival_ptr;
120: timer_delete((timer_t) TASK_FLAG(task));
121:
122: TASK_RET(task) = (intptr_t) schedCall(task);
123:
124: transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
125: }
126: }
127: #endif
128: #endif
129:
130: /*
131: * sched_hook_init() - Default INIT hook
132: *
133: * @root = root task
134: * @arg = unused
135: * return: <0 errors and 0 ok
136: */
137: void *
138: sched_hook_init(void *root, void *arg __unused)
139: {
140: sched_root_task_t *r = root;
141:
142: if (!r)
143: return (void*) -1;
144:
145: #if SUP_ENABLE == KQ_SUPPORT
146: r->root_kq = kqueue();
147: if (r->root_kq == -1) {
148: LOGERR;
149: return (void*) -1;
150: }
151: #elif SUP_ENABLE == EP_SUPPORT
152: r->root_kq = epoll_create(KQ_EVENTS);
153: if (r->root_kq == -1) {
154: LOGERR;
155: return (void*) -1;
156: }
157: #else
158: r->root_kq ^= r->root_kq;
159: FD_ZERO(&r->root_fds[0]);
160: FD_ZERO(&r->root_fds[1]);
161: #endif
162:
163: return NULL;
164: }
165:
166: /*
167: * sched_hook_fini() - Default FINI hook
168: *
169: * @root = root task
170: * @arg = unused
171: * return: <0 errors and 0 ok
172: */
173: void *
174: sched_hook_fini(void *root, void *arg __unused)
175: {
176: sched_root_task_t *r = root;
177:
178: if (!r)
179: return (void*) -1;
180:
181: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
182: if (r->root_kq > 2) {
183: close(r->root_kq);
184: r->root_kq = 0;
185: }
186: #else
187: FD_ZERO(&r->root_fds[1]);
188: FD_ZERO(&r->root_fds[0]);
189: r->root_kq ^= r->root_kq;
190: #endif
191:
192: return NULL;
193: }
194:
195: /*
196: * sched_hook_cancel() - Default CANCEL hook
197: *
198: * @task = current task
199: * @arg = unused
200: * return: <0 errors and 0 ok
201: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
    sched_task_t *t = task, *tmp, *tt;
    sched_root_task_t *r = NULL;
    int flg;    /* number of tasks subscribed to the same event source */
#if SUP_ENABLE == KQ_SUPPORT
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };    /* zero timeout: apply change, don't wait */
#elif SUP_ENABLE == EP_SUPPORT
    struct epoll_event ee = { .events = 0, .data.fd = 0 };
#else
    register int i;
#endif
#ifdef AIO_SUPPORT
    struct aiocb *acb;
#ifdef EVFILT_LIO
    /* NOTE(review): this `i` would redeclare the select-path `i` above if
     * AIO_SUPPORT were ever combined with the select backend -- confirm
     * the build system forbids that combination */
    register int i = 0;
    struct aiocb **acbs;
#endif /* EVFILT_LIO */
#endif /* AIO_SUPPORT */

    if (!t || !TASK_ROOT(t))
        return (void*) -1;
    else
        r = TASK_ROOT(t);

    switch (TASK_TYPE(t)) {
        case taskREAD:
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
                if (TASK_FD(tt) != TASK_FD(t))
                    continue;
                else
                    flg++;
#if SUP_ENABLE == KQ_SUPPORT
            /* delete the kernel event only if this was the last subscriber */
#ifdef __NetBSD__
            EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_FD(t));
#else
            EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
            ee.data.fd = TASK_FD(t);
            ee.events ^= ee.events;    /* clear the event mask */
            if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
                ee.events = EPOLLOUT;    /* keep write interest if still registered */

            if (flg < 2)
                FD_CLR(TASK_FD(t), &r->root_fds[0]);
            else
                ee.events |= EPOLLIN | EPOLLPRI;    /* other readers remain */
#else
            if (flg < 2) {
                FD_CLR(TASK_FD(t), &r->root_fds[0]);

                /* optimize select: shrink root_kq to highest live fd + 1 */
                for (i = r->root_kq - 1; i > 2; i--)
                    if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
                        break;
                if (i > 2)
                    r->root_kq = i + 1;
            }
#endif
            break;
        case taskWRITE:
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
                if (TASK_FD(tt) != TASK_FD(t))
                    continue;
                else
                    flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
            EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_FD(t));
#else
            EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
            ee.data.fd = TASK_FD(t);
            ee.events ^= ee.events;    /* clear the event mask */
            if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
                ee.events = EPOLLIN | EPOLLPRI;    /* keep read interest if registered */

            if (flg < 2)
                FD_CLR(TASK_FD(t), &r->root_fds[1]);
            else
                ee.events |= EPOLLOUT;    /* other writers remain */
#else
            if (flg < 2) {
                FD_CLR(TASK_FD(t), &r->root_fds[1]);

                /* optimize select: shrink root_kq to highest live fd + 1 */
                for (i = r->root_kq - 1; i > 2; i--)
                    if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
                        break;
                if (i > 2)
                    r->root_kq = i + 1;
            }
#endif
            break;
        case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
                if (TASK_DATA(tt) != TASK_DATA(t))
                    continue;
                else
                    flg++;
#ifdef __NetBSD__
            EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_DATA(t));
#else
            EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_DATA(t));
#endif
#endif
            break;
        case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
                if (TASK_FD(tt) != TASK_FD(t))
                    continue;
                else
                    flg++;
#ifdef __NetBSD__
            EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_FD(t));
#else
            EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_FD(t));
#endif
#endif
            break;
        case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
                if (TASK_VAL(tt) != TASK_VAL(t))
                    continue;
                else
                    flg++;
#ifdef __NetBSD__
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_VAL(t));
#else
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_VAL(t));
#endif
#endif
            break;
        case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
                if (TASK_VAL(tt) != TASK_VAL(t))
                    continue;
                else
                    flg++;
#ifdef __NetBSD__
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_VAL(t));
#else
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_VAL(t));
#endif
            /* restore signal */
            if (flg < 2)
                signal(TASK_VAL(t), SIG_DFL);
#endif
            break;
#ifdef AIO_SUPPORT
        case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
                if (TASK_VAL(tt) != TASK_VAL(t))
                    continue;
                else
                    flg++;
#ifdef __NetBSD__
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_VAL(t));
#else
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_VAL(t));
#endif
            /* cancel the in-flight request and release its control block */
            acb = (struct aiocb*) TASK_VAL(t);
            if (acb) {
                if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
                    aio_return(acb);
                free(acb);
                TASK_VAL(t) = 0;
            }
#endif
            break;
#ifdef EVFILT_LIO
        case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
                if (TASK_VAL(tt) != TASK_VAL(t))
                    continue;
                else
                    flg++;
#ifdef __NetBSD__
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_VAL(t));
#else
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_VAL(t));
#endif
            /* cancel and free every request in the list, then the list itself */
            acbs = (struct aiocb**) TASK_VAL(t);
            if (acbs) {
                for (i = 0; i < TASK_DATLEN(t); i++) {
                    if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
                        aio_return(acbs[i]);
                    free(acbs[i]);
                }
                free(acbs);
                TASK_VAL(t) = 0;
            }
#endif
            break;
#endif /* EVFILT_LIO */
#endif /* AIO_SUPPORT */
#ifdef EVFILT_USER
        case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
                if (TASK_VAL(tt) != TASK_VAL(t))
                    continue;
                else
                    flg++;
#ifdef __NetBSD__
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
                    0, 0, (intptr_t) TASK_VAL(t));
#else
            EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
                    0, 0, (void*) TASK_VAL(t));
#endif
#endif
            break;
#endif /* EVFILT_USER */
        case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
            /* TASK_VAL holds the pthread id; cancel and reap the thread */
            if (TASK_VAL(t)) {
                pthread_cancel((pthread_t) TASK_VAL(t));
                pthread_join((pthread_t) TASK_VAL(t), NULL);
                if (TASK_VAL(t)) {
                    transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
                    TASK_VAL(t) = 0;
                }
            }
#endif
            return NULL;    /* no kernel event to update */
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
    defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
        case taskRTC:
            timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
            /* cancel the companion task stored in TASK_RET */
            schedCancel((sched_task_t*) TASK_RET(t));
#else
            /* check for multi subscribers */
            flg = 0;
            TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
                if (TASK_DATA(tt) != TASK_DATA(t))
                    continue;
                else
                    flg++;

            /* restore signal */
            if (flg < 2)
                signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
            return NULL;    /* no kernel event to update */
#endif /* HAVE_TIMER_CREATE */
        default:
            return NULL;
    }

    /* apply the single change built by the case above */
#if SUP_ENABLE == KQ_SUPPORT
    kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
    epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
#endif
    return NULL;
}
504:
505: #ifdef HAVE_LIBPTHREAD
506: /*
507: * sched_hook_thread() - Default THREAD hook
508: *
509: * @task = current task
510: * @arg = pthread attributes
511: * return: <0 errors and 0 ok
512: */
513: void *
514: sched_hook_thread(void *task, void *arg)
515: {
516: sched_task_t *t = task;
517: pthread_t tid;
518: sigset_t s, o;
519:
520: if (!t || !TASK_ROOT(t))
521: return (void*) -1;
522:
523: sigfillset(&s);
524: pthread_sigmask(SIG_BLOCK, &s, &o);
525: errno = pthread_create(&tid, (pthread_attr_t*) arg,
526: (void *(*)(void*)) _sched_threadWrapper, t);
527: pthread_sigmask(SIG_SETMASK, &o, NULL);
528:
529: if (errno) {
530: LOGERR;
531: return (void*) -1;
532: } else
533: TASK_VAL(t) = (u_long) tid;
534:
535: if (!TASK_ISLOCKED(t))
536: TASK_LOCK(t);
537:
538: return NULL;
539: }
540: #endif
541:
542: /*
543: * sched_hook_read() - Default READ hook
544: *
545: * @task = current task
546: * @arg = unused
547: * return: <0 errors and 0 ok
548: */
void *
sched_hook_read(void *task, void *arg __unused)
{
    sched_task_t *t = task;
    sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };    /* zero timeout: apply change, don't wait */
#elif SUP_ENABLE == EP_SUPPORT
    struct epoll_event ee = { .events = EPOLLIN | EPOLLPRI, .data.fd = 0 };
    int flg = 0;    /* bit 0: fd already watched for read; bit 1: for write */
#endif

    if (!t || !TASK_ROOT(t))
        return (void*) -1;
    else
        r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
    /* register an EV_CLEAR (edge-like) read event for the descriptor */
#ifdef __NetBSD__
    EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
    EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
    if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
        if (r->root_hooks.hook_exec.exception)
            r->root_hooks.hook_exec.exception(r, NULL);
        else
            LOGERR;
        return (void*) -1;
    }
#elif SUP_ENABLE == EP_SUPPORT
    if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
        flg |= 1;
    if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
        flg |= 2;
        ee.events |= EPOLLOUT;    /* preserve existing write interest */
    }

    ee.data.fd = TASK_FD(t);
    /* fd already known to epoll? modify the registration, else add it */
    if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
        if (r->root_hooks.hook_exec.exception)
            r->root_hooks.hook_exec.exception(r, NULL);
        else
            LOGERR;
        return (void*) -1;
    } else
        FD_SET(TASK_FD(t), &r->root_fds[0]);    /* mirror read interest locally */
#else
    /* select() backend: set the read bit and grow the max-fd bound */
    FD_SET(TASK_FD(t), &r->root_fds[0]);
    if (TASK_FD(t) >= r->root_kq)
        r->root_kq = TASK_FD(t) + 1;
#endif

    return NULL;
}
605:
606: /*
607: * sched_hook_write() - Default WRITE hook
608: *
609: * @task = current task
610: * @arg = unused
611: * return: <0 errors and 0 ok
612: */
void *
sched_hook_write(void *task, void *arg __unused)
{
    sched_task_t *t = task;
    sched_root_task_t *r = NULL;
#if SUP_ENABLE == KQ_SUPPORT
    struct kevent chg[1];
    struct timespec timeout = { 0, 0 };    /* zero timeout: apply change, don't wait */
#elif SUP_ENABLE == EP_SUPPORT
    struct epoll_event ee = { .events = EPOLLOUT, .data.fd = 0 };
    int flg = 0;    /* bit 0: fd already watched for read; bit 1: for write */
#endif

    if (!t || !TASK_ROOT(t))
        return (void*) -1;
    else
        r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
    /* register an EV_CLEAR (edge-like) write event for the descriptor */
#ifdef __NetBSD__
    EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
    EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
    if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
        if (r->root_hooks.hook_exec.exception)
            r->root_hooks.hook_exec.exception(r, NULL);
        else
            LOGERR;
        return (void*) -1;
    }
#elif SUP_ENABLE == EP_SUPPORT
    if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
        flg |= 1;
        ee.events |= EPOLLIN | EPOLLPRI;    /* preserve existing read interest */
    }
    if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
        flg |= 2;

    ee.data.fd = TASK_FD(t);
    /* fd already known to epoll? modify the registration, else add it */
    if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
        if (r->root_hooks.hook_exec.exception)
            r->root_hooks.hook_exec.exception(r, NULL);
        else
            LOGERR;
        return (void*) -1;
    } else
        FD_SET(TASK_FD(t), &r->root_fds[1]);    /* mirror write interest locally */
#else
    /* select() backend: set the write bit and grow the max-fd bound */
    FD_SET(TASK_FD(t), &r->root_fds[1]);
    if (TASK_FD(t) >= r->root_kq)
        r->root_kq = TASK_FD(t) + 1;
#endif

    return NULL;
}
669:
670: /*
671: * sched_hook_alarm() - Default ALARM hook
672: *
673: * @task = current task
674: * @arg = unused
675: * return: <0 errors and 0 ok
676: */
677: void *
678: sched_hook_alarm(void *task, void *arg __unused)
679: {
680: #if SUP_ENABLE == KQ_SUPPORT
681: sched_task_t *t = task;
682: struct kevent chg[1];
683: struct timespec timeout = { 0, 0 };
684:
685: if (!t || !TASK_ROOT(t))
686: return (void*) -1;
687:
688: #ifdef __NetBSD__
689: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
690: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
691: (intptr_t) TASK_DATA(t));
692: #else
693: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
694: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
695: (void*) TASK_DATA(t));
696: #endif
697: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
698: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
699: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
700: else
701: LOGERR;
702: return (void*) -1;
703: }
704:
705: #endif
706: return NULL;
707: }
708:
709: /*
710: * sched_hook_node() - Default NODE hook
711: *
712: * @task = current task
713: * @arg = unused
714: * return: <0 errors and 0 ok
715: */
716: void *
717: sched_hook_node(void *task, void *arg __unused)
718: {
719: #if SUP_ENABLE == KQ_SUPPORT
720: sched_task_t *t = task;
721: struct kevent chg[1];
722: struct timespec timeout = { 0, 0 };
723:
724: if (!t || !TASK_ROOT(t))
725: return (void*) -1;
726:
727: #ifdef __NetBSD__
728: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
729: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
730: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
731: #else
732: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
733: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
734: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
735: #endif
736: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
737: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
738: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
739: else
740: LOGERR;
741: return (void*) -1;
742: }
743:
744: #endif
745: return NULL;
746: }
747:
748: /*
749: * sched_hook_proc() - Default PROC hook
750: *
751: * @task = current task
752: * @arg = unused
753: * return: <0 errors and 0 ok
754: */
755: void *
756: sched_hook_proc(void *task, void *arg __unused)
757: {
758: #if SUP_ENABLE == KQ_SUPPORT
759: sched_task_t *t = task;
760: struct kevent chg[1];
761: struct timespec timeout = { 0, 0 };
762:
763: if (!t || !TASK_ROOT(t))
764: return (void*) -1;
765:
766: #ifdef __NetBSD__
767: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
768: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
769: #else
770: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
771: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
772: #endif
773: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
774: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
775: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
776: else
777: LOGERR;
778: return (void*) -1;
779: }
780:
781: #endif
782: return NULL;
783: }
784:
785: /*
786: * sched_hook_signal() - Default SIGNAL hook
787: *
788: * @task = current task
789: * @arg = unused
790: * return: <0 errors and 0 ok
791: */
792: void *
793: sched_hook_signal(void *task, void *arg __unused)
794: {
795: #if SUP_ENABLE == KQ_SUPPORT
796: sched_task_t *t = task;
797: struct kevent chg[1];
798: struct timespec timeout = { 0, 0 };
799:
800: if (!t || !TASK_ROOT(t))
801: return (void*) -1;
802:
803: /* ignore signal */
804: signal(TASK_VAL(t), SIG_IGN);
805:
806: #ifdef __NetBSD__
807: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
808: #else
809: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
810: #endif
811: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
812: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
813: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
814: else
815: LOGERR;
816: return (void*) -1;
817: }
818: #endif
819: return NULL;
820: }
821:
822: /*
823: * sched_hook_user() - Default USER hook
824: *
825: * @task = current task
826: * @arg = unused
827: * return: <0 errors and 0 ok
828: */
829: #ifdef EVFILT_USER
830: void *
831: sched_hook_user(void *task, void *arg __unused)
832: {
833: #if SUP_ENABLE == KQ_SUPPORT
834: sched_task_t *t = task;
835: struct kevent chg[1];
836: struct timespec timeout = { 0, 0 };
837:
838: if (!t || !TASK_ROOT(t))
839: return (void*) -1;
840:
841: #ifdef __NetBSD__
842: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
843: 0, (intptr_t) TASK_VAL(t));
844: #else
845: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
846: 0, (void*) TASK_VAL(t));
847: #endif
848: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
849: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
850: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
851: else
852: LOGERR;
853: return (void*) -1;
854: }
855:
856: #endif
857: return NULL;
858: }
859: #endif
860:
861: #if SUP_ENABLE == KQ_SUPPORT
862: static inline void
863: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
864: {
865: struct kevent evt[1];
866: register int i;
867: sched_task_t *task, *tmp;
868: struct timespec now = { 0, 0 };
869: #ifdef AIO_SUPPORT
870: int len, fd;
871: struct aiocb *acb;
872: #ifdef EVFILT_LIO
873: int l;
874: off_t off;
875: struct aiocb **acbs;
876: struct iovec *iv;
877: #endif /* EVFILT_LIO */
878: #endif /* AIO_SUPPORT */
879:
880: for (i = 0; i < en; i++) {
881: memcpy(evt, &res[i], sizeof evt);
882: evt->flags = EV_DELETE;
883: /* Put read/write task to ready queue */
884: switch (res[i].filter) {
885: case EVFILT_READ:
886: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
887: if (TASK_FD(task) != ((intptr_t) res[i].udata))
888: continue;
889: else {
890: TASK_RET(task) = res[i].data;
891: TASK_FLAG(task) = (u_long) res[i].fflags;
892: }
893: /* remove read handle */
894: remove_task_from(task, &r->root_read);
895:
896: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
897: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
898: task->task_type = taskUNUSE;
899: insert_task_to(task, &r->root_unuse);
900: } else {
901: task->task_type = taskREADY;
902: insert_task_to(task, &r->root_ready);
903: }
904: } else {
905: task->task_type = taskREADY;
906: insert_task_to(task, &r->root_ready);
907: }
908: }
909: break;
910: case EVFILT_WRITE:
911: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
912: if (TASK_FD(task) != ((intptr_t) res[i].udata))
913: continue;
914: else {
915: TASK_RET(task) = res[i].data;
916: TASK_FLAG(task) = (u_long) res[i].fflags;
917: }
918: /* remove write handle */
919: remove_task_from(task, &r->root_write);
920:
921: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
922: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
923: task->task_type = taskUNUSE;
924: insert_task_to(task, &r->root_unuse);
925: } else {
926: task->task_type = taskREADY;
927: insert_task_to(task, &r->root_ready);
928: }
929: } else {
930: task->task_type = taskREADY;
931: insert_task_to(task, &r->root_ready);
932: }
933: }
934: break;
935: case EVFILT_TIMER:
936: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
937: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
938: continue;
939: else {
940: TASK_RET(task) = res[i].data;
941: TASK_FLAG(task) = (u_long) res[i].fflags;
942: }
943: /* remove alarm handle */
944: transit_task2ready(task, &r->root_alarm);
945: }
946: break;
947: case EVFILT_VNODE:
948: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
949: if (TASK_FD(task) != ((intptr_t) res[i].udata))
950: continue;
951: else {
952: TASK_RET(task) = res[i].data;
953: TASK_FLAG(task) = (u_long) res[i].fflags;
954: }
955: /* remove node handle */
956: transit_task2ready(task, &r->root_node);
957: }
958: break;
959: case EVFILT_PROC:
960: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
961: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
962: continue;
963: else {
964: TASK_RET(task) = res[i].data;
965: TASK_FLAG(task) = (u_long) res[i].fflags;
966: }
967: /* remove proc handle */
968: transit_task2ready(task, &r->root_proc);
969: }
970: break;
971: case EVFILT_SIGNAL:
972: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
973: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
974: continue;
975: else {
976: TASK_RET(task) = res[i].data;
977: TASK_FLAG(task) = (u_long) res[i].fflags;
978: }
979: /* remove signal handle */
980: transit_task2ready(task, &r->root_signal);
981: }
982: break;
983: #ifdef AIO_SUPPORT
984: case EVFILT_AIO:
985: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
986: acb = (struct aiocb*) TASK_VAL(task);
987: if (acb != ((struct aiocb*) res[i].udata))
988: continue;
989: else {
990: TASK_RET(task) = res[i].data;
991: TASK_FLAG(task) = (u_long) res[i].fflags;
992: }
993: /* remove user handle */
994: transit_task2ready(task, &r->root_aio);
995:
996: fd = acb->aio_fildes;
997: if ((len = aio_return(acb)) != -1) {
998: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
999: LOGERR;
1000: } else
1001: LOGERR;
1002: free(acb);
1003: TASK_DATLEN(task) = (u_long) len;
1004: TASK_FD(task) = fd;
1005: }
1006: break;
1007: #ifdef EVFILT_LIO
1008: case EVFILT_LIO:
1009: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
1010: acbs = (struct aiocb**) TASK_VAL(task);
1011: if (acbs != ((struct aiocb**) res[i].udata))
1012: continue;
1013: else {
1014: TASK_RET(task) = res[i].data;
1015: TASK_FLAG(task) = (u_long) res[i].fflags;
1016: }
1017: /* remove user handle */
1018: transit_task2ready(task, &r->root_lio);
1019:
1020: iv = (struct iovec*) TASK_DATA(task);
1021: fd = acbs[0]->aio_fildes;
1022: off = acbs[0]->aio_offset;
1023: for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
1024: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
1025: l = 0;
1026: else
1027: l = iv[i].iov_len;
1028: free(acbs[i]);
1029: }
1030: free(acbs);
1031: TASK_DATLEN(task) = (u_long) len;
1032: TASK_FD(task) = fd;
1033:
1034: if (lseek(fd, off + len, SEEK_CUR) == -1)
1035: LOGERR;
1036: }
1037: break;
1038: #endif /* EVFILT_LIO */
1039: #endif /* AIO_SUPPORT */
1040: #ifdef EVFILT_USER
1041: case EVFILT_USER:
1042: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
1043: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
1044: continue;
1045: else {
1046: TASK_RET(task) = res[i].data;
1047: TASK_FLAG(task) = (u_long) res[i].fflags;
1048: }
1049: /* remove user handle */
1050: transit_task2ready(task, &r->root_user);
1051: }
1052: break;
1053: #endif /* EVFILT_USER */
1054: }
1055:
1056: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
1057: if (r->root_hooks.hook_exec.exception)
1058: r->root_hooks.hook_exec.exception(r, NULL);
1059: else
1060: LOGERR;
1061: }
1062: }
1063: }
1064: #endif
1065:
1066: #if SUP_ENABLE == EP_SUPPORT
1067: static inline void
1068: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
1069: {
1070: register int i, flg, oevt;
1071: int ops = EPOLL_CTL_DEL;
1072: sched_task_t *task, *tmp;
1073: struct epoll_event evt[1];
1074:
1075: for (i = 0; i < en; i++) {
1076: memcpy(evt, &res[i], sizeof evt);
1077: oevt = evt->events & EPOLLOUT;
1078:
1079: if (evt->events & (EPOLLIN | EPOLLPRI)) {
1080: flg = 0;
1081: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
1082: if (TASK_FD(task) != evt->data.fd)
1083: continue;
1084: else {
1085: flg++;
1086: TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
1087: }
1088: /* remove read handle */
1089: remove_task_from(task, &r->root_read);
1090:
1091: if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
1092: if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
1093: (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
1094: task->task_type = taskUNUSE;
1095: insert_task_to(task, &r->root_unuse);
1096: } else {
1097: task->task_type = taskREADY;
1098: insert_task_to(task, &r->root_ready);
1099: }
1100: } else {
1101: task->task_type = taskREADY;
1102: insert_task_to(task, &r->root_ready);
1103: }
1104: }
1105:
1106: if (flg) {
1107: evt->events ^= evt->events;
1108: if (FD_ISSET(evt->data.fd, &r->root_fds[1])) {
1109: ops = EPOLL_CTL_MOD;
1110: evt->events |= EPOLLOUT;
1111: }
1112: if (flg > 1) {
1113: ops = EPOLL_CTL_MOD;
1114: evt->events |= EPOLLIN | EPOLLPRI;
1115: } else
1116: FD_CLR(evt->data.fd, &r->root_fds[0]);
1117: }
1118: }
1119:
1120: if (oevt) {
1121: flg = 0;
1122: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
1123: if (TASK_FD(task) != evt->data.fd)
1124: continue;
1125: else {
1126: flg++;
1127: TASK_FLAG(task) = ioctl(TASK_FD(task),
1128: FIONWRITE, &TASK_RET(task));
1129: }
1130: /* remove write handle */
1131: remove_task_from(task, &r->root_write);
1132:
1133: if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
1134: if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
1135: (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
1136: task->task_type = taskUNUSE;
1137: insert_task_to(task, &r->root_unuse);
1138: } else {
1139: task->task_type = taskREADY;
1140: insert_task_to(task, &r->root_ready);
1141: }
1142: } else {
1143: task->task_type = taskREADY;
1144: insert_task_to(task, &r->root_ready);
1145: }
1146: }
1147:
1148: if (flg) {
1149: evt->events ^= evt->events;
1150: if (FD_ISSET(evt->data.fd, &r->root_fds[0])) {
1151: ops = EPOLL_CTL_MOD;
1152: evt->events |= EPOLLIN | EPOLLPRI;
1153: }
1154: if (flg > 1) {
1155: ops = EPOLL_CTL_MOD;
1156: evt->events |= EPOLLOUT;
1157: } else
1158: FD_CLR(evt->data.fd, &r->root_fds[1]);
1159: }
1160: }
1161:
1162: if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
1163: if (r->root_hooks.hook_exec.exception) {
1164: r->root_hooks.hook_exec.exception(r, NULL);
1165: } else
1166: LOGERR;
1167: }
1168: }
1169: }
1170: #endif
1171:
1172: #if SUP_ENABLE == NO_SUPPORT
1173: static inline void
1174: fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
1175: {
1176: register int i, flg;
1177: sched_task_t *task, *tmp;
1178:
1179: /* skip select check if return value from select is zero */
1180: if (!en)
1181: return;
1182:
1183: for (i = 0; i < r->root_kq; i++) {
1184: if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
1185: flg = 0;
1186: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
1187: if (TASK_FD(task) != i)
1188: continue;
1189: else {
1190: flg++;
1191: TASK_FLAG(task) = ioctl(TASK_FD(task),
1192: FIONREAD, &TASK_RET(task));
1193: }
1194: /* remove read handle */
1195: remove_task_from(task, &r->root_read);
1196:
1197: if (r->root_hooks.hook_exec.exception) {
1198: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1199: task->task_type = taskUNUSE;
1200: insert_task_to(task, &r->root_unuse);
1201: } else {
1202: task->task_type = taskREADY;
1203: insert_task_to(task, &r->root_ready);
1204: }
1205: } else {
1206: task->task_type = taskREADY;
1207: insert_task_to(task, &r->root_ready);
1208: }
1209: }
1210: /* remove resouce */
1211: if (flg)
1212: FD_CLR(i, &r->root_fds[0]);
1213: }
1214:
1215: if (FD_ISSET(i, &wfd)) {
1216: flg = 0;
1217: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
1218: if (TASK_FD(task) != i)
1219: continue;
1220: else {
1221: flg++;
1222: TASK_FLAG(task) = ioctl(TASK_FD(task),
1223: FIONWRITE, &TASK_RET(task));
1224: }
1225: /* remove write handle */
1226: remove_task_from(task, &r->root_write);
1227:
1228: if (r->root_hooks.hook_exec.exception) {
1229: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1230: task->task_type = taskUNUSE;
1231: insert_task_to(task, &r->root_unuse);
1232: } else {
1233: task->task_type = taskREADY;
1234: insert_task_to(task, &r->root_ready);
1235: }
1236: } else {
1237: task->task_type = taskREADY;
1238: insert_task_to(task, &r->root_ready);
1239: }
1240: }
1241: /* remove resouce */
1242: if (flg)
1243: FD_CLR(i, &r->root_fds[1]);
1244: }
1245: }
1246:
1247: /* optimize select */
1248: for (i = r->root_kq - 1; i > 2; i--)
1249: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
1250: break;
1251: if (i > 2)
1252: r->root_kq = i + 1;
1253: }
1254: #endif
1255:
1256: /*
1257: * sched_hook_fetch() - Default FETCH hook
1258: *
1259: * @root = root task
1260: * @arg = unused
1261: * return: NULL error or !=NULL fetched task
1262: */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	/* epoll_wait() takes a millisecond timeout */
	u_long timeout = 0;
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* highest priority first: pending events, then already-ready tasks;
	 * either short-circuits the whole poll cycle */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

#ifdef TIMER_WITHOUT_SORT
	clock_gettime(CLOCK_MONOTONIC, &now);

	/* unsorted timer queue: scan all timers for the earliest expiry */
	sched_timespecclear(&r->root_wait);
	TAILQ_FOREACH(task, &r->root_timer, task_node) {
		if (!sched_timespecisset(&r->root_wait))
			r->root_wait = TASK_TS(task);
		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
			r->root_wait = TASK_TS(task);
	}

	if (TAILQ_FIRST(&r->root_timer)) {
		/* convert the absolute expiry into a relative wait interval */
		m = r->root_wait;
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#else	/* ! TIMER_WITHOUT_SORT */
	/* sorted timer queue: the head is the earliest expiry */
	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
		clock_gettime(CLOCK_MONOTONIC, &now);

		m = TASK_TS(task);
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#endif	/* TIMER_WITHOUT_SORT */
	/* if present member of task, set NOWAIT */
	if (TAILQ_FIRST(&r->root_task))
		sched_timespecclear(&r->root_wait);

	/* NOTE(review): INFTIM appears to be encoded as both fields == -1;
	 * confirm against sched_timespecinf() */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_wait;
#elif SUP_ENABLE == EP_SUPPORT
		/* convert timespec to milliseconds for epoll */
		timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_wait, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
		/* wait and polling interval are both INFTIM: block indefinitely */
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
		/* fall back to the configured polling interval */
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif	/* KQ_SUPPORT */
	}

	/* block in the platform event primitive; the `{' of this if spans
	 * all three preprocessor branches */
#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	rfd = xfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif	/* KQ_SUPPORT */
		/* poll failure: let the exception hook decide; EINTR is
		 * silently tolerated when no hook is installed */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif	/* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	/* move every expired timer task to the ready queue */
	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put regular task priority task to ready queue,
		if there is no ready task or reach max missing hit for regular task */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			/* reset miss counter (x ^= x is 0) */
			r->root_miss ^= r->root_miss;

			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);
	return task;
}
1410:
1411: /*
1412: * sched_hook_exception() - Default EXCEPTION hook
1413: *
1414: * @root = root task
1415: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1416: * return: <0 errors and 0 ok
1417: */
1418: void *
1419: sched_hook_exception(void *root, void *arg)
1420: {
1421: sched_root_task_t *r = root;
1422:
1423: if (!r)
1424: return NULL;
1425:
1426: /* custom exception handling ... */
1427: if (arg) {
1428: if (arg == (void*) EV_EOF)
1429: return NULL;
1430: return (void*) -1; /* raise scheduler error!!! */
1431: }
1432:
1433: /* if error hook exists */
1434: if (r->root_hooks.hook_root.error)
1435: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1436:
1437: /* default case! */
1438: LOGERR;
1439: return NULL;
1440: }
1441:
1442: /*
1443: * sched_hook_condition() - Default CONDITION hook
1444: *
1445: * @root = root task
1446: * @arg = killState from schedRun()
1447: * return: NULL kill scheduler loop or !=NULL ok
1448: */
1449: void *
1450: sched_hook_condition(void *root, void *arg)
1451: {
1452: sched_root_task_t *r = root;
1453:
1454: if (!r)
1455: return NULL;
1456:
1457: return (void*) (*r->root_cond - *(intptr_t*) arg);
1458: }
1459:
1460: /*
1461: * sched_hook_rtc() - Default RTC hook
1462: *
1463: * @task = current task
1464: * @arg = unused
1465: * return: <0 errors and 0 ok
1466: */
void *
sched_hook_rtc(void *task, void *arg __unused)
{
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* deliver timer expiry as a real-time signal whose number is
	 * SIGRTMIN offset by the task's data value */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* keep the timer id on the task so it can be deleted later */
		TASK_FLAG(t) = (u_long) tmr;

#if SUP_ENABLE == KQ_SUPPORT
	/* kqueue build: catch the RT signal through a scheduler signal task */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
			t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* roll back the timer created above */
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	/* non-kqueue build: install a plain sigaction handler instead */
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* roll back the timer created above */
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	/* arm a one-shot relative timer with the task's timespec value */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* NOTE(review): sigt stays NULL in non-KQ builds — assumes
		 * schedCancel(NULL) is a harmless no-op; confirm */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}
#endif	/* HAVE_TIMER_CREATE */
	return NULL;
}
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */