1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.37.2.4 2022/11/29 11:55:58 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004 - 2022
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: static inline void
51: transit_task2ready(sched_task_t * __restrict t, sched_queue_t * __restrict q)
52: {
53: remove_task_from(t, q);
54:
55: t->task_type = taskREADY;
56: insert_task_to(t, &(TASK_ROOT(t))->root_ready);
57: }
58:
59: #ifdef HAVE_LIBPTHREAD
/* Thread entry trampoline: runs the scheduled task inside the new pthread
 * and recycles the task when the call returns. */
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	/* exit immediately with a NULL result if the task or its root is gone */
	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	/* run the scheduled call and publish its result on the root */
	ret = schedCall(t);
	r->root_ret = ret;

	/* TASK_VAL holds the pthread id while the thread task is live
	 * (set by sched_hook_thread()); clear it and recycle the task */
	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
89: #endif
90:
91: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
92: defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
93: #if SUP_ENABLE == KQ_SUPPORT
/* RTC trampoline (kqueue build): invoked for a helper task whose
 * TASK_DATA points at the real RTC task and whose TASK_DATLEN carries
 * the POSIX timer id to dispose of. */
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		/* resolve the real task and delete the one-shot timer first */
		task = (sched_task_t*) TASK_DATA(t);
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	/* recycle the RTC task after its callback has run */
	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
112: #else
/* RTC signal handler (non-kqueue build): the task pointer travels in the
 * realtime signal's sival_ptr; TASK_FLAG carries the POSIX timer id. */
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		/* one-shot timer: delete it before dispatching the callback */
		timer_delete((timer_t) TASK_FLAG(task));

		TASK_RET(task) = (intptr_t) schedCall(task);

		/* recycle the RTC task after its callback has run */
		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
127: #endif
128: #endif
129:
130: /*
131: * sched_hook_init() - Default INIT hook
132: *
133: * @root = root task
134: * @arg = unused
135: * return: <0 errors and 0 ok
136: */
137: void *
138: sched_hook_init(void *root, void *arg __unused)
139: {
140: sched_root_task_t *r = root;
141:
142: if (!r)
143: return (void*) -1;
144:
145: #if SUP_ENABLE == KQ_SUPPORT
146: r->root_kq = kqueue();
147: if (r->root_kq == -1) {
148: LOGERR;
149: return (void*) -1;
150: }
151: #elif SUP_ENABLE == EP_SUPPORT
152: r->root_kq = epoll_create(KQ_EVENTS);
153: if (r->root_kq == -1) {
154: LOGERR;
155: return (void*) -1;
156: }
157: #else
158: r->root_kq ^= r->root_kq;
159: #endif
160:
161: FD_ZERO(&r->root_fds[0]);
162: FD_ZERO(&r->root_fds[1]);
163: FD_ZERO(&r->root_fds[2]);
164:
165: return NULL;
166: }
167:
168: /*
169: * sched_hook_fini() - Default FINI hook
170: *
171: * @root = root task
172: * @arg = unused
173: * return: <0 errors and 0 ok
174: */
175: void *
176: sched_hook_fini(void *root, void *arg __unused)
177: {
178: sched_root_task_t *r = root;
179:
180: if (!r)
181: return (void*) -1;
182:
183: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
184: if (r->root_kq > 2) {
185: close(r->root_kq);
186: r->root_kq = 0;
187: }
188: #else
189: r->root_kq ^= r->root_kq;
190: #endif
191:
192: FD_ZERO(&r->root_fds[2]);
193: FD_ZERO(&r->root_fds[1]);
194: FD_ZERO(&r->root_fds[0]);
195:
196: return NULL;
197: }
198:
199: /*
200: * sched_hook_cancel() - Default CANCEL hook
201: *
202: * @task = current task
203: * @arg = unused
204: * return: <0 errors and 0 ok
205: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	int flg = 0;		/* number of tasks watching the same fd/id */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.fd = 0 };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	/* NOTE(review): this redeclares `i` when the select() backend and
	 * EVFILT_LIO are both enabled — presumably those configs never
	 * coexist; verify against the build matrix */
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	/* For each task type: count remaining subscribers of the same event
	 * source; only deregister from the kernel when this is the last one
	 * (flg < 2).  The actual kevent()/epoll_ctl() call happens after
	 * the switch for fd-based types. */
	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;	/* zero the event mask */
			/* keep write interest alive if the fd is in the write set */
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events |= EPOLLOUT;

			if (flg < 2) {
				/* last reader: drop fd from read/except shadow sets */
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);
			} else {
				/* other readers remain: preserve their event bits */
				if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
					ee.events |= EPOLLIN;
				if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
					ee.events |= EPOLLPRI;
			}
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);

				/* optimize select: shrink max-fd to highest set bit */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) ||
							FD_ISSET(i, &r->root_fds[1]) ||
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;	/* zero the event mask */
			/* keep read/except interest alive for remaining readers */
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events |= EPOLLIN;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
				ee.events |= EPOLLPRI;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select: shrink max-fd to highest set bit */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) ||
							FD_ISSET(i, &r->root_fds[1]) ||
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* restore signal */
			if (flg < 2)
				signal(TASK_VAL(t), SIG_DFL);
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* TASK_VAL holds the aiocb pointer: cancel, reap, free */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				e_free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* TASK_VAL holds the aiocb vector of TASK_DATLEN entries:
			 * cancel, reap and free each one, then the vector itself */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					e_free(acbs[i]);
				}
				e_free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
			/* TASK_VAL holds the pthread id (set by sched_hook_thread()):
			 * cancel the thread, wait for it, then recycle the task */
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			return NULL;
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			/* TASK_FLAG carries the POSIX timer id for RTC tasks */
			timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;

			/* restore signal */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

	/* apply the prepared kernel-side change for fd/id based task types */
#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	if (TASK_TYPE(t) == taskREAD || TASK_TYPE(t) == taskWRITE) {
		/* modify if any interest remains, otherwise deregister the fd */
		epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
	}
#endif
	return NULL;
}
490:
491: #ifdef HAVE_LIBPTHREAD
492: /*
493: * sched_hook_thread() - Default THREAD hook
494: *
495: * @task = current task
496: * @arg = pthread attributes
497: * return: <0 errors and 0 ok
498: */
void *
sched_hook_thread(void *task, void *arg)
{
	sched_task_t *t = task;
	pthread_t tid;
	sigset_t s, o;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* block all signals around pthread_create() so the new thread
	 * starts with a full signal mask; the caller's mask is restored
	 * immediately afterwards */
	sigfillset(&s);
	pthread_sigmask(SIG_BLOCK, &s, &o);
	errno = pthread_create(&tid, (pthread_attr_t*) arg,
			(void *(*)(void*)) _sched_threadWrapper, t);
	pthread_sigmask(SIG_SETMASK, &o, NULL);

	if (errno) {
		LOGERR;
		return (void*) -1;
	} else
		TASK_VAL(t) = (u_long) tid;	/* keep thread id for cancel/join */

	if (!TASK_ISLOCKED(t))
		TASK_LOCK(t);

	return NULL;
}
526: #endif
527:
528: /*
529: * sched_hook_read() - Default READ hook
530: *
531: * @task = current task
532: * @arg = unused
533: * return: <0 errors and 0 ok
534: */
void *
sched_hook_read(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	uintptr_t mask = (uintptr_t) arg;	/* optional extra event flags from caller */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee;
	int flg = 0;		/* non-zero: fd already registered -> MOD not ADD */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask,
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask,
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLIN | EPOLLPRI;	/* default read interest */
	ee.data.fd = TASK_FD(t);
	ee.events = mask;
	/* merge in the events other subscribers of this fd already use,
	 * tracked in the shadow fd sets: [0]=IN, [1]=OUT, [2]=PRI */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else {
		/* record the newly added interest in the shadow sets */
		if (mask & EPOLLIN)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & EPOLLPRI)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}
#else
	/* select() backend: mask bit 1 = read set, bit 2 = except set */
	if (!mask) {
		FD_SET(TASK_FD(t), &r->root_fds[0]);
		FD_SET(TASK_FD(t), &r->root_fds[2]);
	} else {
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & 2)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}

	/* root_kq doubles as select()'s nfds: keep it at max fd + 1 */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
616:
617: /*
618: * sched_hook_write() - Default WRITE hook
619: *
620: * @task = current task
621: * @arg = unused
622: * return: <0 errors and 0 ok
623: */
void *
sched_hook_write(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	uintptr_t mask = (uintptr_t) arg;	/* optional extra event flags from caller */
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee;
	int flg = 0;		/* non-zero: fd already registered -> MOD not ADD */
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask,
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask,
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLOUT;	/* default write interest */
	ee.data.fd = TASK_FD(t);
	ee.events = mask;

	/* merge in the events other subscribers of this fd already use,
	 * tracked in the shadow fd sets: [0]=IN, [1]=OUT, [2]=PRI */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* record the newly added interest in the shadow set */
		if (mask & EPOLLOUT)
			FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
	/* select() backend: mask bit 1 = write set */
	if (!mask)
		FD_SET(TASK_FD(t), &r->root_fds[1]);
	else
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[1]);

	/* root_kq doubles as select()'s nfds: keep it at max fd + 1 */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
699:
700: /*
701: * sched_hook_alarm() - Default ALARM hook
702: *
703: * @task = current task
704: * @arg = unused
705: * return: <0 errors and 0 ok
706: */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register an EVFILT_TIMER keyed on TASK_DATA; the period is the
	 * task's timespec converted to milliseconds */
#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
738:
739: /*
740: * sched_hook_node() - Default NODE hook
741: *
742: * @task = current task
743: * @arg = if arg == 42 then waiting for all events
744: * return: <0 errors and 0 ok
745: */
void *
sched_hook_node(void *task, void *arg)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
	u_int addflags = (u_int) (uintptr_t) arg;	/* extra NOTE_* flags from caller */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the vnode behind TASK_FD for all standard change events
	 * plus any caller-supplied flags */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
778:
779: /*
780: * sched_hook_proc() - Default PROC hook
781: *
782: * @task = current task
783: * @arg = unused
784: * return: <0 errors and 0 ok
785: */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the process whose pid is in TASK_VAL for exit/fork/exec,
	 * following children via NOTE_TRACK */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
815:
816: /*
817: * sched_hook_signal() - Default SIGNAL hook
818: *
819: * @task = current task
820: * @arg = unused
821: * return: <0 errors and 0 ok
822: */
void *
sched_hook_signal(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* ignore signal so delivery is observed only through kqueue;
	 * sched_hook_cancel() restores SIG_DFL for the last subscriber */
	signal(TASK_VAL(t), SIG_IGN);

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif
	return NULL;
}
852:
853: /*
854: * sched_hook_user() - Default USER hook
855: *
856: * @task = current task
857: * @arg = unused
858: * return: <0 errors and 0 ok
859: */
860: #ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register a user-triggered event keyed on TASK_VAL;
	 * TASK_DATLEN supplies the fflags (e.g. NOTE_* trigger bits) */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
890: #endif
891:
892: #if SUP_ENABLE == KQ_SUPPORT
893: static inline void
894: fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
895: {
896: struct kevent evt[1];
897: register int i, flg;
898: sched_task_t *task, *tmp;
899: struct timespec now = { 0, 0 };
900: #ifdef AIO_SUPPORT
901: int len, fd;
902: struct aiocb *acb;
903: #ifdef EVFILT_LIO
904: int l;
905: off_t off;
906: struct aiocb **acbs;
907: struct iovec *iv;
908: #endif /* EVFILT_LIO */
909: #endif /* AIO_SUPPORT */
910:
911: for (i = 0; i < en; i++) {
912: memcpy(evt, &res[i], sizeof evt);
913: evt->flags = EV_DELETE;
914: /* Put read/write task to ready queue */
915: flg = 0;
916: switch (res[i].filter) {
917: case EVFILT_READ:
918: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
919: if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
920: if (!flg) {
921: TASK_RET(task) = res[i].data;
922: TASK_FLAG(task) = (u_long) res[i].fflags;
923:
924: /* remove read handle */
925: remove_task_from(task, &r->root_read);
926:
927: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
928: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
929: task->task_type = taskUNUSE;
930: insert_task_to(task, &r->root_unuse);
931: } else {
932: task->task_type = taskREADY;
933: insert_task_to(task, &r->root_ready);
934: }
935: } else {
936: task->task_type = taskREADY;
937: insert_task_to(task, &r->root_ready);
938: }
939: }
940: flg++;
941: }
942: }
943: break;
944: case EVFILT_WRITE:
945: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
946: if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
947: if (!flg) {
948: TASK_RET(task) = res[i].data;
949: TASK_FLAG(task) = (u_long) res[i].fflags;
950:
951: /* remove write handle */
952: remove_task_from(task, &r->root_write);
953:
954: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
955: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
956: task->task_type = taskUNUSE;
957: insert_task_to(task, &r->root_unuse);
958: } else {
959: task->task_type = taskREADY;
960: insert_task_to(task, &r->root_ready);
961: }
962: } else {
963: task->task_type = taskREADY;
964: insert_task_to(task, &r->root_ready);
965: }
966: }
967: flg++;
968: }
969: }
970: break;
971: case EVFILT_TIMER:
972: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
973: if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
974: if (!flg) {
975: TASK_RET(task) = res[i].data;
976: TASK_FLAG(task) = (u_long) res[i].fflags;
977:
978: /* remove alarm handle */
979: transit_task2ready(task, &r->root_alarm);
980: }
981: flg++;
982: }
983: }
984: break;
985: case EVFILT_VNODE:
986: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
987: if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
988: if (!flg) {
989: TASK_RET(task) = res[i].data;
990: TASK_FLAG(task) = (u_long) res[i].fflags;
991:
992: /* remove node handle */
993: transit_task2ready(task, &r->root_node);
994: }
995: flg++;
996: }
997: }
998: break;
999: case EVFILT_PROC:
1000: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
1001: if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
1002: if (!flg) {
1003: TASK_RET(task) = res[i].data;
1004: TASK_FLAG(task) = (u_long) res[i].fflags;
1005:
1006: /* remove proc handle */
1007: transit_task2ready(task, &r->root_proc);
1008: }
1009: flg++;
1010: }
1011: }
1012: break;
1013: case EVFILT_SIGNAL:
1014: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
1015: if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
1016: if (!flg) {
1017: TASK_RET(task) = res[i].data;
1018: TASK_FLAG(task) = (u_long) res[i].fflags;
1019:
1020: /* remove signal handle */
1021: transit_task2ready(task, &r->root_signal);
1022: }
1023: flg++;
1024: }
1025: }
1026: break;
1027: #ifdef AIO_SUPPORT
1028: case EVFILT_AIO:
1029: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
1030: acb = (struct aiocb*) TASK_VAL(task);
1031: if (acb == ((struct aiocb*) res[i].udata)) {
1032: if (!flg) {
1033: TASK_RET(task) = res[i].data;
1034: TASK_FLAG(task) = (u_long) res[i].fflags;
1035:
1036: /* remove user handle */
1037: transit_task2ready(task, &r->root_aio);
1038:
1039: fd = acb->aio_fildes;
1040: if ((len = aio_return(acb)) != -1) {
1041: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
1042: LOGERR;
1043: } else
1044: LOGERR;
1045: e_free(acb);
1046: TASK_DATLEN(task) = (u_long) len;
1047: TASK_FD(task) = fd;
1048: }
1049: flg++;
1050: }
1051: }
1052: break;
1053: #ifdef EVFILT_LIO
1054: case EVFILT_LIO:
1055: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
1056: acbs = (struct aiocb**) TASK_VAL(task);
1057: if (acbs == ((struct aiocb**) res[i].udata)) {
1058: if (!flg) {
1059: TASK_RET(task) = res[i].data;
1060: TASK_FLAG(task) = (u_long) res[i].fflags;
1061:
1062: /* remove user handle */
1063: transit_task2ready(task, &r->root_lio);
1064:
1065: iv = (struct iovec*) TASK_DATA(task);
1066: fd = acbs[0]->aio_fildes;
1067: off = acbs[0]->aio_offset;
1068: for (len = 0; i < TASK_DATLEN(task); len += l, i++) {
1069: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
1070: l = 0;
1071: else
1072: l = iv[i].iov_len;
1073: e_free(acbs[i]);
1074: }
1075: e_free(acbs);
1076: TASK_DATLEN(task) = (u_long) len;
1077: TASK_FD(task) = fd;
1078:
1079: if (lseek(fd, off + len, SEEK_CUR) == -1)
1080: LOGERR;
1081: }
1082: flg++;
1083: }
1084: }
1085: break;
1086: #endif /* EVFILT_LIO */
1087: #endif /* AIO_SUPPORT */
1088: #ifdef EVFILT_USER
1089: case EVFILT_USER:
1090: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
1091: if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
1092: if (!flg) {
1093: TASK_RET(task) = res[i].data;
1094: TASK_FLAG(task) = (u_long) res[i].fflags;
1095:
1096: /* remove user handle */
1097: transit_task2ready(task, &r->root_user);
1098: }
1099: flg++;
1100: }
1101: }
1102: break;
1103: #endif /* EVFILT_USER */
1104: }
1105:
1106: if (flg > 1)
1107: evt->flags &= ~EV_DELETE;
1108:
1109: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
1110: if (r->root_hooks.hook_exec.exception)
1111: r->root_hooks.hook_exec.exception(r, NULL);
1112: else
1113: LOGERR;
1114: }
1115: }
1116: }
1117: #endif
1118:
1119: #if SUP_ENABLE == EP_SUPPORT
1120: static inline void
1121: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
1122: {
1123: register int i, rflg, wflg;
1124: int ops = EPOLL_CTL_DEL;
1125: sched_task_t *t, *tmp, *task;
1126: struct epoll_event evt[1];
1127:
1128: for (i = 0; i < en; i++) {
1129: memcpy(evt, &res[i], sizeof evt);
1130: evt->events ^= evt->events;
1131: rflg = wflg = 0;
1132:
1133: if (res[i].events & (EPOLLIN | EPOLLPRI)) {
1134: task = NULL;
1135: TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
1136: if (TASK_FD(t) == evt->data.fd) {
1137: if (!task)
1138: task = t;
1139: rflg++;
1140: }
1141: }
1142:
1143: if (task) {
1144: TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
1145: /* remove read handle */
1146: remove_task_from(task, &r->root_read);
1147:
1148: if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
1149: if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
1150: (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
1151: task->task_type = taskUNUSE;
1152: insert_task_to(task, &r->root_unuse);
1153: } else {
1154: task->task_type = taskREADY;
1155: insert_task_to(task, &r->root_ready);
1156: }
1157: } else {
1158: task->task_type = taskREADY;
1159: insert_task_to(task, &r->root_ready);
1160: }
1161:
1162: if (!(res[i].events & EPOLLOUT) && FD_ISSET(evt->data.fd, &r->root_fds[1])) {
1163: evt->events |= EPOLLOUT;
1164: wflg = 42;
1165: }
1166: if (rflg > 1) {
1167: if (FD_ISSET(evt->data.fd, &r->root_fds[0]))
1168: evt->events |= EPOLLIN;
1169: if (FD_ISSET(evt->data.fd, &r->root_fds[2]))
1170: evt->events |= EPOLLPRI;
1171: } else {
1172: FD_CLR(evt->data.fd, &r->root_fds[0]);
1173: FD_CLR(evt->data.fd, &r->root_fds[2]);
1174: }
1175: }
1176: }
1177: if (res[i].events & EPOLLOUT) {
1178: task = NULL;
1179: TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
1180: if (TASK_FD(t) == evt->data.fd) {
1181: if (!task)
1182: task = t;
1183: wflg++;
1184: }
1185: }
1186:
1187: if (task) {
1188: TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
1189: /* remove write handle */
1190: remove_task_from(task, &r->root_write);
1191:
1192: if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
1193: if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
1194: (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
1195: task->task_type = taskUNUSE;
1196: insert_task_to(task, &r->root_unuse);
1197: } else {
1198: task->task_type = taskREADY;
1199: insert_task_to(task, &r->root_ready);
1200: }
1201: } else {
1202: task->task_type = taskREADY;
1203: insert_task_to(task, &r->root_ready);
1204: }
1205:
1206: if (!(res[i].events & EPOLLIN) && FD_ISSET(evt->data.fd, &r->root_fds[0])) {
1207: evt->events |= EPOLLIN;
1208: rflg = 42;
1209: }
1210: if (!(res[i].events & EPOLLPRI) && FD_ISSET(evt->data.fd, &r->root_fds[2])) {
1211: evt->events |= EPOLLPRI;
1212: rflg = 42;
1213: }
1214: if (wflg > 1)
1215: evt->events |= EPOLLOUT;
1216: else
1217: FD_CLR(evt->data.fd, &r->root_fds[1]);
1218: }
1219: }
1220:
1221: if (rflg > 1 || wflg > 1)
1222: ops = EPOLL_CTL_MOD;
1223:
1224: if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
1225: if (r->root_hooks.hook_exec.exception) {
1226: r->root_hooks.hook_exec.exception(r, NULL);
1227: } else
1228: LOGERR;
1229: }
1230: }
1231: }
1232: #endif
1233:
1234: #if SUP_ENABLE == NO_SUPPORT
/*
 * fetch_hook_select_proceed() - dispatch ready select() descriptors to scheduler queues
 *
 * @en = return value of select() (number of ready descriptors)
 * @rfd, @wfd, @xfd = read/write/exception fd sets as filled in by select()
 * @r = scheduler root task
 *
 * For each ready descriptor the first matching read/write task is moved to
 * the ready queue (or unuse queue if the exception hook requests abort);
 * the fd is dropped from the watched sets when no other task waits on it.
 */
static inline void
fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
{
	register int i, rflg, wflg;
	sched_task_t *t, *tmp, *task;

	/* skip select check if return value from select is zero */
	if (!en)
		return;

	/* in select mode r->root_kq holds (highest watched fd + 1) */
	for (i = 0; i < r->root_kq; i++) {
		/* skip descriptors the scheduler is not watching at all */
		if (!FD_ISSET(i, &r->root_fds[0]) &&
				!FD_ISSET(i, &r->root_fds[1]) &&
				!FD_ISSET(i, &r->root_fds[2]))
			continue;

		rflg = wflg = 0;

		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
			task = NULL;
			/* dispatch first matching read task; rflg counts all waiters on fd */
			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!task)
						task = t;
					rflg++;
				}
			}

			if (task) {
				/* bytes pending on fd: ioctl status in TASK_FLAG, count in TASK_RET */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));

				/* remove read handle */
				remove_task_from(task, &r->root_read);

				/* exception hook decides: non-zero return discards the task */
				if (r->root_hooks.hook_exec.exception) {
					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource: last read waiter on this fd is gone */
				if (rflg == 1) {
					FD_CLR(i, &r->root_fds[0]);
					FD_CLR(i, &r->root_fds[2]);
				}
			}
		}
		if (FD_ISSET(i, &wfd)) {
			task = NULL;
			/* dispatch first matching write task; wflg counts all waiters on fd */
			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!task)
						task = t;
					wflg++;
				}
			}

			if (task) {
				/* writable-space query; ioctl status in TASK_FLAG, count in TASK_RET */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));

				/* remove write handle */
				remove_task_from(task, &r->root_write);

				if (r->root_hooks.hook_exec.exception) {
					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource: last write waiter on this fd is gone */
				if (wflg == 1)
					FD_CLR(i, &r->root_fds[1]);
			}
		}
	}

	/* optimize select: shrink root_kq back to (highest still-watched fd + 1) */
	for (i = r->root_kq - 1; i >= 0; i--)
		if (FD_ISSET(i, &r->root_fds[0]) ||
				FD_ISSET(i, &r->root_fds[1]) ||
				FD_ISSET(i, &r->root_fds[2]))
			break;
	r->root_kq = i + 1;
}
1333: #endif
1334:
1335: /*
1336: * sched_hook_fetch() - Default FETCH hook
1337: *
1338: * @root = root task
1339: * @arg = unused
1340: * return: NULL error or !=NULL fetched task
1341: */
1342: void *
1343: sched_hook_fetch(void *root, void *arg __unused)
1344: {
1345: sched_root_task_t *r = root;
1346: sched_task_t *task, *tmp;
1347: struct timespec now, m, mtmp;
1348: #if SUP_ENABLE == KQ_SUPPORT
1349: struct kevent res[KQ_EVENTS];
1350: struct timespec *timeout;
1351: #elif SUP_ENABLE == EP_SUPPORT
1352: struct epoll_event res[KQ_EVENTS];
1353: u_long timeout = 0;
1354: #else
1355: struct timeval *timeout, tv;
1356: fd_set rfd, wfd, xfd;
1357: #endif
1358: int en;
1359:
1360: if (!r)
1361: return NULL;
1362:
1363: /* get new task by queue priority */
1364: while ((task = TAILQ_FIRST(&r->root_event))) {
1365: transit_task2unuse(task, &r->root_event);
1366: return task;
1367: }
1368: while ((task = TAILQ_FIRST(&r->root_ready))) {
1369: transit_task2unuse(task, &r->root_ready);
1370: return task;
1371: }
1372:
1373: /* if present member of task, set NOWAIT */
1374: if (!TAILQ_FIRST(&r->root_task)) {
1375: /* timer tasks */
1376: #ifdef TIMER_WITHOUT_SORT
1377: clock_gettime(CLOCK_MONOTONIC, &now);
1378:
1379: sched_timespecclear(&r->root_wait);
1380: TAILQ_FOREACH(task, &r->root_timer, task_node) {
1381: if (!sched_timespecisset(&r->root_wait))
1382: r->root_wait = TASK_TS(task);
1383: else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
1384: r->root_wait = TASK_TS(task);
1385: }
1386:
1387: if (TAILQ_FIRST(&r->root_timer)) {
1388: m = r->root_wait;
1389: sched_timespecsub(&m, &now, &mtmp);
1390: r->root_wait = mtmp;
1391: } else {
1392: /* set wait INFTIM */
1393: sched_timespecinf(&r->root_wait);
1394: }
1395: #else /* ! TIMER_WITHOUT_SORT */
1396: if ((task = TAILQ_FIRST(&r->root_timer))) {
1397: clock_gettime(CLOCK_MONOTONIC, &now);
1398:
1399: m = TASK_TS(task);
1400: sched_timespecsub(&m, &now, &mtmp);
1401: r->root_wait = mtmp;
1402: } else {
1403: /* set wait INFTIM */
1404: sched_timespecinf(&r->root_wait);
1405: }
1406: #endif /* TIMER_WITHOUT_SORT */
1407: } else /* no waiting for event, because we have ready task */
1408: sched_timespecclear(&r->root_wait);
1409:
1410: if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
1411: #if SUP_ENABLE == KQ_SUPPORT
1412: timeout = &r->root_wait;
1413: #elif SUP_ENABLE == EP_SUPPORT
1414: timeout = r->root_wait.tv_sec * 1000 + r->root_wait.tv_nsec / 1000000;
1415: #else
1416: sched_timespec2val(&r->root_wait, &tv);
1417: timeout = &tv;
1418: #endif /* KQ_SUPPORT */
1419: } else if (sched_timespecisinf(&r->root_poll))
1420: #if SUP_ENABLE == EP_SUPPORT
1421: timeout = -1;
1422: #else
1423: timeout = NULL;
1424: #endif
1425: else {
1426: #if SUP_ENABLE == KQ_SUPPORT
1427: timeout = &r->root_poll;
1428: #elif SUP_ENABLE == EP_SUPPORT
1429: timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
1430: #else
1431: sched_timespec2val(&r->root_poll, &tv);
1432: timeout = &tv;
1433: #endif /* KQ_SUPPORT */
1434: }
1435:
1436: #if SUP_ENABLE == KQ_SUPPORT
1437: if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
1438: #elif SUP_ENABLE == EP_SUPPORT
1439: if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
1440: #else
1441: xfd = r->root_fds[2];
1442: rfd = r->root_fds[0];
1443: wfd = r->root_fds[1];
1444: if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
1445: #endif /* KQ_SUPPORT */
1446: if (r->root_hooks.hook_exec.exception) {
1447: if (r->root_hooks.hook_exec.exception(r, NULL))
1448: return NULL;
1449: } else if (errno != EINTR)
1450: LOGERR;
1451: goto skip_event;
1452: }
1453:
1454: /* Go and catch the cat into pipes ... */
1455: #if SUP_ENABLE == KQ_SUPPORT
1456: /* kevent dispatcher */
1457: fetch_hook_kevent_proceed(en, res, r);
1458: #elif SUP_ENABLE == EP_SUPPORT
1459: /* epoll dispatcher */
1460: fetch_hook_epoll_proceed(en, res, r);
1461: #else
1462: /* select dispatcher */
1463: fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
1464: #endif /* KQ_SUPPORT */
1465:
1466: skip_event:
1467: /* timer update & put in ready queue */
1468: clock_gettime(CLOCK_MONOTONIC, &now);
1469:
1470: TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
1471: if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
1472: transit_task2ready(task, &r->root_timer);
1473:
1474: /* put regular task priority task to ready queue,
1475: if there is no ready task or reach max missing hit for regular task */
1476: if ((task = TAILQ_FIRST(&r->root_task))) {
1477: if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
1478: r->root_miss ^= r->root_miss;
1479:
1480: transit_task2ready(task, &r->root_task);
1481: } else
1482: r->root_miss++;
1483: } else
1484: r->root_miss ^= r->root_miss;
1485:
1486: /* OK, lets get ready task !!! */
1487: task = TAILQ_FIRST(&r->root_ready);
1488: if (task)
1489: transit_task2unuse(task, &r->root_ready);
1490: return task;
1491: }
1492:
1493: /*
1494: * sched_hook_exception() - Default EXCEPTION hook
1495: *
1496: * @root = root task
1497: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1498: * return: <0 errors and 0 ok
1499: */
1500: void *
1501: sched_hook_exception(void *root, void *arg)
1502: {
1503: sched_root_task_t *r = root;
1504:
1505: if (!r)
1506: return NULL;
1507:
1508: /* custom exception handling ... */
1509: if (arg) {
1510: if (arg == (void*) EV_EOF)
1511: return NULL;
1512: return (void*) -1; /* raise scheduler error!!! */
1513: }
1514:
1515: /* if error hook exists */
1516: if (r->root_hooks.hook_root.error)
1517: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1518:
1519: /* default case! */
1520: LOGERR;
1521: return NULL;
1522: }
1523:
1524: /*
1525: * sched_hook_condition() - Default CONDITION hook
1526: *
1527: * @root = root task
1528: * @arg = killState from schedRun()
1529: * return: NULL kill scheduler loop or !=NULL ok
1530: */
1531: void *
1532: sched_hook_condition(void *root, void *arg)
1533: {
1534: sched_root_task_t *r = root;
1535:
1536: if (!r)
1537: return NULL;
1538:
1539: return (void*) (*r->root_cond - *(intptr_t*) arg);
1540: }
1541:
1542: /*
1543: * sched_hook_rtc() - Default RTC hook
1544: *
1545: * @task = current task
1546: * @arg = unused
1547: * return: <0 errors and 0 ok
1548: */
1549: void *
1550: sched_hook_rtc(void *task, void *arg __unused)
1551: {
1552: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
1553: defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
1554: sched_task_t *sigt = NULL, *t = task;
1555: struct itimerspec its;
1556: struct sigevent evt;
1557: timer_t tmr;
1558: #if SUP_ENABLE != KQ_SUPPORT
1559: struct sigaction sa;
1560: #endif
1561:
1562: if (!t || !TASK_ROOT(t))
1563: return (void*) -1;
1564:
1565: memset(&evt, 0, sizeof evt);
1566: evt.sigev_notify = SIGEV_SIGNAL;
1567: evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
1568: evt.sigev_value.sival_ptr = t;
1569:
1570: if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
1571: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1572: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1573: else
1574: LOGERR;
1575: return (void*) -1;
1576: } else
1577: TASK_FLAG(t) = (u_long) tmr;
1578:
1579: #if SUP_ENABLE == KQ_SUPPORT
1580: if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
1581: t, (size_t) tmr))) {
1582: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1583: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1584: else
1585: LOGERR;
1586: timer_delete(tmr);
1587: return (void*) -1;
1588: } else
1589: TASK_RET(t) = (uintptr_t) sigt;
1590: #else
1591: memset(&sa, 0, sizeof sa);
1592: sigemptyset(&sa.sa_mask);
1593: sa.sa_sigaction = _sched_rtcSigWrapper;
1594: sa.sa_flags = SA_SIGINFO | SA_RESTART;
1595:
1596: if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
1597: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1598: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1599: else
1600: LOGERR;
1601: timer_delete(tmr);
1602: return (void*) -1;
1603: }
1604: #endif
1605:
1606: memset(&its, 0, sizeof its);
1607: its.it_value.tv_sec = t->task_val.ts.tv_sec;
1608: its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
1609:
1610: if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
1611: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1612: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1613: else
1614: LOGERR;
1615: schedCancel(sigt);
1616: timer_delete(tmr);
1617: return (void*) -1;
1618: }
1619: #endif /* HAVE_TIMER_CREATE */
1620: return NULL;
1621: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */