1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.41 2023/07/27 20:51:28 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004 - 2023
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: #ifdef HAVE_LIBPTHREAD
/* thread entry point used by sched_hook_thread(); runs one task via
 * schedCall() and publishes its result into the scheduler root */
static void *
_sched_threadWrapper(sched_task_t *t)
{
	void *ret = NULL;
	sched_root_task_t *r;

	/* no task or no root: nothing to run, terminate this thread */
	if (!t || !TASK_ROOT(t))
		pthread_exit(ret);
	else
		r = (sched_root_task_t*) TASK_ROOT(t);

	/* allow deferred cancellation from sched_hook_cancel() */
	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/*
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	*/

	/* notify parent, thread is ready for execution */
	pthread_testcancel();

	/* execute task callback; its return value is exposed via root_ret */
	ret = schedCall(t);
	r->root_ret = ret;

	/* TASK_VAL holds the pthread id set by the THREAD hook; non-zero
	 * means the task was not cancelled/recycled yet, so retire it */
	if (TASK_VAL(t)) {
		transit_task2unuse(t, &r->root_thread);
		TASK_VAL(t) = 0;
	}

	pthread_exit(ret);
}
80: #endif
81:
82: #if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
83: defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
84: #if SUP_ENABLE == KQ_SUPPORT
/* kqueue-build RTC trampoline: invoked as a task whose TASK_DATA points at
 * the real RTC task; disposes of the POSIX timer, runs the task, retires it */
static void *
_sched_rtcWrapper(sched_task_t *t)
{
	sched_task_t *task;
	void *ret;

	if (!t || !TASK_ROOT(t) || !TASK_DATA(t))
		return NULL;
	else {
		/* the actual user task travels in TASK_DATA; the timer id
		 * that fired travels in TASK_DATLEN */
		task = (sched_task_t*) TASK_DATA(t);
		timer_delete((timer_t) TASK_DATLEN(t));
	}

	ret = schedCall(task);

	transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	return ret;
}
103: #else
/* non-kqueue RTC path: SIGRTMIN+n signal handler installed for timer
 * expiry; the RTC task pointer is smuggled through sigevent sival_ptr */
static void
_sched_rtcSigWrapper(int sig, siginfo_t *si, void *uc)
{
	sched_task_t *task;

	if (si && si->si_value.sival_ptr) {
		task = (sched_task_t*) si->si_value.sival_ptr;
		/* one-shot: destroy the timer (id kept in TASK_FLAG) */
		timer_delete((timer_t) TASK_FLAG(task));

		/* NOTE(review): schedCall() runs from signal-handler context
		 * here — task callbacks on this path should be async-signal
		 * aware; confirm against library documentation */
		TASK_RET(task) = (intptr_t) schedCall(task);

		transit_task2unuse(task, &(TASK_ROOT(task))->root_rtc);
	}
}
118: #endif
119: #endif
120:
121: /*
122: * sched_hook_init() - Default INIT hook
123: *
124: * @root = root task
125: * @arg = unused
126: * return: <0 errors and 0 ok
127: */
128: void *
129: sched_hook_init(void *root, void *arg __unused)
130: {
131: sched_root_task_t *r = root;
132:
133: if (!r)
134: return (void*) -1;
135:
136: #if SUP_ENABLE == KQ_SUPPORT
137: r->root_kq = kqueue();
138: if (r->root_kq == -1) {
139: LOGERR;
140: return (void*) -1;
141: }
142: #elif SUP_ENABLE == EP_SUPPORT
143: r->root_kq = epoll_create(KQ_EVENTS);
144: if (r->root_kq == -1) {
145: LOGERR;
146: return (void*) -1;
147: }
148: #else
149: r->root_kq ^= r->root_kq;
150: #endif
151:
152: FD_ZERO(&r->root_fds[0]);
153: FD_ZERO(&r->root_fds[1]);
154: FD_ZERO(&r->root_fds[2]);
155:
156: return NULL;
157: }
158:
159: /*
160: * sched_hook_fini() - Default FINI hook
161: *
162: * @root = root task
163: * @arg = unused
164: * return: <0 errors and 0 ok
165: */
166: void *
167: sched_hook_fini(void *root, void *arg __unused)
168: {
169: sched_root_task_t *r = root;
170:
171: if (!r)
172: return (void*) -1;
173:
174: #if SUP_ENABLE == KQ_SUPPORT || SUP_ENABLE == EP_SUPPORT
175: if (r->root_kq > 2) {
176: close(r->root_kq);
177: r->root_kq = 0;
178: }
179: #else
180: r->root_kq ^= r->root_kq;
181: #endif
182:
183: FD_ZERO(&r->root_fds[2]);
184: FD_ZERO(&r->root_fds[1]);
185: FD_ZERO(&r->root_fds[0]);
186:
187: return NULL;
188: }
189:
190: /*
191: * sched_hook_cancel() - Default CANCEL hook
192: *
193: * @task = current task
194: * @arg = unused
195: * return: <0 errors and 0 ok
196: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task, *tmp, *tt;
	sched_root_task_t *r = NULL;
	/* flg counts subscribers sharing the same event source; the kernel
	 * registration is only removed when this task is the last one (flg < 2) */
	int flg = 0;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { .events = 0, .data.u64 = 0l };
#else
	register int i;
#endif
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	register int i = 0;
	struct aiocb **acbs;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

	switch (TASK_TYPE(t)) {
		case taskREAD:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_read, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
			/* last subscriber -> EV_DELETE, otherwise keep the filter */
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* rebuild the epoll interest mask for this fd from the
			 * fd_set shadow state (0=read, 1=write, 2=pri) */
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[1]))
				ee.events |= EPOLLOUT;

			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);
			} else {
				if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
					ee.events |= EPOLLIN;
				if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
					ee.events |= EPOLLPRI;
			}
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[0]);
				FD_CLR(TASK_FD(t), &r->root_fds[2]);

				/* optimize select: shrink max-fd watermark */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) ||
							FD_ISSET(i, &r->root_fds[1]) ||
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskWRITE:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_write, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#elif SUP_ENABLE == EP_SUPPORT
			/* keep read-side interest alive while dropping write */
			ee.data.fd = TASK_FD(t);
			ee.events ^= ee.events;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[0]))
				ee.events |= EPOLLIN;
			if (FD_ISSET(TASK_FD(t), &r->root_fds[2]))
				ee.events |= EPOLLPRI;

			if (flg < 2)
				FD_CLR(TASK_FD(t), &r->root_fds[1]);
			else
				ee.events |= EPOLLOUT;
#else
			if (flg < 2) {
				FD_CLR(TASK_FD(t), &r->root_fds[1]);

				/* optimize select: shrink max-fd watermark */
				for (i = r->root_kq - 1; i >= 0; i--)
					if (FD_ISSET(i, &r->root_fds[0]) ||
							FD_ISSET(i, &r->root_fds[1]) ||
							FD_ISSET(i, &r->root_fds[2]))
						break;
				r->root_kq = i + 1;
			}
#endif
			break;
		case taskALARM:
			/* alarm timers exist only on the kqueue build (EVFILT_TIMER) */
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_alarm, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_DATA(t));
#endif
#endif
			break;
		case taskNODE:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_node, task_node, tmp)
				if (TASK_FD(tt) == TASK_FD(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_FD(t));
#endif
#endif
			break;
		case taskPROC:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_proc, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
		case taskSIGNAL:
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_signal, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			/* last subscriber: also drop the signal from the root set */
			if (flg < 2) {
				pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
				sigdelset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
				pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);
			}
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_aio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort the in-flight request and reclaim its aiocb */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				e_free(acb);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_lio, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
			/* abort and free every aiocb of the list-I/O batch */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					e_free(acbs[i]);
				}
				e_free(acbs);
				TASK_VAL(t) = 0;
			}
#endif
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#if SUP_ENABLE == KQ_SUPPORT
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_user, task_node, tmp)
				if (TASK_VAL(tt) == TASK_VAL(t))
					flg++;
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, flg < 2 ? EV_DELETE : 0,
					0, 0, (void*) TASK_VAL(t));
#endif
#endif
			break;
#endif	/* EVFILT_USER */
		case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
			/* TASK_VAL is the pthread id assigned by sched_hook_thread();
			 * cancel, reap, then retire the task if the wrapper has not */
			if (TASK_VAL(t)) {
				pthread_cancel((pthread_t) TASK_VAL(t));
				pthread_join((pthread_t) TASK_VAL(t), NULL);
				if (TASK_VAL(t)) {
					transit_task2unuse(t, &(TASK_ROOT(t))->root_thread);
					TASK_VAL(t) = 0;
				}
			}
#endif
			/* no kernel-queue bookkeeping needed: return directly */
			return NULL;
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
		case taskRTC:
			/* timer id lives in TASK_FLAG (see RTC wrappers above) */
			timer_delete((timer_t) TASK_FLAG(t));
#if SUP_ENABLE == KQ_SUPPORT
			/* also cancel the helper task stored in TASK_RET */
			schedCancel((sched_task_t*) TASK_RET(t));
#else
			/* check for multi subscribers */
			TAILQ_FOREACH_SAFE(tt, &r->root_rtc, task_node, tmp)
				if (TASK_DATA(tt) == TASK_DATA(t))
					flg++;

			/* restore signal */
			if (flg < 2)
				signal((intptr_t) TASK_DATA(t) + SIGRTMIN, SIG_DFL);
#endif
			return NULL;
#endif	/* HAVE_TIMER_CREATE */
		default:
			return NULL;
	}

	/* flush the prepared change to the kernel event queue */
#if SUP_ENABLE == KQ_SUPPORT
	kevent(r->root_kq, chg, 1, NULL, 0, &timeout);
#elif SUP_ENABLE == EP_SUPPORT
	if (TASK_TYPE(t) == taskREAD || TASK_TYPE(t) == taskWRITE) {
		epoll_ctl(r->root_kq, ee.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, ee.data.fd, &ee);
	}
#endif
	return NULL;
}
483:
484: #ifdef HAVE_LIBPTHREAD
485: /*
486: * sched_hook_thread() - Default THREAD hook
487: *
488: * @task = current task
489: * @arg = pthread attributes
490: * return: <0 errors and 0 ok
491: */
492: void *
493: sched_hook_thread(void *task, void *arg)
494: {
495: sched_task_t *t = task;
496: pthread_t tid;
497: sigset_t s, o;
498:
499: if (!t || !TASK_ROOT(t))
500: return (void*) -1;
501:
502: sigfillset(&s);
503: pthread_sigmask(SIG_BLOCK, &s, &o);
504: errno = pthread_create(&tid, (pthread_attr_t*) arg,
505: (void *(*)(void*)) _sched_threadWrapper, t);
506: pthread_sigmask(SIG_SETMASK, &o, NULL);
507:
508: if (errno) {
509: LOGERR;
510: return (void*) -1;
511: } else
512: TASK_VAL(t) = (u_long) tid;
513:
514: if (!TASK_ISLOCKED(t))
515: TASK_LOCK(t);
516:
517: return NULL;
518: }
519: #endif
520:
521: /*
522: * sched_hook_read() - Default READ hook
523: *
524: * @task = current task
525: * @arg = unused
526: * return: <0 errors and 0 ok
527: */
void *
sched_hook_read(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	/* mask: extra kevent flags (kqueue) or epoll event bits; 0 = defaults */
	uintptr_t mask = (uintptr_t) arg;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { 0 };
	int flg = 0;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask,
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR | mask,
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLIN | EPOLLPRI;
	ee.data.fd = TASK_FD(t);
	ee.events = mask;
	/* flg records whether the fd is already registered (any direction):
	 * non-zero -> EPOLL_CTL_MOD, zero -> EPOLL_CTL_ADD; the shadow
	 * fd_sets are 0=read, 1=write, 2=priority */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else {
		/* mirror the newly-requested interest into the shadow sets */
		if (mask & EPOLLIN)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & EPOLLPRI)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}
#else
	/* select() build: mask bit 1 = read set, bit 2 = exception set */
	if (!mask) {
		FD_SET(TASK_FD(t), &r->root_fds[0]);
		FD_SET(TASK_FD(t), &r->root_fds[2]);
	} else {
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[0]);
		if (mask & 2)
			FD_SET(TASK_FD(t), &r->root_fds[2]);
	}

	/* keep root_kq as the select() nfds watermark */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
609:
610: /*
611: * sched_hook_write() - Default WRITE hook
612: *
613: * @task = current task
614: * @arg = unused
615: * return: <0 errors and 0 ok
616: */
void *
sched_hook_write(void *task, void *arg)
{
	sched_task_t *t = task;
	sched_root_task_t *r = NULL;
	/* mask: extra kevent flags (kqueue) or epoll event bits; 0 = defaults */
	uintptr_t mask = (uintptr_t) arg;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event ee = { 0 };
	int flg = 0;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;
	else
		r = TASK_ROOT(t);

#if SUP_ENABLE == KQ_SUPPORT
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask,
			0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR | mask,
			0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(r->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#elif SUP_ENABLE == EP_SUPPORT
	if (!mask)
		mask = EPOLLOUT;
	ee.data.fd = TASK_FD(t);
	ee.events = mask;

	/* flg: is this fd already registered? preserve existing interest
	 * bits from the shadow fd_sets (0=read, 1=write, 2=priority) */
	if (FD_ISSET(TASK_FD(t), &r->root_fds[2])) {
		flg |= 4;
		ee.events |= EPOLLPRI;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[0])) {
		flg |= 1;
		ee.events |= EPOLLIN;
	}
	if (FD_ISSET(TASK_FD(t), &r->root_fds[1])) {
		flg |= 2;
		ee.events |= EPOLLOUT;
	}

	if (epoll_ctl(r->root_kq, flg ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, TASK_FD(t), &ee) == -1) {
		if (r->root_hooks.hook_exec.exception)
			r->root_hooks.hook_exec.exception(r, NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* record write interest in the shadow set */
		if (mask & EPOLLOUT)
			FD_SET(TASK_FD(t), &r->root_fds[1]);
#else
	/* select() build: mask bit 1 selects the write set */
	if (!mask)
		FD_SET(TASK_FD(t), &r->root_fds[1]);
	else
		if (mask & 1)
			FD_SET(TASK_FD(t), &r->root_fds[1]);

	/* keep root_kq as the select() nfds watermark */
	if (TASK_FD(t) >= r->root_kq)
		r->root_kq = TASK_FD(t) + 1;
#endif

	return NULL;
}
692:
693: /*
694: * sched_hook_alarm() - Default ALARM hook
695: *
696: * @task = current task
697: * @arg = unused
698: * return: <0 errors and 0 ok
699: */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct timespec nowait = { 0, 0 };
	struct kevent chg[1];

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register an EVFILT_TIMER keyed by TASK_DATA; the period is the
	 * task's timespec converted to milliseconds */
#ifdef __NetBSD__
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: report via exception hook or errno log */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
#else
	return NULL;
#endif
}
731:
732: /*
733: * sched_hook_node() - Default NODE hook
734: *
735: * @task = current task
736: * @arg = if arg == 42 then waiting for all events
737: * return: <0 errors and 0 ok
738: */
void *
sched_hook_node(void *task, void *arg)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct timespec nowait = { 0, 0 };
	struct kevent chg[1];
	u_int addflags = (u_int) (uintptr_t) arg;

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the vnode behind TASK_FD for every standard note, plus any
	 * caller-supplied fflags bits */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE | addflags, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: report via exception hook or errno log */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
#else
	return NULL;
#endif
}
771:
772: /*
773: * sched_hook_proc() - Default PROC hook
774: *
775: * @task = current task
776: * @arg = unused
777: * return: <0 errors and 0 ok
778: */
void *
sched_hook_proc(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct timespec nowait = { 0, 0 };
	struct kevent chg[1];

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* track the process whose pid is in TASK_VAL: exit/fork/exec/track */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: report via exception hook or errno log */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
#else
	return NULL;
#endif
}
808:
809: /*
810: * sched_hook_signal() - Default SIGNAL hook
811: *
812: * @task = current task
813: * @arg = unused
814: * return: <0 errors and 0 ok
815: */
void *
sched_hook_signal(void *task, void *arg __unused)
{
	sched_task_t *t = task;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

#if SUP_ENABLE == KQ_SUPPORT
	/* TASK_VAL carries the signal number to subscribe to */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif

	/* record the signal in the root's tracked set regardless of backend;
	 * sched_hook_cancel() removes it again when the last subscriber goes */
	pthread_mutex_lock(&TASK_ROOT(t)->root_sigmtx);
	sigaddset(&TASK_ROOT(t)->root_sigset, TASK_VAL(t));
	pthread_mutex_unlock(&TASK_ROOT(t)->root_sigmtx);

	return NULL;
}
849:
850: /*
851: * sched_hook_user() - Default USER hook
852: *
853: * @task = current task
854: * @arg = unused
855: * return: <0 errors and 0 ok
856: */
857: #ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_SUPPORT
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register a user-triggered event: ident = TASK_VAL,
	 * fflags seeded from TASK_DATLEN */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

#endif
	return NULL;
}
887: #endif
888:
889: #if SUP_ENABLE == KQ_SUPPORT
/* dispatch `en` kevent results: move each matching task to the ready queue
 * and delete (or retain, if other subscribers remain) the kernel filter.
 * FIX: the EVFILT_LIO completion loop previously reused the outer event
 * index `i`, which both started partway through the aiocb array and
 * corrupted the outer loop counter; it now uses a dedicated index `j`. */
static inline void
fetch_hook_kevent_proceed(int en, struct kevent *res, sched_root_task_t *r)
{
	struct kevent evt[1];
	register int i, flg;
	sched_task_t *task, *tmp;
	struct timespec now = { 0, 0 };
#ifdef AIO_SUPPORT
	int len, fd;
	struct aiocb *acb;
#ifdef EVFILT_LIO
	int l;
	register int j;
	off_t off;
	struct aiocb **acbs;
	struct iovec *iv;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	for (i = 0; i < en; i++) {
		memcpy(evt, &res[i], sizeof evt);
		/* default: remove the filter; kept if multiple subscribers (below) */
		evt->flags = EV_DELETE;
		/* Put read/write task to ready queue */
		flg = 0;
		switch (res[i].filter) {
			case EVFILT_READ:
				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						/* only the first matching task fires; the rest
						 * are just counted to decide EV_DELETE */
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove read handle */
							remove_task_from(task, &r->root_read);

							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
									task->task_type = taskUNUSE;
									insert_task_to(task, &r->root_unuse);
								} else {
									task->task_type = taskREADY;
									insert_task_to(task, &r->root_ready);
								}
							} else {
								task->task_type = taskREADY;
								insert_task_to(task, &r->root_ready);
							}
						}
						flg++;
					}
				}
				break;
			case EVFILT_WRITE:
				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove write handle */
							remove_task_from(task, &r->root_write);

							if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
								if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
									task->task_type = taskUNUSE;
									insert_task_to(task, &r->root_unuse);
								} else {
									task->task_type = taskREADY;
									insert_task_to(task, &r->root_ready);
								}
							} else {
								task->task_type = taskREADY;
								insert_task_to(task, &r->root_ready);
							}
						}
						flg++;
					}
				}
				break;
			case EVFILT_TIMER:
				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
					if ((uintptr_t) TASK_DATA(task) == ((uintptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove alarm handle */
							transit_task2ready(task, &r->root_alarm);
						}
						flg++;
					}
				}
				break;
			case EVFILT_VNODE:
				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
					if (TASK_FD(task) == ((intptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove node handle */
							transit_task2ready(task, &r->root_node);
						}
						flg++;
					}
				}
				break;
			case EVFILT_PROC:
				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove proc handle */
							transit_task2ready(task, &r->root_proc);
						}
						flg++;
					}
				}
				break;
			case EVFILT_SIGNAL:
				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove signal handle */
							transit_task2ready(task, &r->root_signal);
						}
						flg++;
					}
				}
				break;
#ifdef AIO_SUPPORT
			case EVFILT_AIO:
				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
					acb = (struct aiocb*) TASK_VAL(task);
					if (acb == ((struct aiocb*) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove user handle */
							transit_task2ready(task, &r->root_aio);

							/* collect the completion status and advance
							 * the file position past the transferred bytes */
							fd = acb->aio_fildes;
							if ((len = aio_return(acb)) != -1) {
								if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
									LOGERR;
							} else
								LOGERR;
							e_free(acb);
							TASK_DATLEN(task) = (u_long) len;
							TASK_FD(task) = fd;
						}
						flg++;
					}
				}
				break;
#ifdef EVFILT_LIO
			case EVFILT_LIO:
				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
					acbs = (struct aiocb**) TASK_VAL(task);
					if (acbs == ((struct aiocb**) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove user handle */
							transit_task2ready(task, &r->root_lio);

							/* reap every aiocb of the batch with a private
							 * index (previously reused outer `i` — bug) */
							iv = (struct iovec*) TASK_DATA(task);
							fd = acbs[0]->aio_fildes;
							off = acbs[0]->aio_offset;
							for (j = 0, len = 0; j < TASK_DATLEN(task); len += l, j++) {
								if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
									l = 0;
								else
									l = iv[j].iov_len;
								e_free(acbs[j]);
							}
							e_free(acbs);
							TASK_DATLEN(task) = (u_long) len;
							TASK_FD(task) = fd;

							if (lseek(fd, off + len, SEEK_CUR) == -1)
								LOGERR;
						}
						flg++;
					}
				}
				break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
			case EVFILT_USER:
				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
					if (TASK_VAL(task) == ((uintptr_t) res[i].udata)) {
						if (!flg) {
							TASK_RET(task) = res[i].data;
							TASK_FLAG(task) = (u_long) res[i].fflags;

							/* remove user handle */
							transit_task2ready(task, &r->root_user);
						}
						flg++;
					}
				}
				break;
#endif	/* EVFILT_USER */
		}

		/* other tasks still subscribed: keep the kernel filter alive */
		if (flg > 1)
			evt->flags &= ~EV_DELETE;

		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
			if (r->root_hooks.hook_exec.exception)
				r->root_hooks.hook_exec.exception(r, NULL);
			else
				LOGERR;
		}
	}
}
1114: #endif
1115:
1116: #if SUP_ENABLE == EP_SUPPORT
1117: static inline void
1118: fetch_hook_epoll_proceed(int en, struct epoll_event *res, sched_root_task_t *r)
1119: {
1120: register int i, rflg, wflg;
1121: int ops = EPOLL_CTL_DEL;
1122: sched_task_t *t, *tmp, *task;
1123: struct epoll_event evt[1];
1124:
1125: for (i = 0; i < en; i++) {
1126: memcpy(evt, &res[i], sizeof evt);
1127: evt->events ^= evt->events;
1128: rflg = wflg = 0;
1129:
1130: if (res[i].events & (EPOLLIN | EPOLLPRI)) {
1131: task = NULL;
1132: TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
1133: if (TASK_FD(t) == evt->data.fd) {
1134: if (!task)
1135: task = t;
1136: rflg++;
1137: }
1138: }
1139:
1140: if (task) {
1141: TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));
1142: /* remove read handle */
1143: remove_task_from(task, &r->root_read);
1144:
1145: if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
1146: if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
1147: (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
1148: task->task_type = taskUNUSE;
1149: insert_task_to(task, &r->root_unuse);
1150: } else {
1151: task->task_type = taskREADY;
1152: insert_task_to(task, &r->root_ready);
1153: }
1154: } else {
1155: task->task_type = taskREADY;
1156: insert_task_to(task, &r->root_ready);
1157: }
1158:
1159: if (!(res[i].events & EPOLLOUT) && FD_ISSET(evt->data.fd, &r->root_fds[1])) {
1160: evt->events |= EPOLLOUT;
1161: wflg = 42;
1162: }
1163: if (rflg > 1) {
1164: if (FD_ISSET(evt->data.fd, &r->root_fds[0]))
1165: evt->events |= EPOLLIN;
1166: if (FD_ISSET(evt->data.fd, &r->root_fds[2]))
1167: evt->events |= EPOLLPRI;
1168: } else {
1169: FD_CLR(evt->data.fd, &r->root_fds[0]);
1170: FD_CLR(evt->data.fd, &r->root_fds[2]);
1171: }
1172: }
1173: }
1174: if (res[i].events & EPOLLOUT) {
1175: task = NULL;
1176: TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
1177: if (TASK_FD(t) == evt->data.fd) {
1178: if (!task)
1179: task = t;
1180: wflg++;
1181: }
1182: }
1183:
1184: if (task) {
1185: TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));
1186: /* remove write handle */
1187: remove_task_from(task, &r->root_write);
1188:
1189: if (r->root_hooks.hook_exec.exception && evt->events & (EPOLLERR | EPOLLHUP)) {
1190: if (r->root_hooks.hook_exec.exception(r, (void*) (intptr_t)
1191: (evt->events & EPOLLERR ? EV_ERROR | EV_EOF : EV_EOF))) {
1192: task->task_type = taskUNUSE;
1193: insert_task_to(task, &r->root_unuse);
1194: } else {
1195: task->task_type = taskREADY;
1196: insert_task_to(task, &r->root_ready);
1197: }
1198: } else {
1199: task->task_type = taskREADY;
1200: insert_task_to(task, &r->root_ready);
1201: }
1202:
1203: if (!(res[i].events & EPOLLIN) && FD_ISSET(evt->data.fd, &r->root_fds[0])) {
1204: evt->events |= EPOLLIN;
1205: rflg = 42;
1206: }
1207: if (!(res[i].events & EPOLLPRI) && FD_ISSET(evt->data.fd, &r->root_fds[2])) {
1208: evt->events |= EPOLLPRI;
1209: rflg = 42;
1210: }
1211: if (wflg > 1)
1212: evt->events |= EPOLLOUT;
1213: else
1214: FD_CLR(evt->data.fd, &r->root_fds[1]);
1215: }
1216: }
1217:
1218: ops = EPOLL_CTL_DEL;
1219: if (rflg > 1 || wflg > 1)
1220: ops = EPOLL_CTL_MOD;
1221:
1222: if (epoll_ctl(r->root_kq, ops, evt->data.fd, evt) == -1) {
1223: if (r->root_hooks.hook_exec.exception) {
1224: r->root_hooks.hook_exec.exception(r, NULL);
1225: } else
1226: LOGERR;
1227: }
1228: }
1229: }
1230: #endif
1231:
1232: #if SUP_ENABLE == NO_SUPPORT
/*
 * fetch_hook_select_proceed() - dispatch select() results
 *
 * Walks every watched descriptor up to r->root_kq (here the max fd + 1
 * for select, not a kqueue handle), fires the first matching read/write
 * task, releases fd bookkeeping for the last waiter, and finally shrinks
 * root_kq to the highest still-watched descriptor + 1.
 *
 * @en = return value of select()
 * @rfd/@wfd/@xfd = read/write/exception fd sets as modified by select()
 * @r = root task
 */
static inline void
fetch_hook_select_proceed(int en, fd_set rfd, fd_set wfd, fd_set xfd, sched_root_task_t *r)
{
	register int i, rflg, wflg;
	sched_task_t *t, *tmp, *task;

	/* skip select check if return value from select is zero */
	if (!en)
		return;

	for (i = 0; i < r->root_kq; i++) {
		/* consider only descriptors the scheduler actually watches */
		if (!FD_ISSET(i, &r->root_fds[0]) &&
				!FD_ISSET(i, &r->root_fds[1]) &&
				!FD_ISSET(i, &r->root_fds[2]))
			continue;

		rflg = wflg = 0;

		/* readable or exceptional condition on fd i */
		if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
			task = NULL;
			/* fire the first matching read task; rflg counts all waiters */
			TAILQ_FOREACH_SAFE(t, &r->root_read, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!task)
						task = t;
					rflg++;
				}
			}

			if (task) {
				/* stash the pending byte count for the handler */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONREAD, &TASK_RET(task));

				/* remove read handle */
				remove_task_from(task, &r->root_read);

				/* exception hook decides: discard task or run it */
				if (r->root_hooks.hook_exec.exception) {
					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource: this was the only reader on fd i */
				if (rflg == 1) {
					FD_CLR(i, &r->root_fds[0]);
					FD_CLR(i, &r->root_fds[2]);
				}
			}
		}
		/* writable condition on fd i */
		if (FD_ISSET(i, &wfd)) {
			task = NULL;
			/* fire the first matching write task; wflg counts all waiters */
			TAILQ_FOREACH_SAFE(t, &r->root_write, task_node, tmp) {
				if (TASK_FD(t) == i) {
					if (!task)
						task = t;
					wflg++;
				}
			}

			if (task) {
				/* stash the writable space for the handler */
				TASK_FLAG(task) = ioctl(TASK_FD(task), FIONWRITE, &TASK_RET(task));

				/* remove write handle */
				remove_task_from(task, &r->root_write);

				/* exception hook decides: discard task or run it */
				if (r->root_hooks.hook_exec.exception) {
					if (r->root_hooks.hook_exec.exception(r, NULL)) {
						task->task_type = taskUNUSE;
						insert_task_to(task, &r->root_unuse);
					} else {
						task->task_type = taskREADY;
						insert_task_to(task, &r->root_ready);
					}
				} else {
					task->task_type = taskREADY;
					insert_task_to(task, &r->root_ready);
				}

				/* remove resource: this was the only writer on fd i */
				if (wflg == 1)
					FD_CLR(i, &r->root_fds[1]);
			}
		}
	}

	/* optimize select: shrink root_kq to highest watched fd + 1 */
	for (i = r->root_kq - 1; i >= 0; i--)
		if (FD_ISSET(i, &r->root_fds[0]) ||
				FD_ISSET(i, &r->root_fds[1]) ||
				FD_ISSET(i, &r->root_fds[2]))
			break;
	r->root_kq = i + 1;
}
1331: #endif
1332:
1333: /*
1334: * sched_hook_fetch() - Default FETCH hook
1335: *
1336: * @root = root task
1337: * @arg = unused
1338: * return: NULL error or !=NULL fetched task
1339: */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp, *tsmin;
#if SUP_ENABLE == KQ_SUPPORT
	struct kevent res[KQ_EVENTS];
	struct timespec *timeout;
#elif SUP_ENABLE == EP_SUPPORT
	struct epoll_event res[KQ_EVENTS];
	/* NOTE(review): epoll_wait() takes an int timeout; assigning -1 to a
	 * u_long relies on narrowing back to int at the call -- confirm */
	u_long timeout = 0;
#else
	struct timeval *timeout, tv;
	fd_set rfd, wfd, xfd;
#endif
	int en;

	if (!r)
		return NULL;

	/* get new task by queue priority: event queue first, then ready queue */
	while ((task = TAILQ_FIRST(&r->root_event))) {
		transit_task2unuse(task, &r->root_event);
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
		transit_task2unuse(task, &r->root_ready);
		return task;
	}

	/* if present member of task, set NOWAIT */
	if (!TAILQ_FIRST(&r->root_task)) {
		/* timer tasks */
#ifdef TIMER_WITHOUT_SORT
		/* unsorted timer queue: scan all timers for the earliest deadline */
		clock_gettime(CLOCK_MONOTONIC, &now);

		sched_timespecclear(&r->root_wait);
		TAILQ_FOREACH(task, &r->root_timer, task_node) {
			if (!sched_timespecisset(&r->root_wait))
				r->root_wait = TASK_TS(task);
			else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
				r->root_wait = TASK_TS(task);
		}

		if (TAILQ_FIRST(&r->root_timer)) {
			/* convert the absolute deadline into a relative wait */
			m = r->root_wait;
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#else /* ! TIMER_WITHOUT_SORT */
		/* sorted timer queue: the head holds the earliest deadline */
		if ((task = TAILQ_FIRST(&r->root_timer))) {
			clock_gettime(CLOCK_MONOTONIC, &now);

			m = TASK_TS(task);
			sched_timespecsub(&m, &now, &mtmp);
			r->root_wait = mtmp;
		} else {
			/* set wait INFTIM */
			sched_timespecinf(&r->root_wait);
		}
#endif /* TIMER_WITHOUT_SORT */
	} else /* no waiting for event, because we have ready task */
		sched_timespecclear(&r->root_wait);

	/* pick poll timeout: min(root_wait, root_poll), infinite, or poll interval */
	if (!sched_timespecisinf(&r->root_wait)) {
		tsmin = sched_timespecmin(&r->root_wait, &r->root_poll);
#if SUP_ENABLE == KQ_SUPPORT
		timeout = tsmin;
#elif SUP_ENABLE == EP_SUPPORT
		/* epoll timeout is expressed in milliseconds */
		timeout = tsmin->tv_sec * 1000 + tsmin->tv_nsec / 1000000;
#else
		sched_timespec2val(tsmin, &tv);
		timeout = &tv;
#endif /* KQ_SUPPORT */
	} else if (sched_timespecisinf(&r->root_poll))
		/* wait forever: -1 for epoll, NULL pointer otherwise */
#if SUP_ENABLE == EP_SUPPORT
		timeout = -1;
#else
		timeout = NULL;
#endif
	else {
#if SUP_ENABLE == KQ_SUPPORT
		timeout = &r->root_poll;
#elif SUP_ENABLE == EP_SUPPORT
		timeout = r->root_poll.tv_sec * 1000 + r->root_poll.tv_nsec / 1000000;
#else
		sched_timespec2val(&r->root_poll, &tv);
		timeout = &tv;
#endif /* KQ_SUPPORT */
	}

	/* block in the backend-specific multiplexer */
#if SUP_ENABLE == KQ_SUPPORT
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
#elif SUP_ENABLE == EP_SUPPORT
	if ((en = epoll_wait(r->root_kq, res, KQ_EVENTS, timeout)) == -1) {
#else
	/* select() mutates its fd sets: operate on copies */
	xfd = r->root_fds[2];
	rfd = r->root_fds[0];
	wfd = r->root_fds[1];
	if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
#endif /* KQ_SUPPORT */
		/* exception hook may abort the fetch; EINTR is tolerated silently */
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		goto skip_event;
	}

	/* Go and catch the cat into pipes ... */
#if SUP_ENABLE == KQ_SUPPORT
	/* kevent dispatcher */
	fetch_hook_kevent_proceed(en, res, r);
#elif SUP_ENABLE == EP_SUPPORT
	/* epoll dispatcher */
	fetch_hook_epoll_proceed(en, res, r);
#else
	/* select dispatcher */
	fetch_hook_select_proceed(en, rfd, wfd, xfd, r);
#endif /* KQ_SUPPORT */

skip_event:
	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	/* every timer whose deadline has passed becomes ready */
	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0)
			transit_task2ready(task, &r->root_timer);

	/* put regular task priority task to ready queue,
		if there is no ready task or reach max missing hit for regular task */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
			/* x ^= x clears the miss counter */
			r->root_miss ^= r->root_miss;

			transit_task2ready(task, &r->root_task);
		} else
			r->root_miss++;
	} else
		r->root_miss ^= r->root_miss;

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (task)
		transit_task2unuse(task, &r->root_ready);
	return task;
}
1491:
1492: /*
1493: * sched_hook_exception() - Default EXCEPTION hook
1494: *
1495: * @root = root task
 * @arg = custom handling: EV_EOF is ignored (returns NULL), any other non-NULL value raises a scheduler error; NULL (default) reports errno via the error hook or LOGERR
1497: * return: <0 errors and 0 ok
1498: */
1499: void *
1500: sched_hook_exception(void *root, void *arg)
1501: {
1502: sched_root_task_t *r = root;
1503:
1504: if (!r)
1505: return NULL;
1506:
1507: /* custom exception handling ... */
1508: if (arg) {
1509: if (arg == (void*) EV_EOF)
1510: return NULL;
1511: return (void*) -1; /* raise scheduler error!!! */
1512: }
1513:
1514: /* if error hook exists */
1515: if (r->root_hooks.hook_root.error)
1516: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1517:
1518: /* default case! */
1519: LOGERR;
1520: return NULL;
1521: }
1522:
1523: /*
1524: * sched_hook_condition() - Default CONDITION hook
1525: *
1526: * @root = root task
1527: * @arg = killState from schedRun()
1528: * return: NULL kill scheduler loop or !=NULL ok
1529: */
1530: void *
1531: sched_hook_condition(void *root, void *arg)
1532: {
1533: sched_root_task_t *r = root;
1534:
1535: if (!r)
1536: return NULL;
1537:
1538: return (void*) (*r->root_cond - *(intptr_t*) arg);
1539: }
1540:
1541: /*
1542: * sched_hook_rtc() - Default RTC hook
1543: *
1544: * @task = current task
1545: * @arg = unused
1546: * return: <0 errors and 0 ok
1547: */
void *
sched_hook_rtc(void *task, void *arg __unused)
{
#if defined(HAVE_LIBRT) && defined(HAVE_TIMER_CREATE) && \
	defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
	sched_task_t *sigt = NULL, *t = task;
	struct itimerspec its;
	struct sigevent evt;
	timer_t tmr;
#if SUP_ENABLE != KQ_SUPPORT
	struct sigaction sa;
#endif

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* deliver a real-time signal (SIGRTMIN + task data) when the timer fires */
	memset(&evt, 0, sizeof evt);
	evt.sigev_notify = SIGEV_SIGNAL;
	evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
	evt.sigev_value.sival_ptr = t;

	if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	} else
		/* keep the POSIX timer id on the task for later deletion */
		TASK_FLAG(t) = (u_long) tmr;

#if SUP_ENABLE == KQ_SUPPORT
	/* kqueue build: route the RT signal through a scheduler signal task */
	if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
			t, (size_t) tmr))) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	} else
		TASK_RET(t) = (uintptr_t) sigt;
#else
	/* non-kqueue build: install a plain SA_SIGINFO handler for the signal */
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = _sched_rtcSigWrapper;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;

	if (sigaction(evt.sigev_signo, &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		timer_delete(tmr);
		return (void*) -1;
	}
#endif

	/* one-shot relative expiration taken from the task's timespec value */
	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = t->task_val.ts.tv_sec;
	its.it_value.tv_nsec = t->task_val.ts.tv_nsec;

	if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		/* NOTE(review): sigt is NULL in the sigaction build; assumes
		 * schedCancel(NULL) is a safe no-op -- confirm */
		schedCancel(sigt);
		timer_delete(tmr);
		return (void*) -1;
	}
#endif /* HAVE_TIMER_CREATE */
	return NULL;
}
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */