1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.27.2.2 2014/05/21 21:55:10 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004 - 2014
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: *
53: * @root = root task
54: * @arg = unused
55: * return: <0 errors and 0 ok
56: */
57: void *
58: sched_hook_init(void *root, void *arg __unused)
59: {
60: sched_root_task_t *r = root;
61:
62: if (!r)
63: return (void*) -1;
64:
65: #if SUP_ENABLE == KQ_ENABLE
66: r->root_kq = kqueue();
67: if (r->root_kq == -1) {
68: LOGERR;
69: return (void*) -1;
70: }
71: #else
72: r->root_kq ^= r->root_kq;
73: FD_ZERO(&r->root_fds[0]);
74: FD_ZERO(&r->root_fds[1]);
75: #endif
76:
77: return NULL;
78: }
79:
80: /*
81: * sched_hook_fini() - Default FINI hook
82: *
83: * @root = root task
84: * @arg = unused
85: * return: <0 errors and 0 ok
86: */
87: void *
88: sched_hook_fini(void *root, void *arg __unused)
89: {
90: sched_root_task_t *r = root;
91:
92: if (!r)
93: return (void*) -1;
94:
95: #if SUP_ENABLE == KQ_ENABLE
96: if (r->root_kq > 2) {
97: close(r->root_kq);
98: r->root_kq = 0;
99: }
100: #else
101: FD_ZERO(&r->root_fds[1]);
102: FD_ZERO(&r->root_fds[0]);
103: r->root_kq ^= r->root_kq;
104: #endif
105:
106: return NULL;
107: }
108:
109: /*
110: * sched_hook_cancel() - Default CANCEL hook
111: *
112: * @task = current task
113: * @arg = unused
114: * return: <0 errors and 0 ok
115: */
116: void *
117: sched_hook_cancel(void *task, void *arg __unused)
118: {
119: sched_task_t *t = task;
120: #if SUP_ENABLE == KQ_ENABLE
121: struct kevent chg[1];
122: struct timespec timeout = { 0, 0 };
123: #else
124: sched_root_task_t *r = NULL;
125: register int i;
126: #endif
127: #ifdef AIO_SUPPORT
128: struct aiocb *acb;
129: #ifdef EVFILT_LIO
130: register int i = 0;
131: struct aiocb **acbs;
132: #endif /* EVFILT_LIO */
133: #endif /* AIO_SUPPORT */
134:
135: if (!t || !TASK_ROOT(t))
136: return (void*) -1;
137: #if SUP_ENABLE != KQ_ENABLE
138: r = TASK_ROOT(t);
139: #endif
140:
141: switch (TASK_TYPE(t)) {
142: case taskREAD:
143: #if SUP_ENABLE == KQ_ENABLE
144: #ifdef __NetBSD__
145: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
146: #else
147: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
148: #endif
149: #else
150: FD_CLR(TASK_FD(t), &r->root_fds[0]);
151:
152: /* optimize select */
153: for (i = r->root_kq - 1; i > 2; i--)
154: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
155: break;
156: if (i > 2)
157: r->root_kq = i + 1;
158: #endif
159: break;
160: case taskWRITE:
161: #if SUP_ENABLE == KQ_ENABLE
162: #ifdef __NetBSD__
163: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
164: #else
165: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
166: #endif
167: #else
168: FD_CLR(TASK_FD(t), &r->root_fds[1]);
169:
170: /* optimize select */
171: for (i = r->root_kq - 1; i > 2; i--)
172: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
173: break;
174: if (i > 2)
175: r->root_kq = i + 1;
176: #endif
177: break;
178: case taskALARM:
179: #if SUP_ENABLE == KQ_ENABLE
180: #ifdef __NetBSD__
181: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
182: 0, 0, (intptr_t) TASK_DATA(t));
183: #else
184: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
185: 0, 0, (void*) TASK_DATA(t));
186: #endif
187: #endif
188: break;
189: case taskNODE:
190: #if SUP_ENABLE == KQ_ENABLE
191: #ifdef __NetBSD__
192: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
193: #else
194: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
195: #endif
196: #endif
197: break;
198: case taskPROC:
199: #if SUP_ENABLE == KQ_ENABLE
200: #ifdef __NetBSD__
201: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
202: #else
203: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
204: #endif
205: #endif
206: break;
207: case taskSIGNAL:
208: #if SUP_ENABLE == KQ_ENABLE
209: #ifdef __NetBSD__
210: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
211: #else
212: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
213: #endif
214: /* restore signal */
215: signal(TASK_VAL(t), SIG_DFL);
216: #endif
217: break;
218: #ifdef AIO_SUPPORT
219: case taskAIO:
220: #if SUP_ENABLE == KQ_ENABLE
221: #ifdef __NetBSD__
222: EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
223: #else
224: EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
225: #endif
226: acb = (struct aiocb*) TASK_VAL(t);
227: if (acb) {
228: if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
229: aio_return(acb);
230: free(acb);
231: TASK_VAL(t) = 0;
232: }
233: #endif
234: break;
235: #ifdef EVFILT_LIO
236: case taskLIO:
237: #if SUP_ENABLE == KQ_ENABLE
238: #ifdef __NetBSD__
239: EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
240: #else
241: EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
242: #endif
243: acbs = (struct aiocb**) TASK_VAL(t);
244: if (acbs) {
245: for (i = 0; i < TASK_DATLEN(t); i++) {
246: if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
247: aio_return(acbs[i]);
248: free(acbs[i]);
249: }
250: free(acbs);
251: TASK_VAL(t) = 0;
252: }
253: #endif
254: break;
255: #endif /* EVFILT_LIO */
256: #endif /* AIO_SUPPORT */
257: #ifdef EVFILT_USER
258: case taskUSER:
259: #if SUP_ENABLE == KQ_ENABLE
260: #ifdef __NetBSD__
261: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
262: #else
263: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
264: #endif
265: #endif
266: break;
267: #endif /* EVFILT_USER */
268: case taskTHREAD:
269: #ifdef HAVE_LIBPTHREAD
270: if (TASK_VAL(t))
271: pthread_cancel((pthread_t) TASK_VAL(t));
272: #endif
273: return NULL;
274: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
275: case taskRTC:
276: timer_delete((timer_t) TASK_FLAG(t));
277: schedCancel((sched_task_t*) TASK_RET(t));
278: return NULL;
279: #endif /* HAVE_TIMER_CREATE */
280: default:
281: return NULL;
282: }
283:
284: #if SUP_ENABLE == KQ_ENABLE
285: kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
286: #endif
287: return NULL;
288: }
289:
290: #ifdef HAVE_LIBPTHREAD
291: /*
292: * sched_hook_thread() - Default THREAD hook
293: *
294: * @task = current task
295: * @arg = pthread attributes
296: * return: <0 errors and 0 ok
297: */
298: void *
299: sched_hook_thread(void *task, void *arg)
300: {
301: sched_task_t *t = task;
302: pthread_t tid;
303: sigset_t s, o;
304:
305: if (!t || !TASK_ROOT(t))
306: return (void*) -1;
307:
308: sigfillset(&s);
309: pthread_sigmask(SIG_BLOCK, &s, &o);
310: if ((errno = pthread_create(&tid, (pthread_attr_t*) arg,
311: (void *(*)(void*)) _sched_threadWrapper, t))) {
312: LOGERR;
313: pthread_sigmask(SIG_SETMASK, &o, NULL);
314: return (void*) -1;
315: } else
316: TASK_VAL(t) = (u_long) tid;
317:
318: if (!TASK_ISLOCKED(t))
319: TASK_LOCK(t);
320:
321: pthread_sigmask(SIG_SETMASK, &o, NULL);
322: return NULL;
323: }
324: #endif
325:
326: /*
327: * sched_hook_read() - Default READ hook
328: *
329: * @task = current task
330: * @arg = unused
331: * return: <0 errors and 0 ok
332: */
333: void *
334: sched_hook_read(void *task, void *arg __unused)
335: {
336: sched_task_t *t = task;
337: #if SUP_ENABLE == KQ_ENABLE
338: struct kevent chg[1];
339: struct timespec timeout = { 0, 0 };
340: #else
341: sched_root_task_t *r = NULL;
342: #endif
343:
344: if (!t || !TASK_ROOT(t))
345: return (void*) -1;
346: #if SUP_ENABLE != KQ_ENABLE
347: r = TASK_ROOT(t);
348: #endif
349:
350: #if SUP_ENABLE == KQ_ENABLE
351: #ifdef __NetBSD__
352: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
353: #else
354: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
355: #endif
356: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
357: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
358: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
359: else
360: LOGERR;
361: return (void*) -1;
362: }
363: #else
364: FD_SET(TASK_FD(t), &r->root_fds[0]);
365: if (TASK_FD(t) >= r->root_kq)
366: r->root_kq = TASK_FD(t) + 1;
367: #endif
368:
369: return NULL;
370: }
371:
372: /*
373: * sched_hook_write() - Default WRITE hook
374: *
375: * @task = current task
376: * @arg = unused
377: * return: <0 errors and 0 ok
378: */
379: void *
380: sched_hook_write(void *task, void *arg __unused)
381: {
382: sched_task_t *t = task;
383: #if SUP_ENABLE == KQ_ENABLE
384: struct kevent chg[1];
385: struct timespec timeout = { 0, 0 };
386: #else
387: sched_root_task_t *r = NULL;
388: #endif
389:
390: if (!t || !TASK_ROOT(t))
391: return (void*) -1;
392: #if SUP_ENABLE != KQ_ENABLE
393: r = TASK_ROOT(t);
394: #endif
395:
396: #if SUP_ENABLE == KQ_ENABLE
397: #ifdef __NetBSD__
398: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
399: #else
400: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
401: #endif
402: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
403: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
404: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
405: else
406: LOGERR;
407: return (void*) -1;
408: }
409: #else
410: FD_SET(TASK_FD(t), &r->root_fds[1]);
411: if (TASK_FD(t) >= r->root_kq)
412: r->root_kq = TASK_FD(t) + 1;
413: #endif
414:
415: return NULL;
416: }
417:
418: /*
419: * sched_hook_alarm() - Default ALARM hook
420: *
421: * @task = current task
422: * @arg = unused
423: * return: <0 errors and 0 ok
424: */
425: void *
426: sched_hook_alarm(void *task, void *arg __unused)
427: {
428: #if SUP_ENABLE == KQ_ENABLE
429: sched_task_t *t = task;
430: struct kevent chg[1];
431: struct timespec timeout = { 0, 0 };
432:
433: if (!t || !TASK_ROOT(t))
434: return (void*) -1;
435:
436: #ifdef __NetBSD__
437: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
438: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
439: (intptr_t) TASK_DATA(t));
440: #else
441: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
442: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
443: (void*) TASK_DATA(t));
444: #endif
445: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
446: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
447: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
448: else
449: LOGERR;
450: return (void*) -1;
451: }
452:
453: #endif
454: return NULL;
455: }
456:
457: /*
458: * sched_hook_node() - Default NODE hook
459: *
460: * @task = current task
461: * @arg = unused
462: * return: <0 errors and 0 ok
463: */
464: void *
465: sched_hook_node(void *task, void *arg __unused)
466: {
467: #if SUP_ENABLE == KQ_ENABLE
468: sched_task_t *t = task;
469: struct kevent chg[1];
470: struct timespec timeout = { 0, 0 };
471:
472: if (!t || !TASK_ROOT(t))
473: return (void*) -1;
474:
475: #ifdef __NetBSD__
476: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
477: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
478: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
479: #else
480: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
481: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
482: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
483: #endif
484: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
485: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
486: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
487: else
488: LOGERR;
489: return (void*) -1;
490: }
491:
492: #endif
493: return NULL;
494: }
495:
496: /*
497: * sched_hook_proc() - Default PROC hook
498: *
499: * @task = current task
500: * @arg = unused
501: * return: <0 errors and 0 ok
502: */
503: void *
504: sched_hook_proc(void *task, void *arg __unused)
505: {
506: #if SUP_ENABLE == KQ_ENABLE
507: sched_task_t *t = task;
508: struct kevent chg[1];
509: struct timespec timeout = { 0, 0 };
510:
511: if (!t || !TASK_ROOT(t))
512: return (void*) -1;
513:
514: #ifdef __NetBSD__
515: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
516: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
517: #else
518: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
519: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
520: #endif
521: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
522: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
523: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
524: else
525: LOGERR;
526: return (void*) -1;
527: }
528:
529: #endif
530: return NULL;
531: }
532:
533: /*
534: * sched_hook_signal() - Default SIGNAL hook
535: *
536: * @task = current task
537: * @arg = unused
538: * return: <0 errors and 0 ok
539: */
void *
sched_hook_signal(void *task, void *arg __unused)
{
#if SUP_ENABLE == KQ_ENABLE
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* ignore signal */
	/* disposition is set to SIG_IGN so the signal is observed only through
	 * EVFILT_SIGNAL; the cancel hook restores SIG_DFL */
	signal(TASK_VAL(t), SIG_IGN);

	/* register an edge-triggered signal filter; ident is the signal number */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
#endif
	/* non-blocking change submission; failures go to the exception hook */
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#else
	/* select() backend: sigaction-based delivery is disabled (dead code kept
	 * for reference); signals are presumably handled elsewhere in this build
	 * — NOTE(review): confirm against the select dispatcher */
#if 0
	sched_task_t *t = task;
	struct sigaction sa;

	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = _sched_sigHandler;
	sa.sa_flags = SA_RESETHAND | SA_RESTART;

	if (sigaction(TASK_VAL(t), &sa, NULL) == -1) {
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}
#endif	/* 0 */
#endif
	return NULL;
}
587:
588: /*
589: * sched_hook_user() - Default USER hook
590: *
591: * @task = current task
592: * @arg = unused
593: * return: <0 errors and 0 ok
594: */
595: #ifdef EVFILT_USER
596: void *
597: sched_hook_user(void *task, void *arg __unused)
598: {
599: #if SUP_ENABLE == KQ_ENABLE
600: sched_task_t *t = task;
601: struct kevent chg[1];
602: struct timespec timeout = { 0, 0 };
603:
604: if (!t || !TASK_ROOT(t))
605: return (void*) -1;
606:
607: #ifdef __NetBSD__
608: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
609: 0, (intptr_t) TASK_VAL(t));
610: #else
611: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
612: 0, (void*) TASK_VAL(t));
613: #endif
614: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
615: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
616: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
617: else
618: LOGERR;
619: return (void*) -1;
620: }
621:
622: #endif
623: return NULL;
624: }
625: #endif
626:
627: /*
628: * sched_hook_fetch() - Default FETCH hook
629: *
630: * @root = root task
631: * @arg = unused
632: * return: NULL error or !=NULL fetched task
633: */
634: void *
635: sched_hook_fetch(void *root, void *arg __unused)
636: {
637: sched_root_task_t *r = root;
638: sched_task_t *task, *tmp;
639: struct timespec now, m, mtmp;
640: #if SUP_ENABLE == KQ_ENABLE
641: struct kevent evt[1], res[KQ_EVENTS];
642: struct timespec *timeout;
643: #else
644: struct timeval *timeout, tv;
645: fd_set rfd, wfd, xfd;
646: #endif
647: register int i, flg;
648: int en;
649: #ifdef AIO_SUPPORT
650: int len, fd;
651: struct aiocb *acb;
652: #ifdef EVFILT_LIO
653: int l;
654: register int j;
655: off_t off;
656: struct aiocb **acbs;
657: struct iovec *iv;
658: #endif /* EVFILT_LIO */
659: #endif /* AIO_SUPPORT */
660:
661: if (!r)
662: return NULL;
663:
664: /* get new task by queue priority */
665: while ((task = TAILQ_FIRST(&r->root_event))) {
666: #ifdef HAVE_LIBPTHREAD
667: pthread_mutex_lock(&r->root_mtx[taskEVENT]);
668: #endif
669: TAILQ_REMOVE(&r->root_event, task, task_node);
670: #ifdef HAVE_LIBPTHREAD
671: pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
672: #endif
673: task->task_type = taskUNUSE;
674: #ifdef HAVE_LIBPTHREAD
675: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
676: #endif
677: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
678: #ifdef HAVE_LIBPTHREAD
679: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
680: #endif
681: return task;
682: }
683: while ((task = TAILQ_FIRST(&r->root_ready))) {
684: #ifdef HAVE_LIBPTHREAD
685: pthread_mutex_lock(&r->root_mtx[taskREADY]);
686: #endif
687: TAILQ_REMOVE(&r->root_ready, task, task_node);
688: #ifdef HAVE_LIBPTHREAD
689: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
690: #endif
691: task->task_type = taskUNUSE;
692: #ifdef HAVE_LIBPTHREAD
693: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
694: #endif
695: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
696: #ifdef HAVE_LIBPTHREAD
697: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
698: #endif
699: return task;
700: }
701:
702: #ifdef TIMER_WITHOUT_SORT
703: clock_gettime(CLOCK_MONOTONIC, &now);
704:
705: sched_timespecclear(&r->root_wait);
706: TAILQ_FOREACH(task, &r->root_timer, task_node) {
707: if (!sched_timespecisset(&r->root_wait))
708: r->root_wait = TASK_TS(task);
709: else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
710: r->root_wait = TASK_TS(task);
711: }
712:
713: if (TAILQ_FIRST(&r->root_timer)) {
714: m = r->root_wait;
715: sched_timespecsub(&m, &now, &mtmp);
716: r->root_wait = mtmp;
717: } else {
718: /* set wait INFTIM */
719: sched_timespecinf(&r->root_wait);
720: }
721: #else /* ! TIMER_WITHOUT_SORT */
722: if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
723: clock_gettime(CLOCK_MONOTONIC, &now);
724:
725: m = TASK_TS(task);
726: sched_timespecsub(&m, &now, &mtmp);
727: r->root_wait = mtmp;
728: } else {
729: /* set wait INFTIM */
730: sched_timespecinf(&r->root_wait);
731: }
732: #endif /* TIMER_WITHOUT_SORT */
733: /* if present member of task, set NOWAIT */
734: if (TAILQ_FIRST(&r->root_task))
735: sched_timespecclear(&r->root_wait);
736:
737: if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1) {
738: #if SUP_ENABLE == KQ_ENABLE
739: timeout = &r->root_wait;
740: #else
741: sched_timespec2val(&r->root_wait, &tv);
742: timeout = &tv;
743: #endif /* KQ_DISABLE */
744: } else if (sched_timespecisinf(&r->root_poll))
745: timeout = NULL;
746: else {
747: #if SUP_ENABLE == KQ_ENABLE
748: timeout = &r->root_poll;
749: #else
750: sched_timespec2val(&r->root_poll, &tv);
751: timeout = &tv;
752: #endif /* KQ_DISABLE */
753: }
754:
755: #if SUP_ENABLE == KQ_ENABLE
756: if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
757: #else
758: rfd = xfd = r->root_fds[0];
759: wfd = r->root_fds[1];
760: if ((en = select(r->root_kq, &rfd, &wfd, &xfd, timeout)) == -1) {
761: #endif /* KQ_DISABLE */
762: if (r->root_hooks.hook_exec.exception) {
763: if (r->root_hooks.hook_exec.exception(r, NULL))
764: return NULL;
765: } else if (errno != EINTR)
766: LOGERR;
767: goto skip_event;
768: }
769:
770: /* kevent dispatcher */
771: now.tv_sec = now.tv_nsec = 0;
772: /* Go and catch the cat into pipes ... */
773: #if SUP_ENABLE == KQ_ENABLE
774: for (i = 0; i < en; i++) {
775: memcpy(evt, &res[i], sizeof evt);
776: evt->flags = EV_DELETE;
777: /* Put read/write task to ready queue */
778: switch (res[i].filter) {
779: case EVFILT_READ:
780: flg = 0;
781: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
782: if (TASK_FD(task) != ((intptr_t) res[i].udata))
783: continue;
784: else {
785: flg++;
786: TASK_RET(task) = res[i].data;
787: TASK_FLAG(task) = (u_long) res[i].fflags;
788: }
789: /* remove read handle */
790: #ifdef HAVE_LIBPTHREAD
791: pthread_mutex_lock(&r->root_mtx[taskREAD]);
792: #endif
793: TAILQ_REMOVE(&r->root_read, task, task_node);
794: #ifdef HAVE_LIBPTHREAD
795: pthread_mutex_unlock(&r->root_mtx[taskREAD]);
796: #endif
797: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
798: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
799: task->task_type = taskUNUSE;
800: #ifdef HAVE_LIBPTHREAD
801: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
802: #endif
803: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
804: #ifdef HAVE_LIBPTHREAD
805: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
806: #endif
807: } else {
808: task->task_type = taskREADY;
809: #ifdef HAVE_LIBPTHREAD
810: pthread_mutex_lock(&r->root_mtx[taskREADY]);
811: #endif
812: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
813: #ifdef HAVE_LIBPTHREAD
814: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
815: #endif
816: }
817: } else {
818: task->task_type = taskREADY;
819: #ifdef HAVE_LIBPTHREAD
820: pthread_mutex_lock(&r->root_mtx[taskREADY]);
821: #endif
822: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
823: #ifdef HAVE_LIBPTHREAD
824: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
825: #endif
826: }
827: }
828: /* if match at least 2, don't remove resouce of event */
829: if (flg > 1)
830: evt->flags ^= evt->flags;
831: break;
832: case EVFILT_WRITE:
833: flg = 0;
834: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
835: if (TASK_FD(task) != ((intptr_t) res[i].udata))
836: continue;
837: else {
838: flg++;
839: TASK_RET(task) = res[i].data;
840: TASK_FLAG(task) = (u_long) res[i].fflags;
841: }
842: /* remove write handle */
843: #ifdef HAVE_LIBPTHREAD
844: pthread_mutex_lock(&r->root_mtx[taskWRITE]);
845: #endif
846: TAILQ_REMOVE(&r->root_write, task, task_node);
847: #ifdef HAVE_LIBPTHREAD
848: pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
849: #endif
850: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
851: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
852: task->task_type = taskUNUSE;
853: #ifdef HAVE_LIBPTHREAD
854: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
855: #endif
856: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
857: #ifdef HAVE_LIBPTHREAD
858: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
859: #endif
860: } else {
861: task->task_type = taskREADY;
862: #ifdef HAVE_LIBPTHREAD
863: pthread_mutex_lock(&r->root_mtx[taskREADY]);
864: #endif
865: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
866: #ifdef HAVE_LIBPTHREAD
867: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
868: #endif
869: }
870: } else {
871: task->task_type = taskREADY;
872: #ifdef HAVE_LIBPTHREAD
873: pthread_mutex_lock(&r->root_mtx[taskREADY]);
874: #endif
875: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
876: #ifdef HAVE_LIBPTHREAD
877: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
878: #endif
879: }
880: }
881: /* if match at least 2, don't remove resouce of event */
882: if (flg > 1)
883: evt->flags ^= evt->flags;
884: break;
885: case EVFILT_TIMER:
886: flg = 0;
887: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
888: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
889: continue;
890: else {
891: flg++;
892: TASK_RET(task) = res[i].data;
893: TASK_FLAG(task) = (u_long) res[i].fflags;
894: }
895: /* remove alarm handle */
896: #ifdef HAVE_LIBPTHREAD
897: pthread_mutex_lock(&r->root_mtx[taskALARM]);
898: #endif
899: TAILQ_REMOVE(&r->root_alarm, task, task_node);
900: #ifdef HAVE_LIBPTHREAD
901: pthread_mutex_unlock(&r->root_mtx[taskALARM]);
902: #endif
903: task->task_type = taskREADY;
904: #ifdef HAVE_LIBPTHREAD
905: pthread_mutex_lock(&r->root_mtx[taskREADY]);
906: #endif
907: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
908: #ifdef HAVE_LIBPTHREAD
909: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
910: #endif
911: }
912: /* if match at least 2, don't remove resouce of event */
913: if (flg > 1)
914: evt->flags ^= evt->flags;
915: break;
916: case EVFILT_VNODE:
917: flg = 0;
918: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
919: if (TASK_FD(task) != ((intptr_t) res[i].udata))
920: continue;
921: else {
922: flg++;
923: TASK_RET(task) = res[i].data;
924: TASK_FLAG(task) = (u_long) res[i].fflags;
925: }
926: /* remove node handle */
927: #ifdef HAVE_LIBPTHREAD
928: pthread_mutex_lock(&r->root_mtx[taskNODE]);
929: #endif
930: TAILQ_REMOVE(&r->root_node, task, task_node);
931: #ifdef HAVE_LIBPTHREAD
932: pthread_mutex_unlock(&r->root_mtx[taskNODE]);
933: #endif
934: task->task_type = taskREADY;
935: #ifdef HAVE_LIBPTHREAD
936: pthread_mutex_lock(&r->root_mtx[taskREADY]);
937: #endif
938: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
939: #ifdef HAVE_LIBPTHREAD
940: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
941: #endif
942: }
943: /* if match at least 2, don't remove resouce of event */
944: if (flg > 1)
945: evt->flags ^= evt->flags;
946: break;
947: case EVFILT_PROC:
948: flg = 0;
949: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
950: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
951: continue;
952: else {
953: flg++;
954: TASK_RET(task) = res[i].data;
955: TASK_FLAG(task) = (u_long) res[i].fflags;
956: }
957: /* remove proc handle */
958: #ifdef HAVE_LIBPTHREAD
959: pthread_mutex_lock(&r->root_mtx[taskPROC]);
960: #endif
961: TAILQ_REMOVE(&r->root_proc, task, task_node);
962: #ifdef HAVE_LIBPTHREAD
963: pthread_mutex_unlock(&r->root_mtx[taskPROC]);
964: #endif
965: task->task_type = taskREADY;
966: #ifdef HAVE_LIBPTHREAD
967: pthread_mutex_lock(&r->root_mtx[taskREADY]);
968: #endif
969: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
970: #ifdef HAVE_LIBPTHREAD
971: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
972: #endif
973: }
974: /* if match at least 2, don't remove resouce of event */
975: if (flg > 1)
976: evt->flags ^= evt->flags;
977: break;
978: case EVFILT_SIGNAL:
979: flg = 0;
980: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
981: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
982: continue;
983: else {
984: flg++;
985: TASK_RET(task) = res[i].data;
986: TASK_FLAG(task) = (u_long) res[i].fflags;
987: }
988: /* remove signal handle */
989: #ifdef HAVE_LIBPTHREAD
990: pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
991: #endif
992: TAILQ_REMOVE(&r->root_signal, task, task_node);
993: #ifdef HAVE_LIBPTHREAD
994: pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
995: #endif
996: task->task_type = taskREADY;
997: #ifdef HAVE_LIBPTHREAD
998: pthread_mutex_lock(&r->root_mtx[taskREADY]);
999: #endif
1000: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1001: #ifdef HAVE_LIBPTHREAD
1002: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1003: #endif
1004: }
1005: /* if match at least 2, don't remove resouce of event */
1006: if (flg > 1)
1007: evt->flags ^= evt->flags;
1008: break;
1009: #ifdef AIO_SUPPORT
1010: case EVFILT_AIO:
1011: flg = 0;
1012: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
1013: acb = (struct aiocb*) TASK_VAL(task);
1014: if (acb != ((struct aiocb*) res[i].udata))
1015: continue;
1016: else {
1017: flg++;
1018: TASK_RET(task) = res[i].data;
1019: TASK_FLAG(task) = (u_long) res[i].fflags;
1020: }
1021: /* remove user handle */
1022: #ifdef HAVE_LIBPTHREAD
1023: pthread_mutex_lock(&r->root_mtx[taskAIO]);
1024: #endif
1025: TAILQ_REMOVE(&r->root_aio, task, task_node);
1026: #ifdef HAVE_LIBPTHREAD
1027: pthread_mutex_unlock(&r->root_mtx[taskAIO]);
1028: #endif
1029: task->task_type = taskREADY;
1030: #ifdef HAVE_LIBPTHREAD
1031: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1032: #endif
1033: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1034: #ifdef HAVE_LIBPTHREAD
1035: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1036: #endif
1037: fd = acb->aio_fildes;
1038: if ((len = aio_return(acb)) != -1) {
1039: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
1040: LOGERR;
1041: } else
1042: LOGERR;
1043: free(acb);
1044: TASK_DATLEN(task) = (u_long) len;
1045: TASK_FD(task) = fd;
1046: }
1047: /* if match at least 2, don't remove resouce of event */
1048: if (flg > 1)
1049: evt->flags ^= evt->flags;
1050: break;
1051: #ifdef EVFILT_LIO
1052: case EVFILT_LIO:
1053: flg = 0;
1054: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
1055: acbs = (struct aiocb**) TASK_VAL(task);
1056: if (acbs != ((struct aiocb**) res[i].udata))
1057: continue;
1058: else {
1059: flg++;
1060: TASK_RET(task) = res[i].data;
1061: TASK_FLAG(task) = (u_long) res[i].fflags;
1062: }
1063: /* remove user handle */
1064: #ifdef HAVE_LIBPTHREAD
1065: pthread_mutex_lock(&r->root_mtx[taskLIO]);
1066: #endif
1067: TAILQ_REMOVE(&r->root_lio, task, task_node);
1068: #ifdef HAVE_LIBPTHREAD
1069: pthread_mutex_unlock(&r->root_mtx[taskLIO]);
1070: #endif
1071: task->task_type = taskREADY;
1072: #ifdef HAVE_LIBPTHREAD
1073: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1074: #endif
1075: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1076: #ifdef HAVE_LIBPTHREAD
1077: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1078: #endif
1079: iv = (struct iovec*) TASK_DATA(task);
1080: fd = acbs[0]->aio_fildes;
1081: off = acbs[0]->aio_offset;
1082: for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
1083: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
1084: l = 0;
1085: else
1086: l = iv[i].iov_len;
1087: free(acbs[i]);
1088: }
1089: free(acbs);
1090: TASK_DATLEN(task) = (u_long) len;
1091: TASK_FD(task) = fd;
1092:
1093: if (lseek(fd, off + len, SEEK_CUR) == -1)
1094: LOGERR;
1095: }
1096: /* if match at least 2, don't remove resouce of event */
1097: if (flg > 1)
1098: evt->flags ^= evt->flags;
1099: break;
1100: #endif /* EVFILT_LIO */
1101: #endif /* AIO_SUPPORT */
1102: #ifdef EVFILT_USER
1103: case EVFILT_USER:
1104: flg = 0;
1105: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
1106: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
1107: continue;
1108: else {
1109: flg++;
1110: TASK_RET(task) = res[i].data;
1111: TASK_FLAG(task) = (u_long) res[i].fflags;
1112: }
1113: /* remove user handle */
1114: #ifdef HAVE_LIBPTHREAD
1115: pthread_mutex_lock(&r->root_mtx[taskUSER]);
1116: #endif
1117: TAILQ_REMOVE(&r->root_user, task, task_node);
1118: #ifdef HAVE_LIBPTHREAD
1119: pthread_mutex_unlock(&r->root_mtx[taskUSER]);
1120: #endif
1121: task->task_type = taskREADY;
1122: #ifdef HAVE_LIBPTHREAD
1123: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1124: #endif
1125: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1126: #ifdef HAVE_LIBPTHREAD
1127: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1128: #endif
1129: }
1130: /* if match at least 2, don't remove resouce of event */
1131: if (flg > 1)
1132: evt->flags ^= evt->flags;
1133: break;
1134: #endif /* EVFILT_USER */
1135: }
1136: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
1137: if (r->root_hooks.hook_exec.exception) {
1138: if (r->root_hooks.hook_exec.exception(r, NULL))
1139: return NULL;
1140: } else
1141: LOGERR;
1142: }
1143: }
1144: #else /* end of kevent dispatcher */
1145: for (i = 0; i < r->root_kq; i++) {
1146: if (FD_ISSET(i, &rfd) || FD_ISSET(i, &xfd)) {
1147: flg = 0;
1148: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
1149: if (TASK_FD(task) != i)
1150: continue;
1151: else {
1152: flg++;
1153: TASK_FLAG(task) = ioctl(TASK_FD(task),
1154: FIONREAD, &TASK_RET(task));
1155: }
1156: /* remove read handle */
1157: #ifdef HAVE_LIBPTHREAD
1158: pthread_mutex_lock(&r->root_mtx[taskREAD]);
1159: #endif
1160: TAILQ_REMOVE(&r->root_read, task, task_node);
1161: #ifdef HAVE_LIBPTHREAD
1162: pthread_mutex_unlock(&r->root_mtx[taskREAD]);
1163: #endif
1164: if (r->root_hooks.hook_exec.exception) {
1165: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1166: task->task_type = taskUNUSE;
1167: #ifdef HAVE_LIBPTHREAD
1168: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1169: #endif
1170: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1171: #ifdef HAVE_LIBPTHREAD
1172: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1173: #endif
1174: } else {
1175: task->task_type = taskREADY;
1176: #ifdef HAVE_LIBPTHREAD
1177: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1178: #endif
1179: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1180: #ifdef HAVE_LIBPTHREAD
1181: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1182: #endif
1183: }
1184: } else {
1185: task->task_type = taskREADY;
1186: #ifdef HAVE_LIBPTHREAD
1187: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1188: #endif
1189: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1190: #ifdef HAVE_LIBPTHREAD
1191: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1192: #endif
1193: }
1194: }
1195: /* if match equal to 1, remove resouce */
1196: if (flg == 1)
1197: FD_CLR(i, &r->root_fds[0]);
1198: }
1199:
1200: if (FD_ISSET(i, &wfd)) {
1201: flg = 0;
1202: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
1203: if (TASK_FD(task) != i)
1204: continue;
1205: else {
1206: flg++;
1207: TASK_FLAG(task) = ioctl(TASK_FD(task),
1208: FIONWRITE, &TASK_RET(task));
1209: }
1210: /* remove write handle */
1211: #ifdef HAVE_LIBPTHREAD
1212: pthread_mutex_lock(&r->root_mtx[taskWRITE]);
1213: #endif
1214: TAILQ_REMOVE(&r->root_write, task, task_node);
1215: #ifdef HAVE_LIBPTHREAD
1216: pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
1217: #endif
1218: if (r->root_hooks.hook_exec.exception) {
1219: if (r->root_hooks.hook_exec.exception(r, NULL)) {
1220: task->task_type = taskUNUSE;
1221: #ifdef HAVE_LIBPTHREAD
1222: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1223: #endif
1224: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1225: #ifdef HAVE_LIBPTHREAD
1226: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1227: #endif
1228: } else {
1229: task->task_type = taskREADY;
1230: #ifdef HAVE_LIBPTHREAD
1231: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1232: #endif
1233: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1234: #ifdef HAVE_LIBPTHREAD
1235: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1236: #endif
1237: }
1238: } else {
1239: task->task_type = taskREADY;
1240: #ifdef HAVE_LIBPTHREAD
1241: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1242: #endif
1243: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1244: #ifdef HAVE_LIBPTHREAD
1245: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1246: #endif
1247: }
1248: }
1249: /* if match equal to 1, remove resouce */
1250: if (flg == 1)
1251: FD_CLR(i, &r->root_fds[1]);
1252: }
1253: }
1254:
1255: /* optimize select */
1256: for (i = r->root_kq - 1; i > 2; i--)
1257: if (FD_ISSET(i, &r->root_fds[0]) || FD_ISSET(i, &r->root_fds[1]))
1258: break;
1259: if (i > 2)
1260: r->root_kq = i + 1;
1261: #endif /* KQ_DISABLE */
1262:
1263: skip_event:
1264: /* timer update & put in ready queue */
1265: clock_gettime(CLOCK_MONOTONIC, &now);
1266:
1267: TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
1268: if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
1269: #ifdef HAVE_LIBPTHREAD
1270: pthread_mutex_lock(&r->root_mtx[taskTIMER]);
1271: #endif
1272: TAILQ_REMOVE(&r->root_timer, task, task_node);
1273: #ifdef HAVE_LIBPTHREAD
1274: pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
1275: #endif
1276: task->task_type = taskREADY;
1277: #ifdef HAVE_LIBPTHREAD
1278: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1279: #endif
1280: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1281: #ifdef HAVE_LIBPTHREAD
1282: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1283: #endif
1284: }
1285:
1286: /* put regular task priority task to ready queue,
1287: if there is no ready task or reach max missing hit for regular task */
1288: if ((task = TAILQ_FIRST(&r->root_task))) {
1289: if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
1290: r->root_miss ^= r->root_miss;
1291:
1292: #ifdef HAVE_LIBPTHREAD
1293: pthread_mutex_lock(&r->root_mtx[taskTASK]);
1294: #endif
1295: TAILQ_REMOVE(&r->root_task, task, task_node);
1296: #ifdef HAVE_LIBPTHREAD
1297: pthread_mutex_unlock(&r->root_mtx[taskTASK]);
1298: #endif
1299: task->task_type = taskREADY;
1300: #ifdef HAVE_LIBPTHREAD
1301: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1302: #endif
1303: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1304: #ifdef HAVE_LIBPTHREAD
1305: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1306: #endif
1307: } else
1308: r->root_miss++;
1309: } else
1310: r->root_miss ^= r->root_miss;
1311:
1312: /* OK, lets get ready task !!! */
1313: task = TAILQ_FIRST(&r->root_ready);
1314: if (!(task))
1315: return NULL;
1316:
1317: #ifdef HAVE_LIBPTHREAD
1318: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1319: #endif
1320: TAILQ_REMOVE(&r->root_ready, task, task_node);
1321: #ifdef HAVE_LIBPTHREAD
1322: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1323: #endif
1324: task->task_type = taskUNUSE;
1325: #ifdef HAVE_LIBPTHREAD
1326: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1327: #endif
1328: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1329: #ifdef HAVE_LIBPTHREAD
1330: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1331: #endif
1332: return task;
1333: }
1334:
1335: /*
1336: * sched_hook_exception() - Default EXCEPTION hook
1337: *
1338: * @root = root task
1339: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1340: * return: <0 errors and 0 ok
1341: */
1342: void *
1343: sched_hook_exception(void *root, void *arg)
1344: {
1345: sched_root_task_t *r = root;
1346:
1347: if (!r)
1348: return NULL;
1349:
1350: /* custom exception handling ... */
1351: if (arg) {
1352: if (arg == (void*) EV_EOF)
1353: return NULL;
1354: return (void*) -1; /* raise scheduler error!!! */
1355: }
1356:
1357: /* if error hook exists */
1358: if (r->root_hooks.hook_root.error)
1359: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1360:
1361: /* default case! */
1362: LOGERR;
1363: return NULL;
1364: }
1365:
1366: /*
1367: * sched_hook_condition() - Default CONDITION hook
1368: *
1369: * @root = root task
1370: * @arg = killState from schedRun()
1371: * return: NULL kill scheduler loop or !=NULL ok
1372: */
1373: void *
1374: sched_hook_condition(void *root, void *arg)
1375: {
1376: sched_root_task_t *r = root;
1377:
1378: if (!r)
1379: return NULL;
1380:
1381: return (void*) (r->root_cond - *(intptr_t*) arg);
1382: }
1383:
1384: /*
1385: * sched_hook_rtc() - Default RTC hook
1386: *
1387: * @task = current task
1388: * @arg = unused
 * return: NULL on success or (void*) -1 on error
1390: */
1391: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_DELETE)
1392: void *
1393: sched_hook_rtc(void *task, void *arg __unused)
1394: {
1395: sched_task_t *sigt = NULL, *t = task;
1396: struct itimerspec its;
1397: struct sigevent evt;
1398: timer_t tmr;
1399:
1400: if (!t || !TASK_ROOT(t))
1401: return (void*) -1;
1402:
1403: memset(&evt, 0, sizeof evt);
1404: evt.sigev_notify = SIGEV_SIGNAL;
1405: evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
1406: evt.sigev_value.sival_ptr = TASK_DATA(t);
1407:
1408: if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
1409: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1410: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1411: else
1412: LOGERR;
1413: return (void*) -1;
1414: } else
1415: TASK_FLAG(t) = (u_long) tmr;
1416:
1417: if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
1418: t, (size_t) tmr))) {
1419: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1420: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1421: else
1422: LOGERR;
1423: timer_delete(tmr);
1424: return (void*) -1;
1425: } else
1426: TASK_RET(t) = (uintptr_t) sigt;
1427:
1428: memset(&its, 0, sizeof its);
1429: its.it_value.tv_sec = t->task_val.ts.tv_sec;
1430: its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
1431:
1432: if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
1433: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1434: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1435: else
1436: LOGERR;
1437: schedCancel(sigt);
1438: timer_delete(tmr);
1439: return (void*) -1;
1440: }
1441:
1442: return NULL;
1443: }
1444: #endif /* HAVE_TIMER_CREATE */
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */