1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.21 2013/08/26 08:20:55 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: *
53: * @root = root task
54: * @arg = unused
55: * return: <0 errors and 0 ok
56: */
57: void *
58: sched_hook_init(void *root, void *arg __unused)
59: {
60: sched_root_task_t *r = root;
61:
62: if (!r)
63: return (void*) -1;
64:
65: r->root_kq = kqueue();
66: if (r->root_kq == -1) {
67: LOGERR;
68: return (void*) -1;
69: }
70:
71: return NULL;
72: }
73:
74: /*
75: * sched_hook_fini() - Default FINI hook
76: *
77: * @root = root task
78: * @arg = unused
79: * return: <0 errors and 0 ok
80: */
81: void *
82: sched_hook_fini(void *root, void *arg __unused)
83: {
84: sched_root_task_t *r = root;
85:
86: if (!r)
87: return (void*) -1;
88:
89: if (r->root_kq > 2) {
90: close(r->root_kq);
91: r->root_kq = 0;
92: }
93:
94: return NULL;
95: }
96:
97: /*
98: * sched_hook_cancel() - Default CANCEL hook
99: *
100: * @task = current task
101: * @arg = unused
102: * return: <0 errors and 0 ok
103: */
104: void *
105: sched_hook_cancel(void *task, void *arg __unused)
106: {
107: sched_task_t *t = task;
108: struct kevent chg[1];
109: struct timespec timeout = { 0, 0 };
110: #ifdef AIO_SUPPORT
111: struct aiocb *acb;
112: #ifdef EVFILT_LIO
113: register int i = 0;
114: struct aiocb **acbs;
115: #endif /* EVFILT_LIO */
116: #endif /* AIO_SUPPORT */
117:
118: if (!t || !TASK_ROOT(t))
119: return (void*) -1;
120:
121: switch (TASK_TYPE(t)) {
122: case taskREAD:
123: #ifdef __NetBSD__
124: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
125: #else
126: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
127: #endif
128: break;
129: case taskWRITE:
130: #ifdef __NetBSD__
131: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
132: #else
133: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
134: #endif
135: break;
136: case taskALARM:
137: #ifdef __NetBSD__
138: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
139: 0, 0, (intptr_t) TASK_DATA(t));
140: #else
141: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
142: 0, 0, (void*) TASK_DATA(t));
143: #endif
144: break;
145: case taskNODE:
146: #ifdef __NetBSD__
147: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
148: #else
149: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
150: #endif
151: break;
152: case taskPROC:
153: #ifdef __NetBSD__
154: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
155: #else
156: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
157: #endif
158: break;
159: case taskSIGNAL:
160: #ifdef __NetBSD__
161: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
162: #else
163: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
164: #endif
165: /* restore signal */
166: signal(TASK_VAL(t), SIG_DFL);
167: break;
168: #ifdef AIO_SUPPORT
169: case taskAIO:
170: #ifdef __NetBSD__
171: EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
172: #else
173: EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
174: #endif
175: acb = (struct aiocb*) TASK_VAL(t);
176: if (acb) {
177: if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
178: aio_return(acb);
179: free(acb);
180: TASK_VAL(t) = 0;
181: }
182: break;
183: #ifdef EVFILT_LIO
184: case taskLIO:
185: #ifdef __NetBSD__
186: EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
187: #else
188: EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
189: #endif
190: acbs = (struct aiocb**) TASK_VAL(t);
191: if (acbs) {
192: for (i = 0; i < TASK_DATLEN(t); i++) {
193: if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
194: aio_return(acbs[i]);
195: free(acbs[i]);
196: }
197: free(acbs);
198: TASK_VAL(t) = 0;
199: }
200: break;
201: #endif /* EVFILT_LIO */
202: #endif /* AIO_SUPPORT */
203: #ifdef EVFILT_USER
204: case taskUSER:
205: #ifdef __NetBSD__
206: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
207: #else
208: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
209: #endif
210: break;
211: #endif /* EVFILT_USER */
212: case taskTHREAD:
213: #ifdef HAVE_LIBPTHREAD
214: pthread_cancel((pthread_t) TASK_VAL(t));
215: #endif
216: return NULL;
217: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
218: case taskRTC:
219: timer_delete((timer_t) TASK_FLAG(t));
220: schedCancel((sched_task_t*) TASK_RET(t));
221: return NULL;
222: #endif /* HAVE_TIMER_CREATE */
223: default:
224: return NULL;
225: }
226:
227: kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
228: return NULL;
229: }
230:
231: #ifdef HAVE_LIBPTHREAD
232: /*
233: * sched_hook_thread() - Default THREAD hook
234: *
235: * @task = current task
236: * @arg = pthread attributes
237: * return: <0 errors and 0 ok
238: */
239: void *
240: sched_hook_thread(void *task, void *arg)
241: {
242: sched_task_t *t = task;
243: pthread_t tid;
244: sigset_t s, o;
245:
246: if (!t || !TASK_ROOT(t))
247: return (void*) -1;
248:
249: sigfillset(&s);
250: pthread_sigmask(SIG_BLOCK, &s, &o);
251: if ((errno = pthread_create(&tid, (pthread_attr_t*) arg,
252: (void *(*)(void*)) _sched_threadWrapper, t))) {
253: LOGERR;
254: pthread_sigmask(SIG_SETMASK, &o, NULL);
255: return (void*) -1;
256: } else
257: TASK_VAL(t) = (u_long) tid;
258:
259: if (!TASK_ISLOCKED(t))
260: TASK_LOCK(t);
261:
262: pthread_sigmask(SIG_SETMASK, &o, NULL);
263: return NULL;
264: }
265: #endif
266:
267: /*
268: * sched_hook_read() - Default READ hook
269: *
270: * @task = current task
271: * @arg = unused
272: * return: <0 errors and 0 ok
273: */
274: void *
275: sched_hook_read(void *task, void *arg __unused)
276: {
277: sched_task_t *t = task;
278: struct kevent chg[1];
279: struct timespec timeout = { 0, 0 };
280:
281: if (!t || !TASK_ROOT(t))
282: return (void*) -1;
283:
284: #ifdef __NetBSD__
285: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
286: #else
287: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
288: #endif
289: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
290: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
291: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
292: else
293: LOGERR;
294: return (void*) -1;
295: }
296:
297: return NULL;
298: }
299:
300: /*
301: * sched_hook_write() - Default WRITE hook
302: *
303: * @task = current task
304: * @arg = unused
305: * return: <0 errors and 0 ok
306: */
307: void *
308: sched_hook_write(void *task, void *arg __unused)
309: {
310: sched_task_t *t = task;
311: struct kevent chg[1];
312: struct timespec timeout = { 0, 0 };
313:
314: if (!t || !TASK_ROOT(t))
315: return (void*) -1;
316:
317: #ifdef __NetBSD__
318: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
319: #else
320: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
321: #endif
322: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
323: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
324: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
325: else
326: LOGERR;
327: return (void*) -1;
328: }
329:
330: return NULL;
331: }
332:
333: /*
334: * sched_hook_alarm() - Default ALARM hook
335: *
336: * @task = current task
337: * @arg = unused
338: * return: <0 errors and 0 ok
339: */
340: void *
341: sched_hook_alarm(void *task, void *arg __unused)
342: {
343: sched_task_t *t = task;
344: struct kevent chg[1];
345: struct timespec timeout = { 0, 0 };
346:
347: if (!t || !TASK_ROOT(t))
348: return (void*) -1;
349:
350: #ifdef __NetBSD__
351: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
352: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
353: (intptr_t) TASK_DATA(t));
354: #else
355: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_CLEAR, 0,
356: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
357: (void*) TASK_DATA(t));
358: #endif
359: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
360: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
361: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
362: else
363: LOGERR;
364: return (void*) -1;
365: }
366:
367: return NULL;
368: }
369:
370: /*
371: * sched_hook_node() - Default NODE hook
372: *
373: * @task = current task
374: * @arg = unused
375: * return: <0 errors and 0 ok
376: */
377: void *
378: sched_hook_node(void *task, void *arg __unused)
379: {
380: sched_task_t *t = task;
381: struct kevent chg[1];
382: struct timespec timeout = { 0, 0 };
383:
384: if (!t || !TASK_ROOT(t))
385: return (void*) -1;
386:
387: #ifdef __NetBSD__
388: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
389: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
390: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
391: #else
392: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
393: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
394: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
395: #endif
396: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
397: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
398: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
399: else
400: LOGERR;
401: return (void*) -1;
402: }
403:
404: return NULL;
405: }
406:
407: /*
408: * sched_hook_proc() - Default PROC hook
409: *
410: * @task = current task
411: * @arg = unused
412: * return: <0 errors and 0 ok
413: */
414: void *
415: sched_hook_proc(void *task, void *arg __unused)
416: {
417: sched_task_t *t = task;
418: struct kevent chg[1];
419: struct timespec timeout = { 0, 0 };
420:
421: if (!t || !TASK_ROOT(t))
422: return (void*) -1;
423:
424: #ifdef __NetBSD__
425: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
426: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
427: #else
428: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
429: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
430: #endif
431: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
432: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
433: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
434: else
435: LOGERR;
436: return (void*) -1;
437: }
438:
439: return NULL;
440: }
441:
442: /*
443: * sched_hook_signal() - Default SIGNAL hook
444: *
445: * @task = current task
446: * @arg = unused
447: * return: <0 errors and 0 ok
448: */
449: void *
450: sched_hook_signal(void *task, void *arg __unused)
451: {
452: sched_task_t *t = task;
453: struct kevent chg[1];
454: struct timespec timeout = { 0, 0 };
455:
456: if (!t || !TASK_ROOT(t))
457: return (void*) -1;
458:
459: /* ignore signal */
460: signal(TASK_VAL(t), SIG_IGN);
461:
462: #ifdef __NetBSD__
463: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_VAL(t));
464: #else
465: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_VAL(t));
466: #endif
467: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
468: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
469: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
470: else
471: LOGERR;
472: return (void*) -1;
473: }
474:
475: return NULL;
476: }
477:
478: /*
479: * sched_hook_user() - Default USER hook
480: *
481: * @task = current task
482: * @arg = unused
483: * return: <0 errors and 0 ok
484: */
485: #ifdef EVFILT_USER
486: void *
487: sched_hook_user(void *task, void *arg __unused)
488: {
489: sched_task_t *t = task;
490: struct kevent chg[1];
491: struct timespec timeout = { 0, 0 };
492:
493: if (!t || !TASK_ROOT(t))
494: return (void*) -1;
495:
496: #ifdef __NetBSD__
497: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
498: 0, (intptr_t) TASK_VAL(t));
499: #else
500: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
501: 0, (void*) TASK_VAL(t));
502: #endif
503: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
504: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
505: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
506: else
507: LOGERR;
508: return (void*) -1;
509: }
510:
511: return NULL;
512: }
513: #endif
514:
515: /*
516: * sched_hook_fetch() - Default FETCH hook
517: *
518: * @root = root task
519: * @arg = unused
520: * return: NULL error or !=NULL fetched task
521: */
522: void *
523: sched_hook_fetch(void *root, void *arg __unused)
524: {
525: sched_root_task_t *r = root;
526: sched_task_t *task, *tmp;
527: struct timespec now, m, mtmp;
528: struct timespec *timeout;
529: struct kevent evt[1], res[KQ_EVENTS];
530: register int i, flg;
531: int en;
532: #ifdef AIO_SUPPORT
533: int len, fd;
534: struct aiocb *acb;
535: #ifdef EVFILT_LIO
536: int l;
537: register int j;
538: off_t off;
539: struct aiocb **acbs;
540: struct iovec *iv;
541: #endif /* EVFILT_LIO */
542: #endif /* AIO_SUPPORT */
543:
544: if (!r)
545: return NULL;
546:
547: /* get new task by queue priority */
548: while ((task = TAILQ_FIRST(&r->root_event))) {
549: #ifdef HAVE_LIBPTHREAD
550: pthread_mutex_lock(&r->root_mtx[taskEVENT]);
551: #endif
552: TAILQ_REMOVE(&r->root_event, task, task_node);
553: #ifdef HAVE_LIBPTHREAD
554: pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
555: #endif
556: task->task_type = taskUNUSE;
557: #ifdef HAVE_LIBPTHREAD
558: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
559: #endif
560: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
561: #ifdef HAVE_LIBPTHREAD
562: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
563: #endif
564: return task;
565: }
566: while ((task = TAILQ_FIRST(&r->root_ready))) {
567: #ifdef HAVE_LIBPTHREAD
568: pthread_mutex_lock(&r->root_mtx[taskREADY]);
569: #endif
570: TAILQ_REMOVE(&r->root_ready, task, task_node);
571: #ifdef HAVE_LIBPTHREAD
572: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
573: #endif
574: task->task_type = taskUNUSE;
575: #ifdef HAVE_LIBPTHREAD
576: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
577: #endif
578: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
579: #ifdef HAVE_LIBPTHREAD
580: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
581: #endif
582: return task;
583: }
584:
585: #ifdef TIMER_WITHOUT_SORT
586: clock_gettime(CLOCK_MONOTONIC, &now);
587:
588: sched_timespecclear(&r->root_wait);
589: TAILQ_FOREACH(task, &r->root_timer, task_node) {
590: if (!sched_timespecisset(&r->root_wait))
591: r->root_wait = TASK_TS(task);
592: else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
593: r->root_wait = TASK_TS(task);
594: }
595:
596: if (TAILQ_FIRST(&r->root_timer)) {
597: m = r->root_wait;
598: sched_timespecsub(&m, &now, &mtmp);
599: r->root_wait = mtmp;
600: } else {
601: /* set wait INFTIM */
602: sched_timespecinf(&r->root_wait);
603: }
604: #else
605: if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
606: clock_gettime(CLOCK_MONOTONIC, &now);
607:
608: m = TASK_TS(task);
609: sched_timespecsub(&m, &now, &mtmp);
610: r->root_wait = mtmp;
611: } else {
612: /* set wait INFTIM */
613: sched_timespecinf(&r->root_wait);
614: }
615: #endif
616: /* if present member of task, set NOWAIT */
617: if (TAILQ_FIRST(&r->root_task))
618: sched_timespecclear(&r->root_wait);
619:
620: if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
621: timeout = &r->root_wait;
622: else if (sched_timespecisinf(&r->root_poll))
623: timeout = NULL;
624: else
625: timeout = &r->root_poll;
626: if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
627: if (r->root_hooks.hook_exec.exception) {
628: if (r->root_hooks.hook_exec.exception(r, NULL))
629: return NULL;
630: } else if (errno != EINTR)
631: LOGERR;
632: return NULL;
633: }
634:
635: now.tv_sec = now.tv_nsec = 0;
636: /* Go and catch the cat into pipes ... */
637: for (i = 0; i < en; i++) {
638: memcpy(evt, &res[i], sizeof evt);
639: evt->flags = EV_DELETE;
640: /* Put read/write task to ready queue */
641: switch (res[i].filter) {
642: case EVFILT_READ:
643: flg = 0;
644: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
645: if (TASK_FD(task) != ((intptr_t) res[i].udata))
646: continue;
647: else {
648: flg++;
649: TASK_RET(task) = res[i].data;
650: TASK_FLAG(task) = (u_long) res[i].fflags;
651: }
652: /* remove read handle */
653: #ifdef HAVE_LIBPTHREAD
654: pthread_mutex_lock(&r->root_mtx[taskREAD]);
655: #endif
656: TAILQ_REMOVE(&r->root_read, task, task_node);
657: #ifdef HAVE_LIBPTHREAD
658: pthread_mutex_unlock(&r->root_mtx[taskREAD]);
659: #endif
660: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
661: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
662: task->task_type = taskUNUSE;
663: #ifdef HAVE_LIBPTHREAD
664: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
665: #endif
666: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
667: #ifdef HAVE_LIBPTHREAD
668: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
669: #endif
670: } else {
671: task->task_type = taskREADY;
672: #ifdef HAVE_LIBPTHREAD
673: pthread_mutex_lock(&r->root_mtx[taskREADY]);
674: #endif
675: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
676: #ifdef HAVE_LIBPTHREAD
677: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
678: #endif
679: }
680: } else {
681: task->task_type = taskREADY;
682: #ifdef HAVE_LIBPTHREAD
683: pthread_mutex_lock(&r->root_mtx[taskREADY]);
684: #endif
685: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
686: #ifdef HAVE_LIBPTHREAD
687: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
688: #endif
689: }
690: }
691: /* if match at least 2, don't remove resouce of event */
692: if (flg > 1)
693: evt->flags ^= evt->flags;
694: break;
695: case EVFILT_WRITE:
696: flg = 0;
697: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
698: if (TASK_FD(task) != ((intptr_t) res[i].udata))
699: continue;
700: else {
701: flg++;
702: TASK_RET(task) = res[i].data;
703: TASK_FLAG(task) = (u_long) res[i].fflags;
704: }
705: /* remove write handle */
706: #ifdef HAVE_LIBPTHREAD
707: pthread_mutex_lock(&r->root_mtx[taskWRITE]);
708: #endif
709: TAILQ_REMOVE(&r->root_write, task, task_node);
710: #ifdef HAVE_LIBPTHREAD
711: pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
712: #endif
713: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
714: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
715: task->task_type = taskUNUSE;
716: #ifdef HAVE_LIBPTHREAD
717: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
718: #endif
719: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
720: #ifdef HAVE_LIBPTHREAD
721: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
722: #endif
723: } else {
724: task->task_type = taskREADY;
725: #ifdef HAVE_LIBPTHREAD
726: pthread_mutex_lock(&r->root_mtx[taskREADY]);
727: #endif
728: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
729: #ifdef HAVE_LIBPTHREAD
730: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
731: #endif
732: }
733: } else {
734: task->task_type = taskREADY;
735: #ifdef HAVE_LIBPTHREAD
736: pthread_mutex_lock(&r->root_mtx[taskREADY]);
737: #endif
738: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
739: #ifdef HAVE_LIBPTHREAD
740: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
741: #endif
742: }
743: }
744: /* if match at least 2, don't remove resouce of event */
745: if (flg > 1)
746: evt->flags ^= evt->flags;
747: break;
748: case EVFILT_TIMER:
749: flg = 0;
750: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
751: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
752: continue;
753: else {
754: flg++;
755: TASK_RET(task) = res[i].data;
756: TASK_FLAG(task) = (u_long) res[i].fflags;
757: }
758: /* remove alarm handle */
759: #ifdef HAVE_LIBPTHREAD
760: pthread_mutex_lock(&r->root_mtx[taskALARM]);
761: #endif
762: TAILQ_REMOVE(&r->root_alarm, task, task_node);
763: #ifdef HAVE_LIBPTHREAD
764: pthread_mutex_unlock(&r->root_mtx[taskALARM]);
765: #endif
766: task->task_type = taskREADY;
767: #ifdef HAVE_LIBPTHREAD
768: pthread_mutex_lock(&r->root_mtx[taskREADY]);
769: #endif
770: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
771: #ifdef HAVE_LIBPTHREAD
772: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
773: #endif
774: }
775: /* if match at least 2, don't remove resouce of event */
776: if (flg > 1)
777: evt->flags ^= evt->flags;
778: break;
779: case EVFILT_VNODE:
780: flg = 0;
781: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
782: if (TASK_FD(task) != ((intptr_t) res[i].udata))
783: continue;
784: else {
785: flg++;
786: TASK_RET(task) = res[i].data;
787: TASK_FLAG(task) = (u_long) res[i].fflags;
788: }
789: /* remove node handle */
790: #ifdef HAVE_LIBPTHREAD
791: pthread_mutex_lock(&r->root_mtx[taskNODE]);
792: #endif
793: TAILQ_REMOVE(&r->root_node, task, task_node);
794: #ifdef HAVE_LIBPTHREAD
795: pthread_mutex_unlock(&r->root_mtx[taskNODE]);
796: #endif
797: task->task_type = taskREADY;
798: #ifdef HAVE_LIBPTHREAD
799: pthread_mutex_lock(&r->root_mtx[taskREADY]);
800: #endif
801: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
802: #ifdef HAVE_LIBPTHREAD
803: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
804: #endif
805: }
806: /* if match at least 2, don't remove resouce of event */
807: if (flg > 1)
808: evt->flags ^= evt->flags;
809: break;
810: case EVFILT_PROC:
811: flg = 0;
812: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
813: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
814: continue;
815: else {
816: flg++;
817: TASK_RET(task) = res[i].data;
818: TASK_FLAG(task) = (u_long) res[i].fflags;
819: }
820: /* remove proc handle */
821: #ifdef HAVE_LIBPTHREAD
822: pthread_mutex_lock(&r->root_mtx[taskPROC]);
823: #endif
824: TAILQ_REMOVE(&r->root_proc, task, task_node);
825: #ifdef HAVE_LIBPTHREAD
826: pthread_mutex_unlock(&r->root_mtx[taskPROC]);
827: #endif
828: task->task_type = taskREADY;
829: #ifdef HAVE_LIBPTHREAD
830: pthread_mutex_lock(&r->root_mtx[taskREADY]);
831: #endif
832: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
833: #ifdef HAVE_LIBPTHREAD
834: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
835: #endif
836: }
837: /* if match at least 2, don't remove resouce of event */
838: if (flg > 1)
839: evt->flags ^= evt->flags;
840: break;
841: case EVFILT_SIGNAL:
842: flg = 0;
843: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
844: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
845: continue;
846: else {
847: flg++;
848: TASK_RET(task) = res[i].data;
849: TASK_FLAG(task) = (u_long) res[i].fflags;
850: }
851: /* remove signal handle */
852: #ifdef HAVE_LIBPTHREAD
853: pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
854: #endif
855: TAILQ_REMOVE(&r->root_signal, task, task_node);
856: #ifdef HAVE_LIBPTHREAD
857: pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
858: #endif
859: task->task_type = taskREADY;
860: #ifdef HAVE_LIBPTHREAD
861: pthread_mutex_lock(&r->root_mtx[taskREADY]);
862: #endif
863: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
864: #ifdef HAVE_LIBPTHREAD
865: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
866: #endif
867: }
868: /* if match at least 2, don't remove resouce of event */
869: if (flg > 1)
870: evt->flags ^= evt->flags;
871: break;
872: #ifdef AIO_SUPPORT
873: case EVFILT_AIO:
874: flg = 0;
875: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
876: acb = (struct aiocb*) TASK_VAL(task);
877: if (acb != ((struct aiocb*) res[i].udata))
878: continue;
879: else {
880: flg++;
881: TASK_RET(task) = res[i].data;
882: TASK_FLAG(task) = (u_long) res[i].fflags;
883: }
884: /* remove user handle */
885: #ifdef HAVE_LIBPTHREAD
886: pthread_mutex_lock(&r->root_mtx[taskAIO]);
887: #endif
888: TAILQ_REMOVE(&r->root_aio, task, task_node);
889: #ifdef HAVE_LIBPTHREAD
890: pthread_mutex_unlock(&r->root_mtx[taskAIO]);
891: #endif
892: task->task_type = taskREADY;
893: #ifdef HAVE_LIBPTHREAD
894: pthread_mutex_lock(&r->root_mtx[taskREADY]);
895: #endif
896: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
897: #ifdef HAVE_LIBPTHREAD
898: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
899: #endif
900: fd = acb->aio_fildes;
901: if ((len = aio_return(acb)) != -1) {
902: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
903: LOGERR;
904: } else
905: LOGERR;
906: free(acb);
907: TASK_DATLEN(task) = (u_long) len;
908: TASK_FD(task) = fd;
909: }
910: /* if match at least 2, don't remove resouce of event */
911: if (flg > 1)
912: evt->flags ^= evt->flags;
913: break;
914: #ifdef EVFILT_LIO
915: case EVFILT_LIO:
916: flg = 0;
917: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
918: acbs = (struct aiocb**) TASK_VAL(task);
919: if (acbs != ((struct aiocb**) res[i].udata))
920: continue;
921: else {
922: flg++;
923: TASK_RET(task) = res[i].data;
924: TASK_FLAG(task) = (u_long) res[i].fflags;
925: }
926: /* remove user handle */
927: #ifdef HAVE_LIBPTHREAD
928: pthread_mutex_lock(&r->root_mtx[taskLIO]);
929: #endif
930: TAILQ_REMOVE(&r->root_lio, task, task_node);
931: #ifdef HAVE_LIBPTHREAD
932: pthread_mutex_unlock(&r->root_mtx[taskLIO]);
933: #endif
934: task->task_type = taskREADY;
935: #ifdef HAVE_LIBPTHREAD
936: pthread_mutex_lock(&r->root_mtx[taskREADY]);
937: #endif
938: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
939: #ifdef HAVE_LIBPTHREAD
940: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
941: #endif
942: iv = (struct iovec*) TASK_DATA(task);
943: fd = acbs[0]->aio_fildes;
944: off = acbs[0]->aio_offset;
945: for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
946: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
947: l = 0;
948: else
949: l = iv[i].iov_len;
950: free(acbs[i]);
951: }
952: free(acbs);
953: TASK_DATLEN(task) = (u_long) len;
954: TASK_FD(task) = fd;
955:
956: if (lseek(fd, off + len, SEEK_CUR) == -1)
957: LOGERR;
958: }
959: /* if match at least 2, don't remove resouce of event */
960: if (flg > 1)
961: evt->flags ^= evt->flags;
962: break;
963: #endif /* EVFILT_LIO */
964: #endif /* AIO_SUPPORT */
965: #ifdef EVFILT_USER
966: case EVFILT_USER:
967: flg = 0;
968: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
969: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
970: continue;
971: else {
972: flg++;
973: TASK_RET(task) = res[i].data;
974: TASK_FLAG(task) = (u_long) res[i].fflags;
975: }
976: /* remove user handle */
977: #ifdef HAVE_LIBPTHREAD
978: pthread_mutex_lock(&r->root_mtx[taskUSER]);
979: #endif
980: TAILQ_REMOVE(&r->root_user, task, task_node);
981: #ifdef HAVE_LIBPTHREAD
982: pthread_mutex_unlock(&r->root_mtx[taskUSER]);
983: #endif
984: task->task_type = taskREADY;
985: #ifdef HAVE_LIBPTHREAD
986: pthread_mutex_lock(&r->root_mtx[taskREADY]);
987: #endif
988: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
989: #ifdef HAVE_LIBPTHREAD
990: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
991: #endif
992: }
993: /* if match at least 2, don't remove resouce of event */
994: if (flg > 1)
995: evt->flags ^= evt->flags;
996: break;
997: #endif /* EVFILT_USER */
998: }
999: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
1000: if (r->root_hooks.hook_exec.exception) {
1001: if (r->root_hooks.hook_exec.exception(r, NULL))
1002: return NULL;
1003: } else
1004: LOGERR;
1005: }
1006: }
1007:
1008: /* timer update & put in ready queue */
1009: clock_gettime(CLOCK_MONOTONIC, &now);
1010:
1011: TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
1012: if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
1013: #ifdef HAVE_LIBPTHREAD
1014: pthread_mutex_lock(&r->root_mtx[taskTIMER]);
1015: #endif
1016: TAILQ_REMOVE(&r->root_timer, task, task_node);
1017: #ifdef HAVE_LIBPTHREAD
1018: pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
1019: #endif
1020: task->task_type = taskREADY;
1021: #ifdef HAVE_LIBPTHREAD
1022: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1023: #endif
1024: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1025: #ifdef HAVE_LIBPTHREAD
1026: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1027: #endif
1028: }
1029:
1030: /* put regular task priority task to ready queue,
1031: if there is no ready task or reach max missing hit for regular task */
1032: if ((task = TAILQ_FIRST(&r->root_task))) {
1033: if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
1034: r->root_miss ^= r->root_miss;
1035:
1036: #ifdef HAVE_LIBPTHREAD
1037: pthread_mutex_lock(&r->root_mtx[taskTASK]);
1038: #endif
1039: TAILQ_REMOVE(&r->root_task, task, task_node);
1040: #ifdef HAVE_LIBPTHREAD
1041: pthread_mutex_unlock(&r->root_mtx[taskTASK]);
1042: #endif
1043: task->task_type = taskREADY;
1044: #ifdef HAVE_LIBPTHREAD
1045: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1046: #endif
1047: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1048: #ifdef HAVE_LIBPTHREAD
1049: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1050: #endif
1051: } else
1052: r->root_miss++;
1053: } else
1054: r->root_miss ^= r->root_miss;
1055:
1056: /* OK, lets get ready task !!! */
1057: task = TAILQ_FIRST(&r->root_ready);
1058: if (!(task))
1059: return NULL;
1060:
1061: #ifdef HAVE_LIBPTHREAD
1062: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1063: #endif
1064: TAILQ_REMOVE(&r->root_ready, task, task_node);
1065: #ifdef HAVE_LIBPTHREAD
1066: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1067: #endif
1068: task->task_type = taskUNUSE;
1069: #ifdef HAVE_LIBPTHREAD
1070: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1071: #endif
1072: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1073: #ifdef HAVE_LIBPTHREAD
1074: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1075: #endif
1076: return task;
1077: }
1078:
1079: /*
1080: * sched_hook_exception() - Default EXCEPTION hook
1081: *
1082: * @root = root task
1083: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1084: * return: <0 errors and 0 ok
1085: */
1086: void *
1087: sched_hook_exception(void *root, void *arg)
1088: {
1089: sched_root_task_t *r = root;
1090:
1091: if (!r)
1092: return NULL;
1093:
1094: /* custom exception handling ... */
1095: if (arg) {
1096: if (arg == (void*) EV_EOF)
1097: return NULL;
1098: return (void*) -1; /* raise scheduler error!!! */
1099: }
1100:
1101: /* if error hook exists */
1102: if (r->root_hooks.hook_root.error)
1103: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1104:
1105: /* default case! */
1106: LOGERR;
1107: return NULL;
1108: }
1109:
1110: /*
1111: * sched_hook_condition() - Default CONDITION hook
1112: *
1113: * @root = root task
1114: * @arg = killState from schedRun()
1115: * return: NULL kill scheduler loop or !=NULL ok
1116: */
1117: void *
1118: sched_hook_condition(void *root, void *arg)
1119: {
1120: sched_root_task_t *r = root;
1121:
1122: if (!r)
1123: return NULL;
1124:
1125: return (void*) (r->root_cond - *(intptr_t*) arg);
1126: }
1127:
1128: /*
1129: * sched_hook_rtc() - Default RTC hook
1130: *
1131: * @task = current task
1132: * @arg = unused
1133: * return: <0 errors and 0 ok
1134: */
1135: #if defined(HAVE_TIMER_CREATE) && defined(HAVE_TIMER_SETTIME)
1136: void *
1137: sched_hook_rtc(void *task, void *arg __unused)
1138: {
1139: sched_task_t *sigt = NULL, *t = task;
1140: struct itimerspec its;
1141: struct sigevent evt;
1142: timer_t tmr;
1143:
1144: if (!t || !TASK_ROOT(t))
1145: return (void*) -1;
1146:
1147: memset(&evt, 0, sizeof evt);
1148: evt.sigev_notify = SIGEV_SIGNAL;
1149: evt.sigev_signo = (intptr_t) TASK_DATA(t) + SIGRTMIN;
1150: evt.sigev_value.sival_ptr = TASK_DATA(t);
1151:
1152: if (timer_create(CLOCK_MONOTONIC, &evt, &tmr) == -1) {
1153: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1154: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1155: else
1156: LOGERR;
1157: return (void*) -1;
1158: } else
1159: TASK_FLAG(t) = (u_long) tmr;
1160:
1161: if (!(sigt = schedSignal(TASK_ROOT(t), _sched_rtcWrapper, TASK_ARG(t), evt.sigev_signo,
1162: t, (size_t) tmr))) {
1163: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1164: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1165: else
1166: LOGERR;
1167: timer_delete(tmr);
1168: return (void*) -1;
1169: } else
1170: TASK_RET(t) = (uintptr_t) sigt;
1171:
1172: memset(&its, 0, sizeof its);
1173: its.it_value.tv_sec = t->task_val.ts.tv_sec;
1174: its.it_value.tv_nsec = t->task_val.ts.tv_nsec;
1175:
1176: if (timer_settime(tmr, TIMER_RELTIME, &its, NULL) == -1) {
1177: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
1178: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
1179: else
1180: LOGERR;
1181: schedCancel(sigt);
1182: timer_delete(tmr);
1183: return (void*) -1;
1184: }
1185:
1186: return NULL;
1187: }
1188: #endif /* HAVE_TIMER_CREATE */
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */