1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.13.2.3 2012/08/21 12:50:08 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: *
53: * @root = root task
54: * @arg = unused
55: * return: <0 errors and 0 ok
56: */
57: void *
58: sched_hook_init(void *root, void *arg __unused)
59: {
60: sched_root_task_t *r = root;
61:
62: if (!r)
63: return (void*) -1;
64:
65: r->root_kq = kqueue();
66: if (r->root_kq == -1) {
67: LOGERR;
68: return (void*) -1;
69: }
70:
71: return NULL;
72: }
73:
74: /*
75: * sched_hook_fini() - Default FINI hook
76: *
77: * @root = root task
78: * @arg = unused
79: * return: <0 errors and 0 ok
80: */
81: void *
82: sched_hook_fini(void *root, void *arg __unused)
83: {
84: sched_root_task_t *r = root;
85:
86: if (!r)
87: return (void*) -1;
88:
89: if (r->root_kq > 2) {
90: close(r->root_kq);
91: r->root_kq = 0;
92: }
93:
94: return NULL;
95: }
96:
97: /*
98: * sched_hook_cancel() - Default CANCEL hook
99: *
100: * @task = current task
101: * @arg = unused
102: * return: <0 errors and 0 ok
103: */
104: void *
105: sched_hook_cancel(void *task, void *arg __unused)
106: {
107: sched_task_t *t = task;
108: struct kevent chg[1];
109: struct timespec timeout = { 0, 0 };
110: #ifdef AIO_SUPPORT
111: struct aiocb *acb;
112: #ifdef EVFILT_LIO
113: struct aiocb **acbs;
114: register int i;
115: #endif /* EVFILT_LIO */
116: #endif /* AIO_SUPPORT */
117:
118: if (!t || !TASK_ROOT(t))
119: return (void*) -1;
120:
121: switch (TASK_TYPE(t)) {
122: case taskREAD:
123: #ifdef __NetBSD__
124: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
125: #else
126: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
127: #endif
128: break;
129: case taskWRITE:
130: #ifdef __NetBSD__
131: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
132: #else
133: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
134: #endif
135: break;
136: case taskALARM:
137: #ifdef __NetBSD__
138: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
139: 0, 0, (intptr_t) TASK_DATA(t));
140: #else
141: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
142: 0, 0, (void*) TASK_DATA(t));
143: #endif
144: break;
145: case taskNODE:
146: #ifdef __NetBSD__
147: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
148: #else
149: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
150: #endif
151: break;
152: case taskPROC:
153: #ifdef __NetBSD__
154: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
155: #else
156: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
157: #endif
158: break;
159: case taskSIGNAL:
160: #ifdef __NetBSD__
161: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
162: #else
163: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
164: #endif
165: break;
166: #ifdef AIO_SUPPORT
167: case taskAIO:
168: #ifdef __NetBSD__
169: EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
170: #else
171: EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
172: #endif
173: acb = (struct aiocb*) TASK_VAL(t);
174: if (acb) {
175: if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
176: aio_return(acb);
177: free(acb);
178: TASK_VAL(t) = 0;
179: }
180: break;
181: #ifdef EVFILT_LIO
182: case taskLIO:
183: #ifdef __NetBSD__
184: EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
185: #else
186: EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
187: #endif
188: acbs = (struct aiocb**) TASK_VAL(t);
189: if (acbs) {
190: for (i = 0; i < TASK_DATLEN(t); i++) {
191: if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
192: aio_return(acbs[i]);
193: free(acbs[i]);
194: }
195: free(acbs);
196: TASK_VAL(t) = 0;
197: }
198: break;
199: #endif /* EVFILT_LIO */
200: #endif /* AIO_SUPPORT */
201: #ifdef EVFILT_USER
202: case taskUSER:
203: #ifdef __NetBSD__
204: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
205: #else
206: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
207: #endif
208: break;
209: #endif
210: case taskTHREAD:
211: #ifdef HAVE_LIBPTHREAD
212: pthread_cancel((pthread_t) TASK_VAL(t));
213: #endif
214: TASK_UNLOCK(t);
215: default:
216: return NULL;
217: }
218:
219: kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
220: return NULL;
221: }
222:
#ifdef HAVE_LIBPTHREAD
/*
 * sched_hook_thread() - Default THREAD hook
 *
 * Spawns a pthread running the task function, locks the task and
 * records the thread id in TASK_VAL for later cancellation.
 *
 * @task = current task
 * @arg = pthread attributes
 * return: <0 errors and 0 ok
 */
void *
sched_hook_thread(void *task, void *arg)
{
	sched_task_t *st = task;
	pthread_t tid;

	if (!st || !TASK_ROOT(st))
		return (void*) -1;

	/* the task itself is handed to the thread entry as its argument */
	if (pthread_create(&tid, (pthread_attr_t*) arg,
				(void *(*)(void*)) TASK_FUNC(st), st)) {
		LOGERR;
		return (void*) -1;
	}

	if (!TASK_ISLOCKED(st))
		TASK_LOCK(st);

	TASK_VAL(st) = (u_long) tid;
	return NULL;
}
#endif
253:
254: /*
255: * sched_hook_read() - Default READ hook
256: *
257: * @task = current task
258: * @arg = unused
259: * return: <0 errors and 0 ok
260: */
261: void *
262: sched_hook_read(void *task, void *arg __unused)
263: {
264: sched_task_t *t = task;
265: struct kevent chg[1];
266: struct timespec timeout = { 0, 0 };
267:
268: if (!t || !TASK_ROOT(t))
269: return (void*) -1;
270:
271: #ifdef __NetBSD__
272: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
273: #else
274: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
275: #endif
276: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
277: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
278: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
279: else
280: LOGERR;
281: return (void*) -1;
282: }
283:
284: return NULL;
285: }
286:
287: /*
288: * sched_hook_write() - Default WRITE hook
289: *
290: * @task = current task
291: * @arg = unused
292: * return: <0 errors and 0 ok
293: */
294: void *
295: sched_hook_write(void *task, void *arg __unused)
296: {
297: sched_task_t *t = task;
298: struct kevent chg[1];
299: struct timespec timeout = { 0, 0 };
300:
301: if (!t || !TASK_ROOT(t))
302: return (void*) -1;
303:
304: #ifdef __NetBSD__
305: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
306: #else
307: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
308: #endif
309: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
310: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
311: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
312: else
313: LOGERR;
314: return (void*) -1;
315: }
316:
317: return NULL;
318: }
319:
320: /*
321: * sched_hook_alarm() - Default ALARM hook
322: *
323: * @task = current task
324: * @arg = unused
325: * return: <0 errors and 0 ok
326: */
327: void *
328: sched_hook_alarm(void *task, void *arg __unused)
329: {
330: sched_task_t *t = task;
331: struct kevent chg[1];
332: struct timespec timeout = { 0, 0 };
333:
334: if (!t || !TASK_ROOT(t))
335: return (void*) -1;
336:
337: #ifdef __NetBSD__
338: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
339: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
340: (intptr_t) TASK_DATA(t));
341: #else
342: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
343: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
344: (void*) TASK_DATA(t));
345: #endif
346: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
347: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
348: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
349: else
350: LOGERR;
351: return (void*) -1;
352: }
353:
354: return NULL;
355: }
356:
357: /*
358: * sched_hook_node() - Default NODE hook
359: *
360: * @task = current task
361: * @arg = unused
362: * return: <0 errors and 0 ok
363: */
364: void *
365: sched_hook_node(void *task, void *arg __unused)
366: {
367: sched_task_t *t = task;
368: struct kevent chg[1];
369: struct timespec timeout = { 0, 0 };
370:
371: if (!t || !TASK_ROOT(t))
372: return (void*) -1;
373:
374: #ifdef __NetBSD__
375: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
376: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
377: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
378: #else
379: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
380: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
381: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
382: #endif
383: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
384: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
385: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
386: else
387: LOGERR;
388: return (void*) -1;
389: }
390:
391: return NULL;
392: }
393:
394: /*
395: * sched_hook_proc() - Default PROC hook
396: *
397: * @task = current task
398: * @arg = unused
399: * return: <0 errors and 0 ok
400: */
401: void *
402: sched_hook_proc(void *task, void *arg __unused)
403: {
404: sched_task_t *t = task;
405: struct kevent chg[1];
406: struct timespec timeout = { 0, 0 };
407:
408: if (!t || !TASK_ROOT(t))
409: return (void*) -1;
410:
411: #ifdef __NetBSD__
412: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
413: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
414: #else
415: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
416: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
417: #endif
418: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
419: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
420: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
421: else
422: LOGERR;
423: return (void*) -1;
424: }
425:
426: return NULL;
427: }
428:
429: /*
430: * sched_hook_signal() - Default SIGNAL hook
431: *
432: * @task = current task
433: * @arg = unused
434: * return: <0 errors and 0 ok
435: */
436: void *
437: sched_hook_signal(void *task, void *arg __unused)
438: {
439: sched_task_t *t = task;
440: struct kevent chg[1];
441: struct timespec timeout = { 0, 0 };
442:
443: if (!t || !TASK_ROOT(t))
444: return (void*) -1;
445:
446: #ifdef __NetBSD__
447: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (intptr_t) TASK_VAL(t));
448: #else
449: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (void*) TASK_VAL(t));
450: #endif
451: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
452: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
453: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
454: else
455: LOGERR;
456: return (void*) -1;
457: }
458:
459: return NULL;
460: }
461:
462: /*
463: * sched_hook_user() - Default USER hook
464: *
465: * @task = current task
466: * @arg = unused
467: * return: <0 errors and 0 ok
468: */
469: #ifdef EVFILT_USER
470: void *
471: sched_hook_user(void *task, void *arg __unused)
472: {
473: sched_task_t *t = task;
474: struct kevent chg[1];
475: struct timespec timeout = { 0, 0 };
476:
477: if (!t || !TASK_ROOT(t))
478: return (void*) -1;
479:
480: #ifdef __NetBSD__
481: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
482: 0, (intptr_t) TASK_VAL(t));
483: #else
484: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
485: 0, (void*) TASK_VAL(t));
486: #endif
487: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
488: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
489: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
490: else
491: LOGERR;
492: return (void*) -1;
493: }
494:
495: return NULL;
496: }
497: #endif
498:
499: /*
500: * sched_hook_fetch() - Default FETCH hook
501: *
502: * @root = root task
503: * @arg = unused
504: * return: NULL error or !=NULL fetched task
505: */
506: void *
507: sched_hook_fetch(void *root, void *arg __unused)
508: {
509: sched_root_task_t *r = root;
510: sched_task_t *task, *tmp;
511: struct timespec now, m, mtmp;
512: struct timespec *timeout;
513: struct kevent evt[1], res[KQ_EVENTS];
514: register int i, flg;
515: int en;
516: #ifdef AIO_SUPPORT
517: int len, fd;
518: struct aiocb *acb;
519: #ifdef EVFILT_LIO
520: int l;
521: register int j;
522: off_t off;
523: struct aiocb **acbs;
524: struct iovec *iv;
525: #endif /* EVFILT_LIO */
526: #endif /* AIO_SUPPORT */
527:
528: if (!r)
529: return NULL;
530:
531: /* get new task by queue priority */
532: while ((task = TAILQ_FIRST(&r->root_event))) {
533: #ifdef HAVE_LIBPTHREAD
534: pthread_mutex_lock(&r->root_mtx[taskEVENT]);
535: #endif
536: TAILQ_REMOVE(&r->root_event, task, task_node);
537: #ifdef HAVE_LIBPTHREAD
538: pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
539: #endif
540: task->task_type = taskUNUSE;
541: #ifdef HAVE_LIBPTHREAD
542: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
543: #endif
544: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
545: #ifdef HAVE_LIBPTHREAD
546: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
547: #endif
548: return task;
549: }
550: while ((task = TAILQ_FIRST(&r->root_ready))) {
551: #ifdef HAVE_LIBPTHREAD
552: pthread_mutex_lock(&r->root_mtx[taskREADY]);
553: #endif
554: TAILQ_REMOVE(&r->root_ready, task, task_node);
555: #ifdef HAVE_LIBPTHREAD
556: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
557: #endif
558: task->task_type = taskUNUSE;
559: #ifdef HAVE_LIBPTHREAD
560: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
561: #endif
562: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
563: #ifdef HAVE_LIBPTHREAD
564: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
565: #endif
566: return task;
567: }
568:
569: #ifdef TIMER_WITHOUT_SORT
570: clock_gettime(CLOCK_MONOTONIC, &now);
571:
572: sched_timespecclear(&r->root_wait);
573: TAILQ_FOREACH(task, &r->root_timer, task_node) {
574: if (!sched_timespecisset(&r->root_wait))
575: r->root_wait = TASK_TS(task);
576: else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
577: r->root_wait = TASK_TS(task);
578: }
579:
580: if (TAILQ_FIRST(&r->root_timer)) {
581: m = r->root_wait;
582: sched_timespecsub(&m, &now, &mtmp);
583: r->root_wait = mtmp;
584: } else {
585: /* set wait INFTIM */
586: sched_timespecinf(&r->root_wait);
587: }
588: #else
589: if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
590: clock_gettime(CLOCK_MONOTONIC, &now);
591:
592: m = TASK_TS(task);
593: sched_timespecsub(&m, &now, &mtmp);
594: r->root_wait = mtmp;
595: } else {
596: /* set wait INFTIM */
597: sched_timespecinf(&r->root_wait);
598: }
599: #endif
600: /* if present member of task, set NOWAIT */
601: if (TAILQ_FIRST(&r->root_task))
602: sched_timespecclear(&r->root_wait);
603:
604: if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
605: timeout = &r->root_wait;
606: else if (sched_timespecisinf(&r->root_poll))
607: timeout = NULL;
608: else
609: timeout = &r->root_poll;
610: if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
611: if (r->root_hooks.hook_exec.exception) {
612: if (r->root_hooks.hook_exec.exception(r, NULL))
613: return NULL;
614: } else if (errno != EINTR)
615: LOGERR;
616: return NULL;
617: }
618:
619: now.tv_sec = now.tv_nsec = 0;
620: /* Go and catch the cat into pipes ... */
621: for (i = 0; i < en; i++) {
622: memcpy(evt, &res[i], sizeof evt);
623: evt->flags = EV_DELETE;
624: /* Put read/write task to ready queue */
625: switch (res[i].filter) {
626: case EVFILT_READ:
627: flg = 0;
628: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
629: if (TASK_FD(task) != ((intptr_t) res[i].udata))
630: continue;
631: else {
632: flg++;
633: TASK_RET(task) = res[i].data;
634: TASK_FLAG(task) = res[i].fflags;
635: }
636: /* remove read handle */
637: #ifdef HAVE_LIBPTHREAD
638: pthread_mutex_lock(&r->root_mtx[taskREAD]);
639: #endif
640: TAILQ_REMOVE(&r->root_read, task, task_node);
641: #ifdef HAVE_LIBPTHREAD
642: pthread_mutex_unlock(&r->root_mtx[taskREAD]);
643: #endif
644: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
645: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
646: task->task_type = taskUNUSE;
647: #ifdef HAVE_LIBPTHREAD
648: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
649: #endif
650: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
651: #ifdef HAVE_LIBPTHREAD
652: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
653: #endif
654: } else {
655: task->task_type = taskREADY;
656: #ifdef HAVE_LIBPTHREAD
657: pthread_mutex_lock(&r->root_mtx[taskREADY]);
658: #endif
659: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
660: #ifdef HAVE_LIBPTHREAD
661: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
662: #endif
663: }
664: } else {
665: task->task_type = taskREADY;
666: #ifdef HAVE_LIBPTHREAD
667: pthread_mutex_lock(&r->root_mtx[taskREADY]);
668: #endif
669: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
670: #ifdef HAVE_LIBPTHREAD
671: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
672: #endif
673: }
674: }
675: /* if match at least 2, don't remove resouce of event */
676: if (flg > 1)
677: evt->flags ^= evt->flags;
678: break;
679: case EVFILT_WRITE:
680: flg = 0;
681: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
682: if (TASK_FD(task) != ((intptr_t) res[i].udata))
683: continue;
684: else {
685: flg++;
686: TASK_RET(task) = res[i].data;
687: TASK_FLAG(task) = res[i].fflags;
688: }
689: /* remove write handle */
690: #ifdef HAVE_LIBPTHREAD
691: pthread_mutex_lock(&r->root_mtx[taskWRITE]);
692: #endif
693: TAILQ_REMOVE(&r->root_write, task, task_node);
694: #ifdef HAVE_LIBPTHREAD
695: pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
696: #endif
697: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
698: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
699: task->task_type = taskUNUSE;
700: #ifdef HAVE_LIBPTHREAD
701: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
702: #endif
703: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
704: #ifdef HAVE_LIBPTHREAD
705: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
706: #endif
707: } else {
708: task->task_type = taskREADY;
709: #ifdef HAVE_LIBPTHREAD
710: pthread_mutex_lock(&r->root_mtx[taskREADY]);
711: #endif
712: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
713: #ifdef HAVE_LIBPTHREAD
714: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
715: #endif
716: }
717: } else {
718: task->task_type = taskREADY;
719: #ifdef HAVE_LIBPTHREAD
720: pthread_mutex_lock(&r->root_mtx[taskREADY]);
721: #endif
722: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
723: #ifdef HAVE_LIBPTHREAD
724: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
725: #endif
726: }
727: }
728: /* if match at least 2, don't remove resouce of event */
729: if (flg > 1)
730: evt->flags ^= evt->flags;
731: break;
732: case EVFILT_TIMER:
733: flg = 0;
734: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
735: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
736: continue;
737: else {
738: flg++;
739: TASK_RET(task) = res[i].data;
740: TASK_FLAG(task) = res[i].fflags;
741: }
742: /* remove alarm handle */
743: #ifdef HAVE_LIBPTHREAD
744: pthread_mutex_lock(&r->root_mtx[taskALARM]);
745: #endif
746: TAILQ_REMOVE(&r->root_alarm, task, task_node);
747: #ifdef HAVE_LIBPTHREAD
748: pthread_mutex_unlock(&r->root_mtx[taskALARM]);
749: #endif
750: task->task_type = taskREADY;
751: #ifdef HAVE_LIBPTHREAD
752: pthread_mutex_lock(&r->root_mtx[taskREADY]);
753: #endif
754: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
755: #ifdef HAVE_LIBPTHREAD
756: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
757: #endif
758: }
759: /* if match at least 2, don't remove resouce of event */
760: if (flg > 1)
761: evt->flags ^= evt->flags;
762: break;
763: case EVFILT_VNODE:
764: flg = 0;
765: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
766: if (TASK_FD(task) != ((intptr_t) res[i].udata))
767: continue;
768: else {
769: flg++;
770: TASK_RET(task) = res[i].data;
771: TASK_FLAG(task) = res[i].fflags;
772: }
773: /* remove node handle */
774: #ifdef HAVE_LIBPTHREAD
775: pthread_mutex_lock(&r->root_mtx[taskNODE]);
776: #endif
777: TAILQ_REMOVE(&r->root_node, task, task_node);
778: #ifdef HAVE_LIBPTHREAD
779: pthread_mutex_unlock(&r->root_mtx[taskNODE]);
780: #endif
781: task->task_type = taskREADY;
782: #ifdef HAVE_LIBPTHREAD
783: pthread_mutex_lock(&r->root_mtx[taskREADY]);
784: #endif
785: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
786: #ifdef HAVE_LIBPTHREAD
787: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
788: #endif
789: }
790: /* if match at least 2, don't remove resouce of event */
791: if (flg > 1)
792: evt->flags ^= evt->flags;
793: break;
794: case EVFILT_PROC:
795: flg = 0;
796: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
797: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
798: continue;
799: else {
800: flg++;
801: TASK_RET(task) = res[i].data;
802: TASK_FLAG(task) = res[i].fflags;
803: }
804: /* remove proc handle */
805: #ifdef HAVE_LIBPTHREAD
806: pthread_mutex_lock(&r->root_mtx[taskPROC]);
807: #endif
808: TAILQ_REMOVE(&r->root_proc, task, task_node);
809: #ifdef HAVE_LIBPTHREAD
810: pthread_mutex_unlock(&r->root_mtx[taskPROC]);
811: #endif
812: task->task_type = taskREADY;
813: #ifdef HAVE_LIBPTHREAD
814: pthread_mutex_lock(&r->root_mtx[taskREADY]);
815: #endif
816: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
817: #ifdef HAVE_LIBPTHREAD
818: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
819: #endif
820: }
821: /* if match at least 2, don't remove resouce of event */
822: if (flg > 1)
823: evt->flags ^= evt->flags;
824: break;
825: case EVFILT_SIGNAL:
826: flg = 0;
827: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
828: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
829: continue;
830: else {
831: flg++;
832: TASK_RET(task) = res[i].data;
833: TASK_FLAG(task) = res[i].fflags;
834: }
835: /* remove signal handle */
836: #ifdef HAVE_LIBPTHREAD
837: pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
838: #endif
839: TAILQ_REMOVE(&r->root_signal, task, task_node);
840: #ifdef HAVE_LIBPTHREAD
841: pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
842: #endif
843: task->task_type = taskREADY;
844: #ifdef HAVE_LIBPTHREAD
845: pthread_mutex_lock(&r->root_mtx[taskREADY]);
846: #endif
847: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
848: #ifdef HAVE_LIBPTHREAD
849: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
850: #endif
851: }
852: /* if match at least 2, don't remove resouce of event */
853: if (flg > 1)
854: evt->flags ^= evt->flags;
855: break;
856: #ifdef AIO_SUPPORT
857: case EVFILT_AIO:
858: flg = 0;
859: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
860: acb = (struct aiocb*) TASK_VAL(task);
861: if (acb != ((struct aiocb*) res[i].udata))
862: continue;
863: else {
864: flg++;
865: TASK_RET(task) = res[i].data;
866: TASK_FLAG(task) = res[i].fflags;
867: }
868: /* remove user handle */
869: #ifdef HAVE_LIBPTHREAD
870: pthread_mutex_lock(&r->root_mtx[taskAIO]);
871: #endif
872: TAILQ_REMOVE(&r->root_aio, task, task_node);
873: #ifdef HAVE_LIBPTHREAD
874: pthread_mutex_unlock(&r->root_mtx[taskAIO]);
875: #endif
876: task->task_type = taskREADY;
877: #ifdef HAVE_LIBPTHREAD
878: pthread_mutex_lock(&r->root_mtx[taskREADY]);
879: #endif
880: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
881: #ifdef HAVE_LIBPTHREAD
882: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
883: #endif
884: fd = acb->aio_fildes;
885: if ((len = aio_return(acb)) != -1) {
886: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
887: LOGERR;
888: } else
889: LOGERR;
890: free(acb);
891: TASK_DATLEN(task) = (u_long) len;
892: TASK_FD(task) = fd;
893: }
894: /* if match at least 2, don't remove resouce of event */
895: if (flg > 1)
896: evt->flags ^= evt->flags;
897: break;
898: #ifdef EVFILT_LIO
899: case EVFILT_LIO:
900: flg = 0;
901: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
902: acbs = (struct aiocb**) TASK_VAL(task);
903: if (acbs != ((struct aiocb**) res[i].udata))
904: continue;
905: else {
906: flg++;
907: TASK_RET(task) = res[i].data;
908: TASK_FLAG(task) = res[i].fflags;
909: }
910: /* remove user handle */
911: #ifdef HAVE_LIBPTHREAD
912: pthread_mutex_lock(&r->root_mtx[taskLIO]);
913: #endif
914: TAILQ_REMOVE(&r->root_lio, task, task_node);
915: #ifdef HAVE_LIBPTHREAD
916: pthread_mutex_unlock(&r->root_mtx[taskLIO]);
917: #endif
918: task->task_type = taskREADY;
919: #ifdef HAVE_LIBPTHREAD
920: pthread_mutex_lock(&r->root_mtx[taskREADY]);
921: #endif
922: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
923: #ifdef HAVE_LIBPTHREAD
924: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
925: #endif
926: iv = (struct iovec*) TASK_DATA(task);
927: fd = acbs[0]->aio_fildes;
928: off = acbs[0]->aio_offset;
929: for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
930: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
931: l = 0;
932: else
933: l = iv[i].iov_len;
934: free(acbs[i]);
935: }
936: free(acbs);
937: TASK_DATLEN(task) = (u_long) len;
938: TASK_FD(task) = fd;
939:
940: if (lseek(fd, off + len, SEEK_CUR) == -1)
941: LOGERR;
942: }
943: /* if match at least 2, don't remove resouce of event */
944: if (flg > 1)
945: evt->flags ^= evt->flags;
946: break;
947: #endif /* EVFILT_LIO */
948: #endif /* AIO_SUPPORT */
949: #ifdef EVFILT_USER
950: case EVFILT_USER:
951: flg = 0;
952: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
953: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
954: continue;
955: else {
956: flg++;
957: TASK_RET(task) = res[i].data;
958: TASK_FLAG(task) = res[i].fflags;
959: }
960: /* remove user handle */
961: #ifdef HAVE_LIBPTHREAD
962: pthread_mutex_lock(&r->root_mtx[taskUSER]);
963: #endif
964: TAILQ_REMOVE(&r->root_user, task, task_node);
965: #ifdef HAVE_LIBPTHREAD
966: pthread_mutex_unlock(&r->root_mtx[taskUSER]);
967: #endif
968: task->task_type = taskREADY;
969: #ifdef HAVE_LIBPTHREAD
970: pthread_mutex_lock(&r->root_mtx[taskREADY]);
971: #endif
972: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
973: #ifdef HAVE_LIBPTHREAD
974: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
975: #endif
976: }
977: /* if match at least 2, don't remove resouce of event */
978: if (flg > 1)
979: evt->flags ^= evt->flags;
980: break;
981: #endif /* EVFILT_USER */
982: }
983: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
984: if (r->root_hooks.hook_exec.exception) {
985: if (r->root_hooks.hook_exec.exception(r, NULL))
986: return NULL;
987: } else
988: LOGERR;
989: }
990: }
991:
992: /* timer update & put in ready queue */
993: clock_gettime(CLOCK_MONOTONIC, &now);
994:
995: TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
996: if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
997: #ifdef HAVE_LIBPTHREAD
998: pthread_mutex_lock(&r->root_mtx[taskTIMER]);
999: #endif
1000: TAILQ_REMOVE(&r->root_timer, task, task_node);
1001: #ifdef HAVE_LIBPTHREAD
1002: pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
1003: #endif
1004: task->task_type = taskREADY;
1005: #ifdef HAVE_LIBPTHREAD
1006: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1007: #endif
1008: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1009: #ifdef HAVE_LIBPTHREAD
1010: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1011: #endif
1012: }
1013:
1014: /* put regular task priority task to ready queue,
1015: if there is no ready task or reach max missing hit for regular task */
1016: if ((task = TAILQ_FIRST(&r->root_task))) {
1017: if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
1018: r->root_miss ^= r->root_miss;
1019:
1020: #ifdef HAVE_LIBPTHREAD
1021: pthread_mutex_lock(&r->root_mtx[taskTASK]);
1022: #endif
1023: TAILQ_REMOVE(&r->root_task, task, task_node);
1024: #ifdef HAVE_LIBPTHREAD
1025: pthread_mutex_unlock(&r->root_mtx[taskTASK]);
1026: #endif
1027: task->task_type = taskREADY;
1028: #ifdef HAVE_LIBPTHREAD
1029: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1030: #endif
1031: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1032: #ifdef HAVE_LIBPTHREAD
1033: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1034: #endif
1035: } else
1036: r->root_miss++;
1037: } else
1038: r->root_miss ^= r->root_miss;
1039:
1040: /* OK, lets get ready task !!! */
1041: task = TAILQ_FIRST(&r->root_ready);
1042: if (!(task))
1043: return NULL;
1044:
1045: #ifdef HAVE_LIBPTHREAD
1046: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1047: #endif
1048: TAILQ_REMOVE(&r->root_ready, task, task_node);
1049: #ifdef HAVE_LIBPTHREAD
1050: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1051: #endif
1052: task->task_type = taskUNUSE;
1053: #ifdef HAVE_LIBPTHREAD
1054: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1055: #endif
1056: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1057: #ifdef HAVE_LIBPTHREAD
1058: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1059: #endif
1060: return task;
1061: }
1062:
1063: /*
1064: * sched_hook_exception() - Default EXCEPTION hook
1065: *
1066: * @root = root task
1067: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1068: * return: <0 errors and 0 ok
1069: */
1070: void *
1071: sched_hook_exception(void *root, void *arg)
1072: {
1073: sched_root_task_t *r = root;
1074:
1075: if (!r)
1076: return NULL;
1077:
1078: /* custom exception handling ... */
1079: if (arg) {
1080: if (arg == (void*) EV_EOF)
1081: return NULL;
1082: return (void*) -1; /* raise scheduler error!!! */
1083: }
1084:
1085: /* if error hook exists */
1086: if (r->root_hooks.hook_root.error)
1087: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1088:
1089: /* default case! */
1090: LOGERR;
1091: return NULL;
1092: }
1093:
1094: /*
1095: * sched_hook_condition() - Default CONDITION hook
1096: *
1097: * @root = root task
1098: * @arg = killState from schedRun()
1099: * return: NULL kill scheduler loop or !=NULL ok
1100: */
1101: void *
1102: sched_hook_condition(void *root, void *arg)
1103: {
1104: sched_root_task_t *r = root;
1105:
1106: if (!r)
1107: return NULL;
1108:
1109: return (void*) (r->root_cond - *(intptr_t*) arg);
1110: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */