1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.12 2012/08/08 08:25:39 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: *
53: * @root = root task
54: * @arg = unused
55: * return: <0 errors and 0 ok
56: */
void *
sched_hook_init(void *root, void *arg __unused)
{
	sched_root_task_t *rootp = root;

	if (!rootp)
		return (void*) -1;

	/* Create the kqueue descriptor that all other default hooks use. */
	if ((rootp->root_kq = kqueue()) == -1) {
		LOGERR;
		return (void*) -1;
	}

	return NULL;
}
73:
74: /*
75: * sched_hook_fini() - Default FINI hook
76: *
77: * @root = root task
78: * @arg = unused
79: * return: <0 errors and 0 ok
80: */
81: void *
82: sched_hook_fini(void *root, void *arg __unused)
83: {
84: sched_root_task_t *r = root;
85:
86: if (!r)
87: return (void*) -1;
88:
89: if (r->root_kq > 2) {
90: close(r->root_kq);
91: r->root_kq = 0;
92: }
93:
94: return NULL;
95: }
96:
97: /*
98: * sched_hook_cancel() - Default CANCEL hook
99: *
100: * @task = current task
101: * @arg = unused
102: * return: <0 errors and 0 ok
103: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: non-blocking kevent() */
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	struct aiocb **acbs;
	register int i;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/*
	 * Build an EV_DELETE change record that matches the filter originally
	 * registered for this task type.  NetBSD's EV_SET() takes the udata
	 * argument as intptr_t while the other BSDs take void*, hence the
	 * per-case #ifdef __NetBSD__ split below.
	 */
	switch (TASK_TYPE(t)) {
		case taskREAD:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
			break;
		case taskWRITE:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
			break;
		case taskALARM:
			/* timers are keyed by TASK_DATA, not by a descriptor */
#ifdef __NetBSD__
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
					0, 0, (intptr_t) TASK_DATA(t));
#else
			EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
					0, 0, (void*) TASK_DATA(t));
#endif
			break;
		case taskNODE:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
			EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
			break;
		case taskPROC:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			break;
		case taskSIGNAL:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			break;
#ifdef AIO_SUPPORT
		case taskAIO:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			/* cancel the in-flight aio request and reap its status
			 * before freeing the control block owned by the task */
			acb = (struct aiocb*) TASK_VAL(t);
			if (acb) {
				if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
					aio_return(acb);
				free(acb);
				TASK_VAL(t) = 0;
			}
			break;
#ifdef EVFILT_LIO
		case taskLIO:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			/* cancel and free every member of the lio_listio() batch;
			 * TASK_DATLEN holds the number of control blocks */
			acbs = (struct aiocb**) TASK_VAL(t);
			if (acbs) {
				for (i = 0; i < TASK_DATLEN(t); i++) {
					if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
						aio_return(acbs[i]);
					free(acbs[i]);
				}
				free(acbs);
				TASK_VAL(t) = 0;
			}
			break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
		case taskUSER:
#ifdef __NetBSD__
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
			EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
			break;
#endif
		default:
			/* task types without a kevent registration need no change record */
			return NULL;
	}

	/* submit the delete; result intentionally ignored (best effort) */
	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
	return NULL;
}
217:
218: /*
219: * sched_hook_read() - Default READ hook
220: *
221: * @task = current task
222: * @arg = unused
223: * return: <0 errors and 0 ok
224: */
void *
sched_hook_read(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent ev[1];
	struct timespec nowait = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register an edge-triggered read filter keyed by the task's fd */
#ifdef __NetBSD__
	EV_SET(&ev[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&ev[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, ev, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: prefer the user exception hook over plain logging */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
}
250:
251: /*
252: * sched_hook_write() - Default WRITE hook
253: *
254: * @task = current task
255: * @arg = unused
256: * return: <0 errors and 0 ok
257: */
void *
sched_hook_write(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent ev[1];
	struct timespec nowait = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* register an edge-triggered write filter keyed by the task's fd */
#ifdef __NetBSD__
	EV_SET(&ev[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&ev[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, ev, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: prefer the user exception hook over plain logging */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
}
283:
284: /*
285: * sched_hook_alarm() - Default ALARM hook
286: *
287: * @task = current task
288: * @arg = unused
289: * return: <0 errors and 0 ok
290: */
void *
sched_hook_alarm(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent ev[1];
	struct timespec nowait = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* one-shot timer keyed by TASK_DATA; interval converted to milliseconds
	 * from the task's timespec value */
#ifdef __NetBSD__
	EV_SET(&ev[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(intptr_t) TASK_DATA(t));
#else
	EV_SET(&ev[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
			t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
			(void*) TASK_DATA(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, ev, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: prefer the user exception hook over plain logging */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
}
320:
321: /*
322: * sched_hook_node() - Default NODE hook
323: *
324: * @task = current task
325: * @arg = unused
326: * return: <0 errors and 0 ok
327: */
void *
sched_hook_node(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent ev[1];
	struct timespec nowait = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* watch the vnode behind the task's fd for all supported change events */
#ifdef __NetBSD__
	EV_SET(&ev[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&ev[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
			NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
			NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, ev, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: prefer the user exception hook over plain logging */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
}
357:
358: /*
359: * sched_hook_proc() - Default PROC hook
360: *
361: * @task = current task
362: * @arg = unused
363: * return: <0 errors and 0 ok
364: */
void *
sched_hook_proc(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent ev[1];
	struct timespec nowait = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* monitor the pid stored in TASK_VAL for exit/fork/exec, tracking children */
#ifdef __NetBSD__
	EV_SET(&ev[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&ev[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
			NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, ev, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: prefer the user exception hook over plain logging */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
}
392:
393: /*
394: * sched_hook_signal() - Default SIGNAL hook
395: *
396: * @task = current task
397: * @arg = unused
398: * return: <0 errors and 0 ok
399: */
void *
sched_hook_signal(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent ev[1];
	struct timespec nowait = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* deliver the signal number stored in TASK_VAL via kqueue */
#ifdef __NetBSD__
	EV_SET(&ev[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&ev[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, ev, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: prefer the user exception hook over plain logging */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
}
425:
426: /*
427: * sched_hook_user() - Default USER hook
428: *
429: * @task = current task
430: * @arg = unused
431: * return: <0 errors and 0 ok
432: */
433: #ifdef EVFILT_USER
void *
sched_hook_user(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent ev[1];
	struct timespec nowait = { 0, 0 };

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* user-triggered event keyed by TASK_VAL; fflags seeded from TASK_DATLEN */
#ifdef __NetBSD__
	EV_SET(&ev[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (intptr_t) TASK_VAL(t));
#else
	EV_SET(&ev[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
			0, (void*) TASK_VAL(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, ev, 1, NULL, 0, &nowait) != -1)
		return NULL;

	/* registration failed: prefer the user exception hook over plain logging */
	if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
		TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
	else
		LOGERR;
	return (void*) -1;
}
461: #endif
462:
463: /*
464: * sched_hook_fetch() - Default FETCH hook
465: *
466: * @root = root task
467: * @arg = unused
468: * return: NULL error or !=NULL fetched task
469: */
/*
 * Fetch the next runnable task: drain event/ready queues first, then wait on
 * the kqueue, promote fired read/write/timer/vnode/proc/signal/aio/lio/user
 * tasks to the ready queue, expire timers, rotate regular tasks, and hand
 * back the head of the ready queue (moved to the unuse queue for recycling).
 */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	sched_root_task_t *r = root;
	sched_task_t *task, *tmp;
	struct timespec now, m, mtmp;
	struct timespec *timeout;
	struct kevent evt[1], res[KQ_EVENTS];
	register int i, flg;
	int en;
#ifdef AIO_SUPPORT
	int len, fd;
	struct aiocb *acb;
#ifdef EVFILT_LIO
	int l;
	register int j;
	off_t off;
	struct aiocb **acbs;
	struct iovec *iv;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!r)
		return NULL;

	/* get new task by queue priority */
	while ((task = TAILQ_FIRST(&r->root_event))) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
#endif
		TAILQ_REMOVE(&r->root_event, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
#endif
		task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
		TAILQ_REMOVE(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
		return task;
	}

#ifdef TIMER_WITHOUT_SORT
	/* unsorted timer queue: scan for the earliest deadline */
	clock_gettime(CLOCK_MONOTONIC, &now);

	sched_timespecclear(&r->root_wait);
	TAILQ_FOREACH(task, &r->root_timer, task_node) {
		if (!sched_timespecisset(&r->root_wait))
			r->root_wait = TASK_TS(task);
		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
			r->root_wait = TASK_TS(task);
	}

	if (TAILQ_FIRST(&r->root_timer)) {
		m = r->root_wait;
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#else
	/* sorted timer queue: first entry carries the earliest deadline */
	if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
		clock_gettime(CLOCK_MONOTONIC, &now);

		m = TASK_TS(task);
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#endif
	/* if present member of task, set NOWAIT */
	if (TAILQ_FIRST(&r->root_task))
		sched_timespecclear(&r->root_wait);

	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
		timeout = &r->root_wait;
	else if (sched_timespecisinf(&r->root_poll))
		timeout = NULL;
	else
		timeout = &r->root_poll;
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else if (errno != EINTR)
			LOGERR;
		return NULL;
	}

	now.tv_sec = now.tv_nsec = 0;
	/* Go and catch the cat into pipes ... */
	for (i = 0; i < en; i++) {
		/* default change record: delete the fired filter afterwards */
		memcpy(evt, &res[i], sizeof evt);
		evt->flags = EV_DELETE;
		/* Put read/write task to ready queue */
		switch (res[i].filter) {
			case EVFILT_READ:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
					if (TASK_FD(task) != ((intptr_t) res[i].udata))
						continue;
					else
						flg++;
					/* remove read handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREAD]);
#endif
					TAILQ_REMOVE(&r->root_read, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREAD]);
#endif
					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
							task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
						} else {
							task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
						}
					} else {
						task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					}
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
			case EVFILT_WRITE:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
					if (TASK_FD(task) != ((intptr_t) res[i].udata))
						continue;
					else
						flg++;
					/* remove write handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskWRITE]);
#endif
					TAILQ_REMOVE(&r->root_write, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
#endif
					if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
						if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
							task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
							TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
						} else {
							task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
							TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
							pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
						}
					} else {
						task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					}
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
			case EVFILT_TIMER:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
					if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
						continue;
					else
						flg++;
					/* remove alarm handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskALARM]);
#endif
					TAILQ_REMOVE(&r->root_alarm, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskALARM]);
#endif
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
			case EVFILT_VNODE:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
					if (TASK_FD(task) != ((intptr_t) res[i].udata))
						continue;
					else {
						flg++;
						TASK_DATA(task) = (void*) (uintptr_t) res[i].data;
						TASK_DATLEN(task) = res[i].fflags;
					}
					/* remove node handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskNODE]);
#endif
					TAILQ_REMOVE(&r->root_node, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskNODE]);
#endif
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
			case EVFILT_PROC:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
						continue;
					else {
						flg++;
						TASK_DATA(task) = (void*) (uintptr_t) res[i].data;
						TASK_DATLEN(task) = res[i].fflags;
					}
					/* remove proc handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskPROC]);
#endif
					TAILQ_REMOVE(&r->root_proc, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskPROC]);
#endif
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
			case EVFILT_SIGNAL:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
						continue;
					else
						flg++;
					/* remove signal handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
#endif
					TAILQ_REMOVE(&r->root_signal, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
#endif
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
#ifdef AIO_SUPPORT
			case EVFILT_AIO:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
					acb = (struct aiocb*) TASK_VAL(task);
					if (acb != ((struct aiocb*) res[i].udata))
						continue;
					else
						flg++;
					/* remove user handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskAIO]);
#endif
					TAILQ_REMOVE(&r->root_aio, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskAIO]);
#endif
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					/* reap the completed request and advance the file offset */
					fd = acb->aio_fildes;
					if ((len = aio_return(acb)) != -1) {
						if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
							LOGERR;
					} else
						LOGERR;
					free(acb);
					TASK_DATLEN(task) = (u_long) len;
					TASK_FD(task) = fd;
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
#ifdef EVFILT_LIO
			case EVFILT_LIO:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
					acbs = (struct aiocb**) TASK_VAL(task);
					if (acbs != ((struct aiocb**) res[i].udata))
						continue;
					else
						flg++;
					/* remove user handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskLIO]);
#endif
					TAILQ_REMOVE(&r->root_lio, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskLIO]);
#endif
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					iv = (struct iovec*) TASK_DATA(task);
					fd = acbs[0]->aio_fildes;
					off = acbs[0]->aio_offset;
					/* BUGFIX: iterate with j (batch index), not i — i is the
					 * outer kevent result index and must not be clobbered */
					for (j = len = 0; j < TASK_DATLEN(task); len += l, j++) {
						if ((iv[j].iov_len = aio_return(acbs[j])) == -1)
							l = 0;
						else
							l = iv[j].iov_len;
						free(acbs[j]);
					}
					free(acbs);
					TASK_DATLEN(task) = (u_long) len;
					TASK_FD(task) = fd;

					if (lseek(fd, off + len, SEEK_CUR) == -1)
						LOGERR;
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
			case EVFILT_USER:
				flg = 0;
				TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
					if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
						continue;
					else {
						flg++;
						TASK_DATA(task) = (void*) res[i].data;
						TASK_DATLEN(task) = res[i].fflags;
					}
					/* remove user handle */
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskUSER]);
#endif
					TAILQ_REMOVE(&r->root_user, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskUSER]);
#endif
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				/* if match at least 2, don't remove resouce of event */
				if (flg > 1)
					evt->flags ^= evt->flags;
				break;
#endif	/* EVFILT_USER */
		}
		/* apply the (possibly neutralized) delete record; non-blocking */
		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
			if (r->root_hooks.hook_exec.exception) {
				if (r->root_hooks.hook_exec.exception(r, NULL))
					return NULL;
			} else
				LOGERR;
		}
	}

	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
#endif
			TAILQ_REMOVE(&r->root_timer, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
#endif
			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		}

	/* put regular task priority task to ready queue,
		if there is no ready task or reach max missing hit events */
	if ((task = TAILQ_FIRST(&r->root_task))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_task_miss > r->root_miss) {
			r->root_task_miss ^= r->root_task_miss;

#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskTASK]);
#endif
			TAILQ_REMOVE(&r->root_task, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskTASK]);
#endif
			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		} else
			r->root_task_miss++;
	} else
		r->root_task_miss ^= r->root_task_miss;

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (!(task))
		return NULL;

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
	TAILQ_REMOVE(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
	task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
	return task;
}
1008:
1009: /*
1010: * sched_hook_exception() - Default EXCEPTION hook
1011: *
1012: * @root = root task
1013: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1014: * return: <0 errors and 0 ok
1015: */
void *
sched_hook_exception(void *root, void *arg)
{
	sched_root_task_t *rootp = root;

	if (!rootp)
		return NULL;

	/* custom exception handling: EV_EOF is benign, anything else
	 * raises a scheduler error */
	if (arg)
		return (arg == (void*) EV_EOF) ? NULL : (void*) -1;

	/* delegate to the user supplied error hook, if one is installed */
	if (rootp->root_hooks.hook_root.error)
		return (rootp->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));

	/* default case: just log errno */
	LOGERR;
	return NULL;
}
1039:
1040: /*
1041: * sched_hook_condition() - Default CONDITION hook
1042: *
1043: * @root = root task
1044: * @arg = killState from schedRun()
1045: * return: NULL kill scheduler loop or !=NULL ok
1046: */
1047: void *
1048: sched_hook_condition(void *root, void *arg)
1049: {
1050: sched_root_task_t *r = root;
1051:
1052: if (!r)
1053: return NULL;
1054:
1055: return (void*) (r->root_cond - *(intptr_t*) arg);
1056: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */