1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.14.2.2 2012/08/22 13:37:23 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: *
53: * @root = root task
54: * @arg = unused
55: * return: <0 errors and 0 ok
56: */
57: void *
58: sched_hook_init(void *root, void *arg __unused)
59: {
60: sched_root_task_t *r = root;
61:
62: if (!r)
63: return (void*) -1;
64:
65: r->root_kq = kqueue();
66: if (r->root_kq == -1) {
67: LOGERR;
68: return (void*) -1;
69: }
70:
71: return NULL;
72: }
73:
74: /*
75: * sched_hook_fini() - Default FINI hook
76: *
77: * @root = root task
78: * @arg = unused
79: * return: <0 errors and 0 ok
80: */
81: void *
82: sched_hook_fini(void *root, void *arg __unused)
83: {
84: sched_root_task_t *r = root;
85:
86: if (!r)
87: return (void*) -1;
88:
89: if (r->root_kq > 2) {
90: close(r->root_kq);
91: r->root_kq = 0;
92: }
93:
94: return NULL;
95: }
96:
97: /*
98: * sched_hook_cancel() - Default CANCEL hook
99: *
100: * @task = current task
101: * @arg = unused
102: * return: <0 errors and 0 ok
103: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: apply the change without blocking */
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	struct aiocb **acbs;
	register int i;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* Build an EV_DELETE change record that mirrors how the task was
	 * registered.  The ident/udata pair must match the ADD side exactly;
	 * NetBSD declares udata as intptr_t while other BSDs use void*,
	 * hence the per-case #ifdef __NetBSD__ casts. */
	switch (TASK_TYPE(t)) {
	case taskREAD:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
		break;
	case taskWRITE:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
		break;
	case taskALARM:
		/* alarm timers are keyed by TASK_DATA, not a descriptor */
#ifdef __NetBSD__
		EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
				0, 0, (intptr_t) TASK_DATA(t));
#else
		EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
				0, 0, (void*) TASK_DATA(t));
#endif
		break;
	case taskNODE:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
		break;
	case taskPROC:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		break;
	case taskSIGNAL:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		break;
#ifdef AIO_SUPPORT
	case taskAIO:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		/* TASK_VAL holds the aiocb pointer; cancel the request,
		 * reap its status if cancellation succeeded, then free it */
		acb = (struct aiocb*) TASK_VAL(t);
		if (acb) {
			if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
				aio_return(acb);
			free(acb);
			TASK_VAL(t) = 0;
		}
		break;
#ifdef EVFILT_LIO
	case taskLIO:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		/* TASK_VAL holds an array of aiocb pointers (list I/O);
		 * TASK_DATLEN is its element count — cancel and free each */
		acbs = (struct aiocb**) TASK_VAL(t);
		if (acbs) {
			for (i = 0; i < TASK_DATLEN(t); i++) {
				if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
					aio_return(acbs[i]);
				free(acbs[i]);
			}
			free(acbs);
			TASK_VAL(t) = 0;
		}
		break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
	case taskUSER:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		break;
#endif
	case taskTHREAD:
		/* threads have no kqueue registration: cancel the thread,
		 * schedule a join for joinable threads, then return via the
		 * intentional fall-through into default below */
#ifdef HAVE_LIBPTHREAD
		pthread_cancel((pthread_t) TASK_VAL(t));
		if (TASK_FLAG(t) == PTHREAD_CREATE_JOINABLE)	/* joinable thread */
			schedTask(TASK_ROOT(t), _sched_threadJoin, TASK_ARG(t),
					TASK_VAL(t), TASK_DATA(t), TASK_DATLEN(t));
#endif
		TASK_UNLOCK(t);
		/* FALLTHROUGH */
	default:
		return NULL;
	}

	/* push the EV_DELETE change; result is deliberately ignored
	 * (the filter may already have fired and been removed) */
	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
	return NULL;
}
225:
226: #ifdef HAVE_LIBPTHREAD
227: /*
228: * sched_hook_thread() - Default THREAD hook
229: *
230: * @task = current task
231: * @arg = pthread attributes
232: * return: <0 errors and 0 ok
233: */
234: void *
235: sched_hook_thread(void *task, void *arg)
236: {
237: sched_task_t *t = task;
238: pthread_t tid;
239: sigset_t ns, os;
240:
241: if (!t || !TASK_ROOT(t))
242: return (void*) -1;
243:
244: sigfillset(&ns);
245: pthread_sigmask(SIG_BLOCK, &ns, &os);
246: if (pthread_create(&tid, (pthread_attr_t*) arg,
247: (void *(*)(void*)) TASK_FUNC(t), t)) {
248: LOGERR;
249: pthread_sigmask(SIG_SETMASK, &os, NULL);
250: return (void*) -1;
251: }
252: pthread_sigmask(SIG_SETMASK, &os, NULL);
253:
254: if (!TASK_ISLOCKED(t))
255: TASK_LOCK(t);
256:
257: TASK_VAL(t) = (u_long) tid;
258: return NULL;
259: }
260: #endif
261:
262: /*
263: * sched_hook_read() - Default READ hook
264: *
265: * @task = current task
266: * @arg = unused
267: * return: <0 errors and 0 ok
268: */
269: void *
270: sched_hook_read(void *task, void *arg __unused)
271: {
272: sched_task_t *t = task;
273: struct kevent chg[1];
274: struct timespec timeout = { 0, 0 };
275:
276: if (!t || !TASK_ROOT(t))
277: return (void*) -1;
278:
279: #ifdef __NetBSD__
280: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
281: #else
282: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
283: #endif
284: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
285: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
286: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
287: else
288: LOGERR;
289: return (void*) -1;
290: }
291:
292: return NULL;
293: }
294:
295: /*
296: * sched_hook_write() - Default WRITE hook
297: *
298: * @task = current task
299: * @arg = unused
300: * return: <0 errors and 0 ok
301: */
302: void *
303: sched_hook_write(void *task, void *arg __unused)
304: {
305: sched_task_t *t = task;
306: struct kevent chg[1];
307: struct timespec timeout = { 0, 0 };
308:
309: if (!t || !TASK_ROOT(t))
310: return (void*) -1;
311:
312: #ifdef __NetBSD__
313: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
314: #else
315: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
316: #endif
317: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
318: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
319: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
320: else
321: LOGERR;
322: return (void*) -1;
323: }
324:
325: return NULL;
326: }
327:
328: /*
329: * sched_hook_alarm() - Default ALARM hook
330: *
331: * @task = current task
332: * @arg = unused
333: * return: <0 errors and 0 ok
334: */
335: void *
336: sched_hook_alarm(void *task, void *arg __unused)
337: {
338: sched_task_t *t = task;
339: struct kevent chg[1];
340: struct timespec timeout = { 0, 0 };
341:
342: if (!t || !TASK_ROOT(t))
343: return (void*) -1;
344:
345: #ifdef __NetBSD__
346: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
347: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
348: (intptr_t) TASK_DATA(t));
349: #else
350: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
351: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
352: (void*) TASK_DATA(t));
353: #endif
354: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
355: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
356: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
357: else
358: LOGERR;
359: return (void*) -1;
360: }
361:
362: return NULL;
363: }
364:
365: /*
366: * sched_hook_node() - Default NODE hook
367: *
368: * @task = current task
369: * @arg = unused
370: * return: <0 errors and 0 ok
371: */
372: void *
373: sched_hook_node(void *task, void *arg __unused)
374: {
375: sched_task_t *t = task;
376: struct kevent chg[1];
377: struct timespec timeout = { 0, 0 };
378:
379: if (!t || !TASK_ROOT(t))
380: return (void*) -1;
381:
382: #ifdef __NetBSD__
383: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
384: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
385: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
386: #else
387: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
388: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
389: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
390: #endif
391: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
392: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
393: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
394: else
395: LOGERR;
396: return (void*) -1;
397: }
398:
399: return NULL;
400: }
401:
402: /*
403: * sched_hook_proc() - Default PROC hook
404: *
405: * @task = current task
406: * @arg = unused
407: * return: <0 errors and 0 ok
408: */
409: void *
410: sched_hook_proc(void *task, void *arg __unused)
411: {
412: sched_task_t *t = task;
413: struct kevent chg[1];
414: struct timespec timeout = { 0, 0 };
415:
416: if (!t || !TASK_ROOT(t))
417: return (void*) -1;
418:
419: #ifdef __NetBSD__
420: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
421: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
422: #else
423: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
424: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
425: #endif
426: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
427: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
428: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
429: else
430: LOGERR;
431: return (void*) -1;
432: }
433:
434: return NULL;
435: }
436:
437: /*
438: * sched_hook_signal() - Default SIGNAL hook
439: *
440: * @task = current task
441: * @arg = unused
442: * return: <0 errors and 0 ok
443: */
444: void *
445: sched_hook_signal(void *task, void *arg __unused)
446: {
447: sched_task_t *t = task;
448: struct kevent chg[1];
449: struct timespec timeout = { 0, 0 };
450:
451: if (!t || !TASK_ROOT(t))
452: return (void*) -1;
453:
454: #ifdef __NetBSD__
455: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (intptr_t) TASK_VAL(t));
456: #else
457: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (void*) TASK_VAL(t));
458: #endif
459: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
460: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
461: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
462: else
463: LOGERR;
464: return (void*) -1;
465: }
466:
467: return NULL;
468: }
469:
470: /*
471: * sched_hook_user() - Default USER hook
472: *
473: * @task = current task
474: * @arg = unused
475: * return: <0 errors and 0 ok
476: */
477: #ifdef EVFILT_USER
478: void *
479: sched_hook_user(void *task, void *arg __unused)
480: {
481: sched_task_t *t = task;
482: struct kevent chg[1];
483: struct timespec timeout = { 0, 0 };
484:
485: if (!t || !TASK_ROOT(t))
486: return (void*) -1;
487:
488: #ifdef __NetBSD__
489: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
490: 0, (intptr_t) TASK_VAL(t));
491: #else
492: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
493: 0, (void*) TASK_VAL(t));
494: #endif
495: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
496: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
497: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
498: else
499: LOGERR;
500: return (void*) -1;
501: }
502:
503: return NULL;
504: }
505: #endif
506:
507: /*
508: * sched_hook_fetch() - Default FETCH hook
509: *
510: * @root = root task
511: * @arg = unused
512: * return: NULL error or !=NULL fetched task
513: */
514: void *
515: sched_hook_fetch(void *root, void *arg __unused)
516: {
517: sched_root_task_t *r = root;
518: sched_task_t *task, *tmp;
519: struct timespec now, m, mtmp;
520: struct timespec *timeout;
521: struct kevent evt[1], res[KQ_EVENTS];
522: register int i, flg;
523: int en;
524: #ifdef AIO_SUPPORT
525: int len, fd;
526: struct aiocb *acb;
527: #ifdef EVFILT_LIO
528: int l;
529: register int j;
530: off_t off;
531: struct aiocb **acbs;
532: struct iovec *iv;
533: #endif /* EVFILT_LIO */
534: #endif /* AIO_SUPPORT */
535:
536: if (!r)
537: return NULL;
538:
539: /* get new task by queue priority */
540: while ((task = TAILQ_FIRST(&r->root_event))) {
541: #ifdef HAVE_LIBPTHREAD
542: pthread_mutex_lock(&r->root_mtx[taskEVENT]);
543: #endif
544: TAILQ_REMOVE(&r->root_event, task, task_node);
545: #ifdef HAVE_LIBPTHREAD
546: pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
547: #endif
548: task->task_type = taskUNUSE;
549: #ifdef HAVE_LIBPTHREAD
550: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
551: #endif
552: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
553: #ifdef HAVE_LIBPTHREAD
554: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
555: #endif
556: return task;
557: }
558: while ((task = TAILQ_FIRST(&r->root_ready))) {
559: #ifdef HAVE_LIBPTHREAD
560: pthread_mutex_lock(&r->root_mtx[taskREADY]);
561: #endif
562: TAILQ_REMOVE(&r->root_ready, task, task_node);
563: #ifdef HAVE_LIBPTHREAD
564: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
565: #endif
566: task->task_type = taskUNUSE;
567: #ifdef HAVE_LIBPTHREAD
568: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
569: #endif
570: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
571: #ifdef HAVE_LIBPTHREAD
572: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
573: #endif
574: return task;
575: }
576:
577: #ifdef TIMER_WITHOUT_SORT
578: clock_gettime(CLOCK_MONOTONIC, &now);
579:
580: sched_timespecclear(&r->root_wait);
581: TAILQ_FOREACH(task, &r->root_timer, task_node) {
582: if (!sched_timespecisset(&r->root_wait))
583: r->root_wait = TASK_TS(task);
584: else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
585: r->root_wait = TASK_TS(task);
586: }
587:
588: if (TAILQ_FIRST(&r->root_timer)) {
589: m = r->root_wait;
590: sched_timespecsub(&m, &now, &mtmp);
591: r->root_wait = mtmp;
592: } else {
593: /* set wait INFTIM */
594: sched_timespecinf(&r->root_wait);
595: }
596: #else
597: if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
598: clock_gettime(CLOCK_MONOTONIC, &now);
599:
600: m = TASK_TS(task);
601: sched_timespecsub(&m, &now, &mtmp);
602: r->root_wait = mtmp;
603: } else {
604: /* set wait INFTIM */
605: sched_timespecinf(&r->root_wait);
606: }
607: #endif
608: /* if present member of task, set NOWAIT */
609: if (TAILQ_FIRST(&r->root_task))
610: sched_timespecclear(&r->root_wait);
611:
612: if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
613: timeout = &r->root_wait;
614: else if (sched_timespecisinf(&r->root_poll))
615: timeout = NULL;
616: else
617: timeout = &r->root_poll;
618: if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
619: if (r->root_hooks.hook_exec.exception) {
620: if (r->root_hooks.hook_exec.exception(r, NULL))
621: return NULL;
622: } else if (errno != EINTR)
623: LOGERR;
624: return NULL;
625: }
626:
627: now.tv_sec = now.tv_nsec = 0;
628: /* Go and catch the cat into pipes ... */
629: for (i = 0; i < en; i++) {
630: memcpy(evt, &res[i], sizeof evt);
631: evt->flags = EV_DELETE;
632: /* Put read/write task to ready queue */
633: switch (res[i].filter) {
634: case EVFILT_READ:
635: flg = 0;
636: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
637: if (TASK_FD(task) != ((intptr_t) res[i].udata))
638: continue;
639: else {
640: flg++;
641: TASK_RET(task) = res[i].data;
642: TASK_FLAG(task) = res[i].fflags;
643: }
644: /* remove read handle */
645: #ifdef HAVE_LIBPTHREAD
646: pthread_mutex_lock(&r->root_mtx[taskREAD]);
647: #endif
648: TAILQ_REMOVE(&r->root_read, task, task_node);
649: #ifdef HAVE_LIBPTHREAD
650: pthread_mutex_unlock(&r->root_mtx[taskREAD]);
651: #endif
652: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
653: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
654: task->task_type = taskUNUSE;
655: #ifdef HAVE_LIBPTHREAD
656: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
657: #endif
658: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
659: #ifdef HAVE_LIBPTHREAD
660: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
661: #endif
662: } else {
663: task->task_type = taskREADY;
664: #ifdef HAVE_LIBPTHREAD
665: pthread_mutex_lock(&r->root_mtx[taskREADY]);
666: #endif
667: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
668: #ifdef HAVE_LIBPTHREAD
669: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
670: #endif
671: }
672: } else {
673: task->task_type = taskREADY;
674: #ifdef HAVE_LIBPTHREAD
675: pthread_mutex_lock(&r->root_mtx[taskREADY]);
676: #endif
677: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
678: #ifdef HAVE_LIBPTHREAD
679: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
680: #endif
681: }
682: }
683: /* if match at least 2, don't remove resouce of event */
684: if (flg > 1)
685: evt->flags ^= evt->flags;
686: break;
687: case EVFILT_WRITE:
688: flg = 0;
689: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
690: if (TASK_FD(task) != ((intptr_t) res[i].udata))
691: continue;
692: else {
693: flg++;
694: TASK_RET(task) = res[i].data;
695: TASK_FLAG(task) = res[i].fflags;
696: }
697: /* remove write handle */
698: #ifdef HAVE_LIBPTHREAD
699: pthread_mutex_lock(&r->root_mtx[taskWRITE]);
700: #endif
701: TAILQ_REMOVE(&r->root_write, task, task_node);
702: #ifdef HAVE_LIBPTHREAD
703: pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
704: #endif
705: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
706: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
707: task->task_type = taskUNUSE;
708: #ifdef HAVE_LIBPTHREAD
709: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
710: #endif
711: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
712: #ifdef HAVE_LIBPTHREAD
713: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
714: #endif
715: } else {
716: task->task_type = taskREADY;
717: #ifdef HAVE_LIBPTHREAD
718: pthread_mutex_lock(&r->root_mtx[taskREADY]);
719: #endif
720: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
721: #ifdef HAVE_LIBPTHREAD
722: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
723: #endif
724: }
725: } else {
726: task->task_type = taskREADY;
727: #ifdef HAVE_LIBPTHREAD
728: pthread_mutex_lock(&r->root_mtx[taskREADY]);
729: #endif
730: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
731: #ifdef HAVE_LIBPTHREAD
732: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
733: #endif
734: }
735: }
736: /* if match at least 2, don't remove resouce of event */
737: if (flg > 1)
738: evt->flags ^= evt->flags;
739: break;
740: case EVFILT_TIMER:
741: flg = 0;
742: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
743: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
744: continue;
745: else {
746: flg++;
747: TASK_RET(task) = res[i].data;
748: TASK_FLAG(task) = res[i].fflags;
749: }
750: /* remove alarm handle */
751: #ifdef HAVE_LIBPTHREAD
752: pthread_mutex_lock(&r->root_mtx[taskALARM]);
753: #endif
754: TAILQ_REMOVE(&r->root_alarm, task, task_node);
755: #ifdef HAVE_LIBPTHREAD
756: pthread_mutex_unlock(&r->root_mtx[taskALARM]);
757: #endif
758: task->task_type = taskREADY;
759: #ifdef HAVE_LIBPTHREAD
760: pthread_mutex_lock(&r->root_mtx[taskREADY]);
761: #endif
762: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
763: #ifdef HAVE_LIBPTHREAD
764: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
765: #endif
766: }
767: /* if match at least 2, don't remove resouce of event */
768: if (flg > 1)
769: evt->flags ^= evt->flags;
770: break;
771: case EVFILT_VNODE:
772: flg = 0;
773: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
774: if (TASK_FD(task) != ((intptr_t) res[i].udata))
775: continue;
776: else {
777: flg++;
778: TASK_RET(task) = res[i].data;
779: TASK_FLAG(task) = res[i].fflags;
780: }
781: /* remove node handle */
782: #ifdef HAVE_LIBPTHREAD
783: pthread_mutex_lock(&r->root_mtx[taskNODE]);
784: #endif
785: TAILQ_REMOVE(&r->root_node, task, task_node);
786: #ifdef HAVE_LIBPTHREAD
787: pthread_mutex_unlock(&r->root_mtx[taskNODE]);
788: #endif
789: task->task_type = taskREADY;
790: #ifdef HAVE_LIBPTHREAD
791: pthread_mutex_lock(&r->root_mtx[taskREADY]);
792: #endif
793: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
794: #ifdef HAVE_LIBPTHREAD
795: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
796: #endif
797: }
798: /* if match at least 2, don't remove resouce of event */
799: if (flg > 1)
800: evt->flags ^= evt->flags;
801: break;
802: case EVFILT_PROC:
803: flg = 0;
804: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
805: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
806: continue;
807: else {
808: flg++;
809: TASK_RET(task) = res[i].data;
810: TASK_FLAG(task) = res[i].fflags;
811: }
812: /* remove proc handle */
813: #ifdef HAVE_LIBPTHREAD
814: pthread_mutex_lock(&r->root_mtx[taskPROC]);
815: #endif
816: TAILQ_REMOVE(&r->root_proc, task, task_node);
817: #ifdef HAVE_LIBPTHREAD
818: pthread_mutex_unlock(&r->root_mtx[taskPROC]);
819: #endif
820: task->task_type = taskREADY;
821: #ifdef HAVE_LIBPTHREAD
822: pthread_mutex_lock(&r->root_mtx[taskREADY]);
823: #endif
824: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
825: #ifdef HAVE_LIBPTHREAD
826: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
827: #endif
828: }
829: /* if match at least 2, don't remove resouce of event */
830: if (flg > 1)
831: evt->flags ^= evt->flags;
832: break;
833: case EVFILT_SIGNAL:
834: flg = 0;
835: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
836: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
837: continue;
838: else {
839: flg++;
840: TASK_RET(task) = res[i].data;
841: TASK_FLAG(task) = res[i].fflags;
842: }
843: /* remove signal handle */
844: #ifdef HAVE_LIBPTHREAD
845: pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
846: #endif
847: TAILQ_REMOVE(&r->root_signal, task, task_node);
848: #ifdef HAVE_LIBPTHREAD
849: pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
850: #endif
851: task->task_type = taskREADY;
852: #ifdef HAVE_LIBPTHREAD
853: pthread_mutex_lock(&r->root_mtx[taskREADY]);
854: #endif
855: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
856: #ifdef HAVE_LIBPTHREAD
857: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
858: #endif
859: }
860: /* if match at least 2, don't remove resouce of event */
861: if (flg > 1)
862: evt->flags ^= evt->flags;
863: break;
864: #ifdef AIO_SUPPORT
865: case EVFILT_AIO:
866: flg = 0;
867: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
868: acb = (struct aiocb*) TASK_VAL(task);
869: if (acb != ((struct aiocb*) res[i].udata))
870: continue;
871: else {
872: flg++;
873: TASK_RET(task) = res[i].data;
874: TASK_FLAG(task) = res[i].fflags;
875: }
876: /* remove user handle */
877: #ifdef HAVE_LIBPTHREAD
878: pthread_mutex_lock(&r->root_mtx[taskAIO]);
879: #endif
880: TAILQ_REMOVE(&r->root_aio, task, task_node);
881: #ifdef HAVE_LIBPTHREAD
882: pthread_mutex_unlock(&r->root_mtx[taskAIO]);
883: #endif
884: task->task_type = taskREADY;
885: #ifdef HAVE_LIBPTHREAD
886: pthread_mutex_lock(&r->root_mtx[taskREADY]);
887: #endif
888: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
889: #ifdef HAVE_LIBPTHREAD
890: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
891: #endif
892: fd = acb->aio_fildes;
893: if ((len = aio_return(acb)) != -1) {
894: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
895: LOGERR;
896: } else
897: LOGERR;
898: free(acb);
899: TASK_DATLEN(task) = (u_long) len;
900: TASK_FD(task) = fd;
901: }
902: /* if match at least 2, don't remove resouce of event */
903: if (flg > 1)
904: evt->flags ^= evt->flags;
905: break;
906: #ifdef EVFILT_LIO
907: case EVFILT_LIO:
908: flg = 0;
909: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
910: acbs = (struct aiocb**) TASK_VAL(task);
911: if (acbs != ((struct aiocb**) res[i].udata))
912: continue;
913: else {
914: flg++;
915: TASK_RET(task) = res[i].data;
916: TASK_FLAG(task) = res[i].fflags;
917: }
918: /* remove user handle */
919: #ifdef HAVE_LIBPTHREAD
920: pthread_mutex_lock(&r->root_mtx[taskLIO]);
921: #endif
922: TAILQ_REMOVE(&r->root_lio, task, task_node);
923: #ifdef HAVE_LIBPTHREAD
924: pthread_mutex_unlock(&r->root_mtx[taskLIO]);
925: #endif
926: task->task_type = taskREADY;
927: #ifdef HAVE_LIBPTHREAD
928: pthread_mutex_lock(&r->root_mtx[taskREADY]);
929: #endif
930: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
931: #ifdef HAVE_LIBPTHREAD
932: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
933: #endif
934: iv = (struct iovec*) TASK_DATA(task);
935: fd = acbs[0]->aio_fildes;
936: off = acbs[0]->aio_offset;
937: for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
938: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
939: l = 0;
940: else
941: l = iv[i].iov_len;
942: free(acbs[i]);
943: }
944: free(acbs);
945: TASK_DATLEN(task) = (u_long) len;
946: TASK_FD(task) = fd;
947:
948: if (lseek(fd, off + len, SEEK_CUR) == -1)
949: LOGERR;
950: }
951: /* if match at least 2, don't remove resouce of event */
952: if (flg > 1)
953: evt->flags ^= evt->flags;
954: break;
955: #endif /* EVFILT_LIO */
956: #endif /* AIO_SUPPORT */
957: #ifdef EVFILT_USER
958: case EVFILT_USER:
959: flg = 0;
960: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
961: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
962: continue;
963: else {
964: flg++;
965: TASK_RET(task) = res[i].data;
966: TASK_FLAG(task) = res[i].fflags;
967: }
968: /* remove user handle */
969: #ifdef HAVE_LIBPTHREAD
970: pthread_mutex_lock(&r->root_mtx[taskUSER]);
971: #endif
972: TAILQ_REMOVE(&r->root_user, task, task_node);
973: #ifdef HAVE_LIBPTHREAD
974: pthread_mutex_unlock(&r->root_mtx[taskUSER]);
975: #endif
976: task->task_type = taskREADY;
977: #ifdef HAVE_LIBPTHREAD
978: pthread_mutex_lock(&r->root_mtx[taskREADY]);
979: #endif
980: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
981: #ifdef HAVE_LIBPTHREAD
982: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
983: #endif
984: }
985: /* if match at least 2, don't remove resouce of event */
986: if (flg > 1)
987: evt->flags ^= evt->flags;
988: break;
989: #endif /* EVFILT_USER */
990: }
991: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
992: if (r->root_hooks.hook_exec.exception) {
993: if (r->root_hooks.hook_exec.exception(r, NULL))
994: return NULL;
995: } else
996: LOGERR;
997: }
998: }
999:
1000: /* timer update & put in ready queue */
1001: clock_gettime(CLOCK_MONOTONIC, &now);
1002:
1003: TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
1004: if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
1005: #ifdef HAVE_LIBPTHREAD
1006: pthread_mutex_lock(&r->root_mtx[taskTIMER]);
1007: #endif
1008: TAILQ_REMOVE(&r->root_timer, task, task_node);
1009: #ifdef HAVE_LIBPTHREAD
1010: pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
1011: #endif
1012: task->task_type = taskREADY;
1013: #ifdef HAVE_LIBPTHREAD
1014: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1015: #endif
1016: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1017: #ifdef HAVE_LIBPTHREAD
1018: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1019: #endif
1020: }
1021:
1022: /* put regular task priority task to ready queue,
1023: if there is no ready task or reach max missing hit for regular task */
1024: if ((task = TAILQ_FIRST(&r->root_task))) {
1025: if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
1026: r->root_miss ^= r->root_miss;
1027:
1028: #ifdef HAVE_LIBPTHREAD
1029: pthread_mutex_lock(&r->root_mtx[taskTASK]);
1030: #endif
1031: TAILQ_REMOVE(&r->root_task, task, task_node);
1032: #ifdef HAVE_LIBPTHREAD
1033: pthread_mutex_unlock(&r->root_mtx[taskTASK]);
1034: #endif
1035: task->task_type = taskREADY;
1036: #ifdef HAVE_LIBPTHREAD
1037: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1038: #endif
1039: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
1040: #ifdef HAVE_LIBPTHREAD
1041: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1042: #endif
1043: } else
1044: r->root_miss++;
1045: } else
1046: r->root_miss ^= r->root_miss;
1047:
1048: /* OK, lets get ready task !!! */
1049: task = TAILQ_FIRST(&r->root_ready);
1050: if (!(task))
1051: return NULL;
1052:
1053: #ifdef HAVE_LIBPTHREAD
1054: pthread_mutex_lock(&r->root_mtx[taskREADY]);
1055: #endif
1056: TAILQ_REMOVE(&r->root_ready, task, task_node);
1057: #ifdef HAVE_LIBPTHREAD
1058: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1059: #endif
1060: task->task_type = taskUNUSE;
1061: #ifdef HAVE_LIBPTHREAD
1062: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1063: #endif
1064: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1065: #ifdef HAVE_LIBPTHREAD
1066: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1067: #endif
1068: return task;
1069: }
1070:
1071: /*
1072: * sched_hook_exception() - Default EXCEPTION hook
1073: *
1074: * @root = root task
1075: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1076: * return: <0 errors and 0 ok
1077: */
1078: void *
1079: sched_hook_exception(void *root, void *arg)
1080: {
1081: sched_root_task_t *r = root;
1082:
1083: if (!r)
1084: return NULL;
1085:
1086: /* custom exception handling ... */
1087: if (arg) {
1088: if (arg == (void*) EV_EOF)
1089: return NULL;
1090: return (void*) -1; /* raise scheduler error!!! */
1091: }
1092:
1093: /* if error hook exists */
1094: if (r->root_hooks.hook_root.error)
1095: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1096:
1097: /* default case! */
1098: LOGERR;
1099: return NULL;
1100: }
1101:
1102: /*
1103: * sched_hook_condition() - Default CONDITION hook
1104: *
1105: * @root = root task
1106: * @arg = killState from schedRun()
1107: * return: NULL kill scheduler loop or !=NULL ok
1108: */
1109: void *
1110: sched_hook_condition(void *root, void *arg)
1111: {
1112: sched_root_task_t *r = root;
1113:
1114: if (!r)
1115: return NULL;
1116:
1117: return (void*) (r->root_cond - *(intptr_t*) arg);
1118: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */