1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.13.2.1 2012/08/21 11:07:16 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: *
53: * @root = root task
54: * @arg = unused
55: * return: <0 errors and 0 ok
56: */
57: void *
58: sched_hook_init(void *root, void *arg __unused)
59: {
60: sched_root_task_t *r = root;
61:
62: if (!r)
63: return (void*) -1;
64:
65: r->root_kq = kqueue();
66: if (r->root_kq == -1) {
67: LOGERR;
68: return (void*) -1;
69: }
70:
71: return NULL;
72: }
73:
74: /*
75: * sched_hook_fini() - Default FINI hook
76: *
77: * @root = root task
78: * @arg = unused
79: * return: <0 errors and 0 ok
80: */
81: void *
82: sched_hook_fini(void *root, void *arg __unused)
83: {
84: sched_root_task_t *r = root;
85:
86: if (!r)
87: return (void*) -1;
88:
89: if (r->root_kq > 2) {
90: close(r->root_kq);
91: r->root_kq = 0;
92: }
93:
94: return NULL;
95: }
96:
97: /*
98: * sched_hook_cancel() - Default CANCEL hook
99: *
100: * @task = current task
101: * @arg = unused
102: * return: <0 errors and 0 ok
103: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };	/* zero timeout: apply change list without blocking */
#ifdef AIO_SUPPORT
	struct aiocb *acb;
#ifdef EVFILT_LIO
	struct aiocb **acbs;
	register int i;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */

	if (!t || !TASK_ROOT(t))
		return (void*) -1;

	/* Build one EV_DELETE change record matching how this task type was
	 * registered.  NetBSD declares kevent's udata as intptr_t while the
	 * other BSDs declare it void*, hence the per-OS casts throughout. */
	switch (TASK_TYPE(t)) {
	case taskREAD:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
		break;
	case taskWRITE:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
		break;
	case taskALARM:
		/* alarms are keyed by their opaque TASK_DATA pointer, used as timer ident */
#ifdef __NetBSD__
		EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
		    0, 0, (intptr_t) TASK_DATA(t));
#else
		EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_DELETE,
		    0, 0, (void*) TASK_DATA(t));
#endif
		break;
	case taskNODE:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
		EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
		break;
	case taskPROC:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		break;
	case taskSIGNAL:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		break;
#ifdef AIO_SUPPORT
	case taskAIO:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_AIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		/* TASK_VAL holds the aiocb pointer: cancel the request, reap its
		 * status if cancellation succeeded, then free the control block */
		acb = (struct aiocb*) TASK_VAL(t);
		if (acb) {
			if (aio_cancel(acb->aio_fildes, acb) == AIO_CANCELED)
				aio_return(acb);
			free(acb);
			TASK_VAL(t) = 0;
		}
		break;
#ifdef EVFILT_LIO
	case taskLIO:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_LIO, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		/* list-I/O: TASK_VAL holds an array of aiocb pointers, one per
		 * element, TASK_DATLEN its length; cancel and free each entry */
		acbs = (struct aiocb**) TASK_VAL(t);
		if (acbs) {
			for (i = 0; i < TASK_DATLEN(t); i++) {
				if (aio_cancel(acbs[i]->aio_fildes, acbs[i]) == AIO_CANCELED)
					aio_return(acbs[i]);
				free(acbs[i]);
			}
			free(acbs);
			TASK_VAL(t) = 0;
		}
		break;
#endif	/* EVFILT_LIO */
#endif	/* AIO_SUPPORT */
#ifdef EVFILT_USER
	case taskUSER:
#ifdef __NetBSD__
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (intptr_t) TASK_VAL(t));
#else
		EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_DELETE, 0, 0, (void*) TASK_VAL(t));
#endif
		break;
#endif
	case taskTHREAD:
#ifdef HAVE_LIBPTHREAD
		pthread_cancel((pthread_t) TASK_VAL(t));
#endif
		/* intentional fallthrough: thread tasks (and unknown types) have
		 * no kqueue registration, so return before touching kevent() */
	default:
		return NULL;
	}

	/* flush the single EV_DELETE change; no result events requested.
	 * NOTE(review): kevent() failure is deliberately ignored here —
	 * the registration may already be gone (e.g. fd closed) */
	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
	return NULL;
}
221:
222: /*
223: * sched_hook_read() - Default READ hook
224: *
225: * @task = current task
226: * @arg = unused
227: * return: <0 errors and 0 ok
228: */
229: void *
230: sched_hook_read(void *task, void *arg __unused)
231: {
232: sched_task_t *t = task;
233: struct kevent chg[1];
234: struct timespec timeout = { 0, 0 };
235:
236: if (!t || !TASK_ROOT(t))
237: return (void*) -1;
238:
239: #ifdef __NetBSD__
240: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
241: #else
242: EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
243: #endif
244: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
245: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
246: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
247: else
248: LOGERR;
249: return (void*) -1;
250: }
251:
252: return NULL;
253: }
254:
255: /*
256: * sched_hook_write() - Default WRITE hook
257: *
258: * @task = current task
259: * @arg = unused
260: * return: <0 errors and 0 ok
261: */
262: void *
263: sched_hook_write(void *task, void *arg __unused)
264: {
265: sched_task_t *t = task;
266: struct kevent chg[1];
267: struct timespec timeout = { 0, 0 };
268:
269: if (!t || !TASK_ROOT(t))
270: return (void*) -1;
271:
272: #ifdef __NetBSD__
273: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (intptr_t) TASK_FD(t));
274: #else
275: EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, (void*) TASK_FD(t));
276: #endif
277: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
278: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
279: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
280: else
281: LOGERR;
282: return (void*) -1;
283: }
284:
285: return NULL;
286: }
287:
288: /*
289: * sched_hook_alarm() - Default ALARM hook
290: *
291: * @task = current task
292: * @arg = unused
293: * return: <0 errors and 0 ok
294: */
295: void *
296: sched_hook_alarm(void *task, void *arg __unused)
297: {
298: sched_task_t *t = task;
299: struct kevent chg[1];
300: struct timespec timeout = { 0, 0 };
301:
302: if (!t || !TASK_ROOT(t))
303: return (void*) -1;
304:
305: #ifdef __NetBSD__
306: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
307: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
308: (intptr_t) TASK_DATA(t));
309: #else
310: EV_SET(&chg[0], (uintptr_t) TASK_DATA(t), EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
311: t->task_val.ts.tv_sec * 1000 + t->task_val.ts.tv_nsec / 1000000,
312: (void*) TASK_DATA(t));
313: #endif
314: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
315: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
316: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
317: else
318: LOGERR;
319: return (void*) -1;
320: }
321:
322: return NULL;
323: }
324:
325: /*
326: * sched_hook_node() - Default NODE hook
327: *
328: * @task = current task
329: * @arg = unused
330: * return: <0 errors and 0 ok
331: */
332: void *
333: sched_hook_node(void *task, void *arg __unused)
334: {
335: sched_task_t *t = task;
336: struct kevent chg[1];
337: struct timespec timeout = { 0, 0 };
338:
339: if (!t || !TASK_ROOT(t))
340: return (void*) -1;
341:
342: #ifdef __NetBSD__
343: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
344: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
345: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (intptr_t) TASK_FD(t));
346: #else
347: EV_SET(&chg[0], TASK_FD(t), EVFILT_VNODE, EV_ADD | EV_CLEAR,
348: NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
349: NOTE_LINK | NOTE_RENAME | NOTE_REVOKE, 0, (void*) TASK_FD(t));
350: #endif
351: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
352: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
353: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
354: else
355: LOGERR;
356: return (void*) -1;
357: }
358:
359: return NULL;
360: }
361:
362: /*
363: * sched_hook_proc() - Default PROC hook
364: *
365: * @task = current task
366: * @arg = unused
367: * return: <0 errors and 0 ok
368: */
369: void *
370: sched_hook_proc(void *task, void *arg __unused)
371: {
372: sched_task_t *t = task;
373: struct kevent chg[1];
374: struct timespec timeout = { 0, 0 };
375:
376: if (!t || !TASK_ROOT(t))
377: return (void*) -1;
378:
379: #ifdef __NetBSD__
380: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
381: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (intptr_t) TASK_VAL(t));
382: #else
383: EV_SET(&chg[0], TASK_VAL(t), EVFILT_PROC, EV_ADD | EV_CLEAR,
384: NOTE_EXIT | NOTE_FORK | NOTE_EXEC | NOTE_TRACK, 0, (void*) TASK_VAL(t));
385: #endif
386: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
387: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
388: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
389: else
390: LOGERR;
391: return (void*) -1;
392: }
393:
394: return NULL;
395: }
396:
397: /*
398: * sched_hook_signal() - Default SIGNAL hook
399: *
400: * @task = current task
401: * @arg = unused
402: * return: <0 errors and 0 ok
403: */
404: void *
405: sched_hook_signal(void *task, void *arg __unused)
406: {
407: sched_task_t *t = task;
408: struct kevent chg[1];
409: struct timespec timeout = { 0, 0 };
410:
411: if (!t || !TASK_ROOT(t))
412: return (void*) -1;
413:
414: #ifdef __NetBSD__
415: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (intptr_t) TASK_VAL(t));
416: #else
417: EV_SET(&chg[0], TASK_VAL(t), EVFILT_SIGNAL, EV_ADD, 0, 0, (void*) TASK_VAL(t));
418: #endif
419: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
420: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
421: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
422: else
423: LOGERR;
424: return (void*) -1;
425: }
426:
427: return NULL;
428: }
429:
430: /*
431: * sched_hook_user() - Default USER hook
432: *
433: * @task = current task
434: * @arg = unused
435: * return: <0 errors and 0 ok
436: */
437: #ifdef EVFILT_USER
438: void *
439: sched_hook_user(void *task, void *arg __unused)
440: {
441: sched_task_t *t = task;
442: struct kevent chg[1];
443: struct timespec timeout = { 0, 0 };
444:
445: if (!t || !TASK_ROOT(t))
446: return (void*) -1;
447:
448: #ifdef __NetBSD__
449: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
450: 0, (intptr_t) TASK_VAL(t));
451: #else
452: EV_SET(&chg[0], TASK_VAL(t), EVFILT_USER, EV_ADD | EV_CLEAR, TASK_DATLEN(t),
453: 0, (void*) TASK_VAL(t));
454: #endif
455: if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
456: if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
457: TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
458: else
459: LOGERR;
460: return (void*) -1;
461: }
462:
463: return NULL;
464: }
465: #endif
466:
467: /*
468: * sched_hook_fetch() - Default FETCH hook
469: *
470: * @root = root task
471: * @arg = unused
472: * return: NULL error or !=NULL fetched task
473: */
474: void *
475: sched_hook_fetch(void *root, void *arg __unused)
476: {
477: sched_root_task_t *r = root;
478: sched_task_t *task, *tmp;
479: struct timespec now, m, mtmp;
480: struct timespec *timeout;
481: struct kevent evt[1], res[KQ_EVENTS];
482: register int i, flg;
483: int en;
484: #ifdef AIO_SUPPORT
485: int len, fd;
486: struct aiocb *acb;
487: #ifdef EVFILT_LIO
488: int l;
489: register int j;
490: off_t off;
491: struct aiocb **acbs;
492: struct iovec *iv;
493: #endif /* EVFILT_LIO */
494: #endif /* AIO_SUPPORT */
495:
496: if (!r)
497: return NULL;
498:
499: /* get new task by queue priority */
500: while ((task = TAILQ_FIRST(&r->root_event))) {
501: #ifdef HAVE_LIBPTHREAD
502: pthread_mutex_lock(&r->root_mtx[taskEVENT]);
503: #endif
504: TAILQ_REMOVE(&r->root_event, task, task_node);
505: #ifdef HAVE_LIBPTHREAD
506: pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
507: #endif
508: task->task_type = taskUNUSE;
509: #ifdef HAVE_LIBPTHREAD
510: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
511: #endif
512: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
513: #ifdef HAVE_LIBPTHREAD
514: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
515: #endif
516: return task;
517: }
518: while ((task = TAILQ_FIRST(&r->root_ready))) {
519: #ifdef HAVE_LIBPTHREAD
520: pthread_mutex_lock(&r->root_mtx[taskREADY]);
521: #endif
522: TAILQ_REMOVE(&r->root_ready, task, task_node);
523: #ifdef HAVE_LIBPTHREAD
524: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
525: #endif
526: task->task_type = taskUNUSE;
527: #ifdef HAVE_LIBPTHREAD
528: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
529: #endif
530: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
531: #ifdef HAVE_LIBPTHREAD
532: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
533: #endif
534: return task;
535: }
536:
537: #ifdef TIMER_WITHOUT_SORT
538: clock_gettime(CLOCK_MONOTONIC, &now);
539:
540: sched_timespecclear(&r->root_wait);
541: TAILQ_FOREACH(task, &r->root_timer, task_node) {
542: if (!sched_timespecisset(&r->root_wait))
543: r->root_wait = TASK_TS(task);
544: else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
545: r->root_wait = TASK_TS(task);
546: }
547:
548: if (TAILQ_FIRST(&r->root_timer)) {
549: m = r->root_wait;
550: sched_timespecsub(&m, &now, &mtmp);
551: r->root_wait = mtmp;
552: } else {
553: /* set wait INFTIM */
554: sched_timespecinf(&r->root_wait);
555: }
556: #else
557: if (!TAILQ_FIRST(&r->root_task) && (task = TAILQ_FIRST(&r->root_timer))) {
558: clock_gettime(CLOCK_MONOTONIC, &now);
559:
560: m = TASK_TS(task);
561: sched_timespecsub(&m, &now, &mtmp);
562: r->root_wait = mtmp;
563: } else {
564: /* set wait INFTIM */
565: sched_timespecinf(&r->root_wait);
566: }
567: #endif
568: /* if present member of task, set NOWAIT */
569: if (TAILQ_FIRST(&r->root_task))
570: sched_timespecclear(&r->root_wait);
571:
572: if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
573: timeout = &r->root_wait;
574: else if (sched_timespecisinf(&r->root_poll))
575: timeout = NULL;
576: else
577: timeout = &r->root_poll;
578: if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
579: if (r->root_hooks.hook_exec.exception) {
580: if (r->root_hooks.hook_exec.exception(r, NULL))
581: return NULL;
582: } else if (errno != EINTR)
583: LOGERR;
584: return NULL;
585: }
586:
587: now.tv_sec = now.tv_nsec = 0;
588: /* Go and catch the cat into pipes ... */
589: for (i = 0; i < en; i++) {
590: memcpy(evt, &res[i], sizeof evt);
591: evt->flags = EV_DELETE;
592: /* Put read/write task to ready queue */
593: switch (res[i].filter) {
594: case EVFILT_READ:
595: flg = 0;
596: TAILQ_FOREACH_SAFE(task, &r->root_read, task_node, tmp) {
597: if (TASK_FD(task) != ((intptr_t) res[i].udata))
598: continue;
599: else
600: flg++;
601: /* remove read handle */
602: #ifdef HAVE_LIBPTHREAD
603: pthread_mutex_lock(&r->root_mtx[taskREAD]);
604: #endif
605: TAILQ_REMOVE(&r->root_read, task, task_node);
606: #ifdef HAVE_LIBPTHREAD
607: pthread_mutex_unlock(&r->root_mtx[taskREAD]);
608: #endif
609: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
610: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
611: task->task_type = taskUNUSE;
612: #ifdef HAVE_LIBPTHREAD
613: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
614: #endif
615: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
616: #ifdef HAVE_LIBPTHREAD
617: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
618: #endif
619: } else {
620: task->task_type = taskREADY;
621: #ifdef HAVE_LIBPTHREAD
622: pthread_mutex_lock(&r->root_mtx[taskREADY]);
623: #endif
624: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
625: #ifdef HAVE_LIBPTHREAD
626: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
627: #endif
628: }
629: } else {
630: task->task_type = taskREADY;
631: #ifdef HAVE_LIBPTHREAD
632: pthread_mutex_lock(&r->root_mtx[taskREADY]);
633: #endif
634: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
635: #ifdef HAVE_LIBPTHREAD
636: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
637: #endif
638: }
639: }
640: /* if match at least 2, don't remove resouce of event */
641: if (flg > 1)
642: evt->flags ^= evt->flags;
643: break;
644: case EVFILT_WRITE:
645: flg = 0;
646: TAILQ_FOREACH_SAFE(task, &r->root_write, task_node, tmp) {
647: if (TASK_FD(task) != ((intptr_t) res[i].udata))
648: continue;
649: else
650: flg++;
651: /* remove write handle */
652: #ifdef HAVE_LIBPTHREAD
653: pthread_mutex_lock(&r->root_mtx[taskWRITE]);
654: #endif
655: TAILQ_REMOVE(&r->root_write, task, task_node);
656: #ifdef HAVE_LIBPTHREAD
657: pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
658: #endif
659: if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
660: if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
661: task->task_type = taskUNUSE;
662: #ifdef HAVE_LIBPTHREAD
663: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
664: #endif
665: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
666: #ifdef HAVE_LIBPTHREAD
667: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
668: #endif
669: } else {
670: task->task_type = taskREADY;
671: #ifdef HAVE_LIBPTHREAD
672: pthread_mutex_lock(&r->root_mtx[taskREADY]);
673: #endif
674: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
675: #ifdef HAVE_LIBPTHREAD
676: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
677: #endif
678: }
679: } else {
680: task->task_type = taskREADY;
681: #ifdef HAVE_LIBPTHREAD
682: pthread_mutex_lock(&r->root_mtx[taskREADY]);
683: #endif
684: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
685: #ifdef HAVE_LIBPTHREAD
686: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
687: #endif
688: }
689: }
690: /* if match at least 2, don't remove resouce of event */
691: if (flg > 1)
692: evt->flags ^= evt->flags;
693: break;
694: case EVFILT_TIMER:
695: flg = 0;
696: TAILQ_FOREACH_SAFE(task, &r->root_alarm, task_node, tmp) {
697: if ((uintptr_t) TASK_DATA(task) != ((uintptr_t) res[i].udata))
698: continue;
699: else
700: flg++;
701: /* remove alarm handle */
702: #ifdef HAVE_LIBPTHREAD
703: pthread_mutex_lock(&r->root_mtx[taskALARM]);
704: #endif
705: TAILQ_REMOVE(&r->root_alarm, task, task_node);
706: #ifdef HAVE_LIBPTHREAD
707: pthread_mutex_unlock(&r->root_mtx[taskALARM]);
708: #endif
709: task->task_type = taskREADY;
710: #ifdef HAVE_LIBPTHREAD
711: pthread_mutex_lock(&r->root_mtx[taskREADY]);
712: #endif
713: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
714: #ifdef HAVE_LIBPTHREAD
715: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
716: #endif
717: }
718: /* if match at least 2, don't remove resouce of event */
719: if (flg > 1)
720: evt->flags ^= evt->flags;
721: break;
722: case EVFILT_VNODE:
723: flg = 0;
724: TAILQ_FOREACH_SAFE(task, &r->root_node, task_node, tmp) {
725: if (TASK_FD(task) != ((intptr_t) res[i].udata))
726: continue;
727: else {
728: flg++;
729: TASK_DATA(task) = (void*) (uintptr_t) res[i].data;
730: TASK_DATLEN(task) = res[i].fflags;
731: }
732: /* remove node handle */
733: #ifdef HAVE_LIBPTHREAD
734: pthread_mutex_lock(&r->root_mtx[taskNODE]);
735: #endif
736: TAILQ_REMOVE(&r->root_node, task, task_node);
737: #ifdef HAVE_LIBPTHREAD
738: pthread_mutex_unlock(&r->root_mtx[taskNODE]);
739: #endif
740: task->task_type = taskREADY;
741: #ifdef HAVE_LIBPTHREAD
742: pthread_mutex_lock(&r->root_mtx[taskREADY]);
743: #endif
744: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
745: #ifdef HAVE_LIBPTHREAD
746: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
747: #endif
748: }
749: /* if match at least 2, don't remove resouce of event */
750: if (flg > 1)
751: evt->flags ^= evt->flags;
752: break;
753: case EVFILT_PROC:
754: flg = 0;
755: TAILQ_FOREACH_SAFE(task, &r->root_proc, task_node, tmp) {
756: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
757: continue;
758: else {
759: flg++;
760: TASK_DATA(task) = (void*) (uintptr_t) res[i].data;
761: TASK_DATLEN(task) = res[i].fflags;
762: }
763: /* remove proc handle */
764: #ifdef HAVE_LIBPTHREAD
765: pthread_mutex_lock(&r->root_mtx[taskPROC]);
766: #endif
767: TAILQ_REMOVE(&r->root_proc, task, task_node);
768: #ifdef HAVE_LIBPTHREAD
769: pthread_mutex_unlock(&r->root_mtx[taskPROC]);
770: #endif
771: task->task_type = taskREADY;
772: #ifdef HAVE_LIBPTHREAD
773: pthread_mutex_lock(&r->root_mtx[taskREADY]);
774: #endif
775: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
776: #ifdef HAVE_LIBPTHREAD
777: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
778: #endif
779: }
780: /* if match at least 2, don't remove resouce of event */
781: if (flg > 1)
782: evt->flags ^= evt->flags;
783: break;
784: case EVFILT_SIGNAL:
785: flg = 0;
786: TAILQ_FOREACH_SAFE(task, &r->root_signal, task_node, tmp) {
787: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
788: continue;
789: else
790: flg++;
791: /* remove signal handle */
792: #ifdef HAVE_LIBPTHREAD
793: pthread_mutex_lock(&r->root_mtx[taskSIGNAL]);
794: #endif
795: TAILQ_REMOVE(&r->root_signal, task, task_node);
796: #ifdef HAVE_LIBPTHREAD
797: pthread_mutex_unlock(&r->root_mtx[taskSIGNAL]);
798: #endif
799: task->task_type = taskREADY;
800: #ifdef HAVE_LIBPTHREAD
801: pthread_mutex_lock(&r->root_mtx[taskREADY]);
802: #endif
803: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
804: #ifdef HAVE_LIBPTHREAD
805: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
806: #endif
807: }
808: /* if match at least 2, don't remove resouce of event */
809: if (flg > 1)
810: evt->flags ^= evt->flags;
811: break;
812: #ifdef AIO_SUPPORT
813: case EVFILT_AIO:
814: flg = 0;
815: TAILQ_FOREACH_SAFE(task, &r->root_aio, task_node, tmp) {
816: acb = (struct aiocb*) TASK_VAL(task);
817: if (acb != ((struct aiocb*) res[i].udata))
818: continue;
819: else
820: flg++;
821: /* remove user handle */
822: #ifdef HAVE_LIBPTHREAD
823: pthread_mutex_lock(&r->root_mtx[taskAIO]);
824: #endif
825: TAILQ_REMOVE(&r->root_aio, task, task_node);
826: #ifdef HAVE_LIBPTHREAD
827: pthread_mutex_unlock(&r->root_mtx[taskAIO]);
828: #endif
829: task->task_type = taskREADY;
830: #ifdef HAVE_LIBPTHREAD
831: pthread_mutex_lock(&r->root_mtx[taskREADY]);
832: #endif
833: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
834: #ifdef HAVE_LIBPTHREAD
835: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
836: #endif
837: fd = acb->aio_fildes;
838: if ((len = aio_return(acb)) != -1) {
839: if (lseek(fd, acb->aio_offset + len, SEEK_CUR) == -1)
840: LOGERR;
841: } else
842: LOGERR;
843: free(acb);
844: TASK_DATLEN(task) = (u_long) len;
845: TASK_FD(task) = fd;
846: }
847: /* if match at least 2, don't remove resouce of event */
848: if (flg > 1)
849: evt->flags ^= evt->flags;
850: break;
851: #ifdef EVFILT_LIO
852: case EVFILT_LIO:
853: flg = 0;
854: TAILQ_FOREACH_SAFE(task, &r->root_lio, task_node, tmp) {
855: acbs = (struct aiocb**) TASK_VAL(task);
856: if (acbs != ((struct aiocb**) res[i].udata))
857: continue;
858: else
859: flg++;
860: /* remove user handle */
861: #ifdef HAVE_LIBPTHREAD
862: pthread_mutex_lock(&r->root_mtx[taskLIO]);
863: #endif
864: TAILQ_REMOVE(&r->root_lio, task, task_node);
865: #ifdef HAVE_LIBPTHREAD
866: pthread_mutex_unlock(&r->root_mtx[taskLIO]);
867: #endif
868: task->task_type = taskREADY;
869: #ifdef HAVE_LIBPTHREAD
870: pthread_mutex_lock(&r->root_mtx[taskREADY]);
871: #endif
872: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
873: #ifdef HAVE_LIBPTHREAD
874: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
875: #endif
876: iv = (struct iovec*) TASK_DATA(task);
877: fd = acbs[0]->aio_fildes;
878: off = acbs[0]->aio_offset;
879: for (j = len = 0; i < TASK_DATLEN(task); len += l, i++) {
880: if ((iv[i].iov_len = aio_return(acbs[i])) == -1)
881: l = 0;
882: else
883: l = iv[i].iov_len;
884: free(acbs[i]);
885: }
886: free(acbs);
887: TASK_DATLEN(task) = (u_long) len;
888: TASK_FD(task) = fd;
889:
890: if (lseek(fd, off + len, SEEK_CUR) == -1)
891: LOGERR;
892: }
893: /* if match at least 2, don't remove resouce of event */
894: if (flg > 1)
895: evt->flags ^= evt->flags;
896: break;
897: #endif /* EVFILT_LIO */
898: #endif /* AIO_SUPPORT */
899: #ifdef EVFILT_USER
900: case EVFILT_USER:
901: flg = 0;
902: TAILQ_FOREACH_SAFE(task, &r->root_user, task_node, tmp) {
903: if (TASK_VAL(task) != ((uintptr_t) res[i].udata))
904: continue;
905: else {
906: flg++;
907: TASK_DATA(task) = (void*) res[i].data;
908: TASK_DATLEN(task) = res[i].fflags;
909: }
910: /* remove user handle */
911: #ifdef HAVE_LIBPTHREAD
912: pthread_mutex_lock(&r->root_mtx[taskUSER]);
913: #endif
914: TAILQ_REMOVE(&r->root_user, task, task_node);
915: #ifdef HAVE_LIBPTHREAD
916: pthread_mutex_unlock(&r->root_mtx[taskUSER]);
917: #endif
918: task->task_type = taskREADY;
919: #ifdef HAVE_LIBPTHREAD
920: pthread_mutex_lock(&r->root_mtx[taskREADY]);
921: #endif
922: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
923: #ifdef HAVE_LIBPTHREAD
924: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
925: #endif
926: }
927: /* if match at least 2, don't remove resouce of event */
928: if (flg > 1)
929: evt->flags ^= evt->flags;
930: break;
931: #endif /* EVFILT_USER */
932: }
933: if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
934: if (r->root_hooks.hook_exec.exception) {
935: if (r->root_hooks.hook_exec.exception(r, NULL))
936: return NULL;
937: } else
938: LOGERR;
939: }
940: }
941:
942: /* timer update & put in ready queue */
943: clock_gettime(CLOCK_MONOTONIC, &now);
944:
945: TAILQ_FOREACH_SAFE(task, &r->root_timer, task_node, tmp)
946: if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
947: #ifdef HAVE_LIBPTHREAD
948: pthread_mutex_lock(&r->root_mtx[taskTIMER]);
949: #endif
950: TAILQ_REMOVE(&r->root_timer, task, task_node);
951: #ifdef HAVE_LIBPTHREAD
952: pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
953: #endif
954: task->task_type = taskREADY;
955: #ifdef HAVE_LIBPTHREAD
956: pthread_mutex_lock(&r->root_mtx[taskREADY]);
957: #endif
958: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
959: #ifdef HAVE_LIBPTHREAD
960: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
961: #endif
962: }
963:
964: /* put regular task priority task to ready queue,
965: if there is no ready task or reach max missing hit for regular task */
966: if ((task = TAILQ_FIRST(&r->root_task))) {
967: if (!TAILQ_FIRST(&r->root_ready) || r->root_miss >= TASK_VAL(task)) {
968: r->root_miss ^= r->root_miss;
969:
970: #ifdef HAVE_LIBPTHREAD
971: pthread_mutex_lock(&r->root_mtx[taskTASK]);
972: #endif
973: TAILQ_REMOVE(&r->root_task, task, task_node);
974: #ifdef HAVE_LIBPTHREAD
975: pthread_mutex_unlock(&r->root_mtx[taskTASK]);
976: #endif
977: task->task_type = taskREADY;
978: #ifdef HAVE_LIBPTHREAD
979: pthread_mutex_lock(&r->root_mtx[taskREADY]);
980: #endif
981: TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
982: #ifdef HAVE_LIBPTHREAD
983: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
984: #endif
985: } else
986: r->root_miss++;
987: } else
988: r->root_miss ^= r->root_miss;
989:
990: /* OK, lets get ready task !!! */
991: task = TAILQ_FIRST(&r->root_ready);
992: if (!(task))
993: return NULL;
994:
995: #ifdef HAVE_LIBPTHREAD
996: pthread_mutex_lock(&r->root_mtx[taskREADY]);
997: #endif
998: TAILQ_REMOVE(&r->root_ready, task, task_node);
999: #ifdef HAVE_LIBPTHREAD
1000: pthread_mutex_unlock(&r->root_mtx[taskREADY]);
1001: #endif
1002: task->task_type = taskUNUSE;
1003: #ifdef HAVE_LIBPTHREAD
1004: pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
1005: #endif
1006: TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
1007: #ifdef HAVE_LIBPTHREAD
1008: pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
1009: #endif
1010: return task;
1011: }
1012:
1013: /*
1014: * sched_hook_exception() - Default EXCEPTION hook
1015: *
1016: * @root = root task
1017: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
1018: * return: <0 errors and 0 ok
1019: */
1020: void *
1021: sched_hook_exception(void *root, void *arg)
1022: {
1023: sched_root_task_t *r = root;
1024:
1025: if (!r)
1026: return NULL;
1027:
1028: /* custom exception handling ... */
1029: if (arg) {
1030: if (arg == (void*) EV_EOF)
1031: return NULL;
1032: return (void*) -1; /* raise scheduler error!!! */
1033: }
1034:
1035: /* if error hook exists */
1036: if (r->root_hooks.hook_root.error)
1037: return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));
1038:
1039: /* default case! */
1040: LOGERR;
1041: return NULL;
1042: }
1043:
1044: /*
1045: * sched_hook_condition() - Default CONDITION hook
1046: *
1047: * @root = root task
1048: * @arg = killState from schedRun()
1049: * return: NULL kill scheduler loop or !=NULL ok
1050: */
1051: void *
1052: sched_hook_condition(void *root, void *arg)
1053: {
1054: sched_root_task_t *r = root;
1055:
1056: if (!r)
1057: return NULL;
1058:
1059: return (void*) (r->root_cond - *(intptr_t*) arg);
1060: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */