1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.4 2012/01/24 21:59:47 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
50: /*
51: * sched_hook_init() - Default INIT hook
52: * @root = root task
53: * @data = optional data if !=NULL
54: * return: <0 errors and 0 ok
55: */
void *
sched_hook_init(void *root, void *data)
{
	sched_root_task_t *r = root;

	/* refuse double initialization: the I/O state must not already exist */
	if (!r || r->root_data.iov_base || r->root_data.iov_len)
		return (void*) -1;

	/* allocate the per-root I/O state (read/write fd_sets) */
	r->root_data.iov_base = malloc(sizeof(struct sched_IO));
	if (!r->root_data.iov_base) {
		LOGERR;
		return (void*) -1;
	} else {
		r->root_data.iov_len = sizeof(struct sched_IO);
		memset(r->root_data.iov_base, 0, r->root_data.iov_len);
	}

	r->root_kq = kqueue();
	if (r->root_kq == -1) {
		LOGERR;
		/* don't leak the I/O state on kqueue() failure; also reset the
		 * iov so a subsequent INIT attempt isn't rejected as "already
		 * initialized" above */
		free(r->root_data.iov_base);
		r->root_data.iov_base = NULL;
		r->root_data.iov_len = 0;
		return (void*) -1;
	}

	return NULL;
}
81:
82: /*
83: * sched_hook_fini() - Default FINI hook
84: * @root = root task
85: * @arg = unused
86: * return: <0 errors and 0 ok
87: */
88: void *
89: sched_hook_fini(void *root, void *arg __unused)
90: {
91: sched_root_task_t *r = root;
92:
93: if (!r)
94: return (void*) -1;
95:
96: if (r->root_kq > 2) {
97: close(r->root_kq);
98: r->root_kq = 0;
99: }
100:
101: if (r->root_data.iov_base && r->root_data.iov_len) {
102: free(r->root_data.iov_base);
103: r->root_data.iov_base = NULL;
104: r->root_data.iov_len = 0;
105: }
106:
107: return NULL;
108: }
109:
110: /*
111: * sched_hook_cancel() - Default CANCEL hook
112: * @task = current task
113: * @arg = unused
114: * return: <0 errors and 0 ok
115: */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	struct sched_IO *io;
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
	fd_set *fds;
	int filter;

	/* need the task, its root and the root I/O state from the INIT hook */
	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
		return (void*) -1;
	else
		io = ROOT_DATA(t->task_root);

	/* only read/write tasks carry a kevent registration to tear down */
	switch (TASK_TYPE(t)) {
	case taskREAD:
		filter = EVFILT_READ;
		fds = &io->rfd;
		break;
	case taskWRITE:
		filter = EVFILT_WRITE;
		fds = &io->wfd;
		break;
	default:
		return NULL;
	}

	/* NetBSD declares kevent udata as intptr_t, other BSDs as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), filter, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), filter, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
	/* best-effort removal; errors are deliberately ignored on cancel */
	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
	FD_CLR(TASK_FD(t), fds);

	return NULL;
}
154:
155: /*
156: * sched_hook_read() - Default READ hook
157: * @task = current task
158: * @arg = unused
159: * return: <0 errors and 0 ok
160: */
void *
sched_hook_read(void *task, void *arg __unused)
{
	struct sched_IO *io;
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	/* need the task, its root and the root I/O state from the INIT hook */
	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
		return (void*) -1;
	else
		io = ROOT_DATA(t->task_root);

	/* fd already registered for read events -> nothing to do */
	if (FD_ISSET(TASK_FD(t), &io->rfd))
		return NULL;
	else
		FD_SET(TASK_FD(t), &io->rfd);

	/* NetBSD declares kevent udata as intptr_t, other BSDs as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* registration failed: undo the FD_SET above, otherwise every
		 * later READ hook call would short-circuit on FD_ISSET and the
		 * fd would never actually be armed in the kqueue */
		FD_CLR(TASK_FD(t), &io->rfd);
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

	return NULL;
}
194:
195: /*
196: * sched_hook_write() - Default WRITE hook
197: * @task = current task
198: * @arg = unused
199: * return: <0 errors and 0 ok
200: */
void *
sched_hook_write(void *task, void *arg __unused)
{
	struct sched_IO *io;
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	/* need the task, its root and the root I/O state from the INIT hook */
	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
		return (void*) -1;
	else
		io = ROOT_DATA(t->task_root);

	/* fd already registered for write events -> nothing to do */
	if (FD_ISSET(TASK_FD(t), &io->wfd))
		return NULL;
	else
		FD_SET(TASK_FD(t), &io->wfd);

	/* NetBSD declares kevent udata as intptr_t, other BSDs as void* */
#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* registration failed: undo the FD_SET above, otherwise every
		 * later WRITE hook call would short-circuit on FD_ISSET and the
		 * fd would never actually be armed in the kqueue */
		FD_CLR(TASK_FD(t), &io->wfd);
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

	return NULL;
}
234:
235: /*
236: * sched_hook_fetch() - Default FETCH hook
237: * @root = root task
238: * @arg = unused
239: * return: NULL error or !=NULL fetched task
240: */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	struct sched_IO *io;
	sched_root_task_t *r = root;
	sched_task_t *task;
	struct timespec now, m, mtmp;
	struct timespec *timeout;
	struct kevent evt[1], res[KQ_EVENTS];
	register int i;
	int en;

	/* root and its I/O state (set up by the INIT hook) are mandatory */
	if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
		return NULL;

	/* get new task by queue priority: event queue first, then ready queue.
	 * A dequeued task is moved to the unuse queue before being handed out.
	 * NOTE(review): TAILQ_FIRST is read before the queue mutex is taken;
	 * presumably fetch runs on a single scheduler thread — confirm. */
retry:
	while ((task = TAILQ_FIRST(&r->root_event))) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
#endif
		TAILQ_REMOVE(&r->root_event, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
#endif
		task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
		return task;
	}
	while ((task = TAILQ_FIRST(&r->root_ready))) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
		TAILQ_REMOVE(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
		return task;
	}

	/* compute the kevent wait time (root_wait) from pending timer tasks */
#ifdef TIMER_WITHOUT_SORT
	clock_gettime(CLOCK_MONOTONIC, &now);

	/* timer queue is unsorted: scan for the earliest absolute deadline */
	sched_timespecclear(&r->root_wait);
	TAILQ_FOREACH(task, &r->root_timer, task_node) {
		if (!sched_timespecisset(&r->root_wait))
			r->root_wait = TASK_TS(task);
		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
			r->root_wait = TASK_TS(task);
	}

	if (TAILQ_FIRST(&r->root_timer)) {
		/* convert the absolute deadline to a relative wait */
		m = r->root_wait;
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* no timers: set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#else
	/* sorted timer queue: the head task holds the earliest deadline */
	if (!TAILQ_FIRST(&r->root_eventlo) && (task = TAILQ_FIRST(&r->root_timer))) {
		clock_gettime(CLOCK_MONOTONIC, &now);

		m = TASK_TS(task);
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#endif
	/* if present member of eventLo, set NOWAIT so low-priority events
	 * still get serviced without blocking in kevent */
	if (TAILQ_FIRST(&r->root_eventlo))
		sched_timespecclear(&r->root_wait);

	/* NOTE(review): this treats root_wait as infinite only when BOTH
	 * fields are -1; if timespecsub can yield exactly one -1 field this
	 * condition may misclassify — verify against sched_timespecinf() */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
		timeout = &r->root_wait;
	else if (sched_timespecisinf(&r->root_poll))
		timeout = NULL;
	else
		timeout = &r->root_poll;
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else
			LOGERR;
#ifdef NDEBUG
		/* kevent no exit by error, if non-debug version */
		goto retry;
#else
		/* diagnostic exit from scheduler if kevent error occur */
		return NULL;
#endif
	}

	/* zero timespec reused as a non-blocking timeout for the EV_DELETE
	 * kevent calls below */
	now.tv_sec = now.tv_nsec = 0;
	/* Go and catch the cat into pipes ... (dispatch triggered events) */
	for (i = 0; i < en; i++) {
		/* prepare a one-shot EV_DELETE for this event's registration */
		memcpy(evt, &res[i], sizeof evt);
		evt->flags = EV_DELETE;
		/* Put read/write task to ready queue */
		switch (res[i].filter) {
		case EVFILT_READ:
			/* find the read task whose fd was stored in udata */
			TAILQ_FOREACH(task, &r->root_read, task_node) {
				if (TASK_FD(task) != ((intptr_t) res[i].udata))
					continue;
				/* remove read handle */
				io = ROOT_DATA(task->task_root);
				FD_CLR(TASK_FD(task), &io->rfd);

#ifdef HAVE_LIBPTHREAD
				pthread_mutex_lock(&r->root_mtx[taskREAD]);
#endif
				TAILQ_REMOVE(&r->root_read, task, task_node);
#ifdef HAVE_LIBPTHREAD
				pthread_mutex_unlock(&r->root_mtx[taskREAD]);
#endif
				/* on EOF let the exception hook decide: nonzero
				 * return discards the task, zero keeps it runnable */
				if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
					if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
						task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
					} else {
						task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					}
				} else {
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				break;
			}
			break;
		case EVFILT_WRITE:
			/* same dispatch as EVFILT_READ, for the write queue */
			TAILQ_FOREACH(task, &r->root_write, task_node) {
				if (TASK_FD(task) != ((intptr_t) res[i].udata))
					continue;
				/* remove write handle */
				io = ROOT_DATA(task->task_root);
				FD_CLR(TASK_FD(task), &io->wfd);

#ifdef HAVE_LIBPTHREAD
				pthread_mutex_lock(&r->root_mtx[taskWRITE]);
#endif
				TAILQ_REMOVE(&r->root_write, task, task_node);
#ifdef HAVE_LIBPTHREAD
				pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
#endif
				if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
					if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
						task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
					} else {
						task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					}
				} else {
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				break;
			}
			break;
		}
		/* drop the (one-shot) kqueue registration for this event */
		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
			if (r->root_hooks.hook_exec.exception) {
				if (r->root_hooks.hook_exec.exception(r, NULL))
					return NULL;
			} else
				LOGERR;
		}
	}

	/* timer update & put in ready queue: move every expired timer task */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH(task, &r->root_timer, task_node)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
#endif
			TAILQ_REMOVE(&r->root_timer, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
#endif
			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		}

	/* put eventlo priority task to ready queue, if there is no ready task or
	   reach max missed fetch-rotate (anti-starvation for low priority) */
	if ((task = TAILQ_FIRST(&r->root_eventlo))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_eventlo_miss > MAX_EVENTLO_MISS) {
			r->root_eventlo_miss = 0;

#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskEVENTLO]);
#endif
			TAILQ_REMOVE(&r->root_eventlo, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskEVENTLO]);
#endif
			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		} else
			r->root_eventlo_miss++;
	} else
		r->root_eventlo_miss = 0;

	/* OK, lets get ready task !!! — if none materialized, block again */
	if (!(task = TAILQ_FIRST(&r->root_ready)))
		goto retry;
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
	TAILQ_REMOVE(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
	task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
	return task;
}
532:
533: /*
534: * sched_hook_exception() - Default EXCEPTION hook
535: * @root = root task
536: * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
537: * return: <0 errors and 0 ok
538: */
void *
sched_hook_exception(void *root, void *arg)
{
	sched_root_task_t *r = root;

	if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
		return NULL;

	/* custom exception handling: EV_EOF is benign, anything else
	 * raises a scheduler error */
	if (arg)
		return (arg == (void*) EV_EOF) ? NULL : (void*) -1;

	/* arg == NULL means "report errno": delegate to the error hook
	 * when one is installed */
	if (r->root_hooks.hook_root.error)
		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));

	/* default case: just log errno */
	LOGERR;
	return NULL;
}
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */