1: /*************************************************************************
2: * (C) 2011 AITNET ltd - Sofia/Bulgaria - <misho@aitbg.com>
3: * by Michael Pounov <misho@openbsd-bg.org>
4: *
5: * $Author: misho $
6: * $Id: hooks.c,v 1.5.2.1 2012/04/24 13:29:28 misho Exp $
7: *
8: **************************************************************************
9: The ELWIX and AITNET software is distributed under the following
10: terms:
11:
12: All of the documentation and software included in the ELWIX and AITNET
13: Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
14:
15: Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
16: by Michael Pounov <misho@elwix.org>. All rights reserved.
17:
18: Redistribution and use in source and binary forms, with or without
19: modification, are permitted provided that the following conditions
20: are met:
21: 1. Redistributions of source code must retain the above copyright
22: notice, this list of conditions and the following disclaimer.
23: 2. Redistributions in binary form must reproduce the above copyright
24: notice, this list of conditions and the following disclaimer in the
25: documentation and/or other materials provided with the distribution.
26: 3. All advertising materials mentioning features or use of this software
27: must display the following acknowledgement:
28: This product includes software developed by Michael Pounov <misho@elwix.org>
29: ELWIX - Embedded LightWeight unIX and its contributors.
30: 4. Neither the name of AITNET nor the names of its contributors
31: may be used to endorse or promote products derived from this software
32: without specific prior written permission.
33:
34: THIS SOFTWARE IS PROVIDED BY AITNET AND CONTRIBUTORS ``AS IS'' AND
35: ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36: IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37: ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38: FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39: DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40: OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41: HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42: LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43: OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44: SUCH DAMAGE.
45: */
46: #include "global.h"
47: #include "hooks.h"
48:
49:
/*
 * sched_hook_init() - Default INIT hook
 *
 * Allocates the per-root I/O bookkeeping (struct sched_IO) into
 * root_data and creates the kqueue descriptor.
 *
 * @root = root task
 * @data = optional data if !=NULL
 * return: <0 errors and 0 ok
 */
void *
sched_hook_init(void *root, void *data)
{
	sched_root_task_t *r = root;

	/* refuse double-init: root_data must be empty */
	if (!r || r->root_data.iov_base || r->root_data.iov_len)
		return (void*) -1;

	r->root_data.iov_base = malloc(sizeof(struct sched_IO));
	if (!r->root_data.iov_base) {
		LOGERR;
		return (void*) -1;
	}
	r->root_data.iov_len = sizeof(struct sched_IO);
	memset(r->root_data.iov_base, 0, r->root_data.iov_len);

	r->root_kq = kqueue();
	if (r->root_kq == -1) {
		LOGERR;
		/* roll back the allocation so the root is left clean and a
		 * later init attempt is not rejected by the double-init guard */
		free(r->root_data.iov_base);
		r->root_data.iov_base = NULL;
		r->root_data.iov_len = 0;
		return (void*) -1;
	}

	return NULL;
}
82:
83: /*
84: * sched_hook_fini() - Default FINI hook
85: *
86: * @root = root task
87: * @arg = unused
88: * return: <0 errors and 0 ok
89: */
90: void *
91: sched_hook_fini(void *root, void *arg __unused)
92: {
93: sched_root_task_t *r = root;
94:
95: if (!r)
96: return (void*) -1;
97:
98: if (r->root_kq > 2) {
99: close(r->root_kq);
100: r->root_kq = 0;
101: }
102:
103: if (r->root_data.iov_base && r->root_data.iov_len) {
104: free(r->root_data.iov_base);
105: r->root_data.iov_base = NULL;
106: r->root_data.iov_len = 0;
107: }
108:
109: return NULL;
110: }
111:
/*
 * sched_hook_cancel() - Default CANCEL hook
 *
 * Deregisters a read or write task's descriptor from the kqueue and
 * clears it from the corresponding fd_set; other task types are a no-op.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_cancel(void *task, void *arg __unused)
{
	struct sched_IO *io;
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };
	fd_set *fds;
	short filter;

	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
		return (void*) -1;
	io = ROOT_DATA(t->task_root);

	/* pick the kqueue filter and bookkeeping set for this task type */
	switch (TASK_TYPE(t)) {
	case taskREAD:
		filter = EVFILT_READ;
		fds = &io->rfd;
		break;
	case taskWRITE:
		filter = EVFILT_WRITE;
		fds = &io->wfd;
		break;
	default:
		/* nothing registered for other task types */
		return NULL;
	}

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), filter, EV_DELETE, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), filter, EV_DELETE, 0, 0, (void*) TASK_FD(t));
#endif
	/* best effort: errors from the delete are deliberately ignored */
	kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout);
	FD_CLR(TASK_FD(t), fds);

	return NULL;
}
157:
/*
 * sched_hook_read() - Default READ hook
 *
 * Registers the task's descriptor with the kqueue for EVFILT_READ and
 * records it in io->rfd; a descriptor already present in io->rfd is
 * silently skipped.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_read(void *task, void *arg __unused)
{
	struct sched_IO *io;
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
		return (void*) -1;
	else
		io = ROOT_DATA(t->task_root);

	if (FD_ISSET(TASK_FD(t), &io->rfd))
		return NULL;	/* already registered */
	else
		FD_SET(TASK_FD(t), &io->rfd);

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_READ, EV_ADD, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* registration failed: undo the FD_SET above, otherwise the fd
		 * would look registered and later read hooks would never retry */
		FD_CLR(TASK_FD(t), &io->rfd);
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

	return NULL;
}
198:
/*
 * sched_hook_write() - Default WRITE hook
 *
 * Registers the task's descriptor with the kqueue for EVFILT_WRITE and
 * records it in io->wfd; a descriptor already present in io->wfd is
 * silently skipped.
 *
 * @task = current task
 * @arg = unused
 * return: <0 errors and 0 ok
 */
void *
sched_hook_write(void *task, void *arg __unused)
{
	struct sched_IO *io;
	sched_task_t *t = task;
	struct kevent chg[1];
	struct timespec timeout = { 0, 0 };

	if (!t || !TASK_ROOT(t) || !ROOT_DATA(t->task_root) || !ROOT_DATLEN(t->task_root))
		return (void*) -1;
	else
		io = ROOT_DATA(t->task_root);

	if (FD_ISSET(TASK_FD(t), &io->wfd))
		return NULL;	/* already registered */
	else
		FD_SET(TASK_FD(t), &io->wfd);

#ifdef __NetBSD__
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (intptr_t) TASK_FD(t));
#else
	EV_SET(&chg[0], TASK_FD(t), EVFILT_WRITE, EV_ADD, 0, 0, (void*) TASK_FD(t));
#endif
	if (kevent(TASK_ROOT(t)->root_kq, chg, 1, NULL, 0, &timeout) == -1) {
		/* registration failed: undo the FD_SET above, otherwise the fd
		 * would look registered and later write hooks would never retry */
		FD_CLR(TASK_FD(t), &io->wfd);
		if (TASK_ROOT(t)->root_hooks.hook_exec.exception)
			TASK_ROOT(t)->root_hooks.hook_exec.exception(TASK_ROOT(t), NULL);
		else
			LOGERR;
		return (void*) -1;
	}

	return NULL;
}
239:
/*
 * sched_hook_fetch() - Default FETCH hook
 *
 * Core scheduler step: drains the event and ready queues by priority,
 * computes the kqueue wait interval from pending timers, collects fired
 * kevents into the ready queue, expires due timers, rotates one
 * low-priority (eventLo) task, and finally returns the first ready
 * task (parked on the unuse queue for recycling).
 *
 * @root = root task
 * @arg = unused
 * return: NULL error or !=NULL fetched task
 */
void *
sched_hook_fetch(void *root, void *arg __unused)
{
	struct sched_IO *io;
	sched_root_task_t *r = root;
	sched_task_t *task;
	struct timespec now, m, mtmp;
	struct timespec *timeout;
	struct kevent evt[1], res[KQ_EVENTS];
	register int i;
	int en;

	if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
		return NULL;

	/* get new task by queue priority */
retry:
	/* taskEVENT has the highest priority; the loop body always returns,
	 * so this takes at most the first queued element */
	while ((task = TAILQ_FIRST(&r->root_event))) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskEVENT]);
#endif
		TAILQ_REMOVE(&r->root_event, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskEVENT]);
#endif
		/* fetched tasks are recycled via the unuse queue */
		task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
		return task;
	}
	/* next: any task already on the ready queue (same take-first shape) */
	while ((task = TAILQ_FIRST(&r->root_ready))) {
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
		TAILQ_REMOVE(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
		TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
		pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
		return task;
	}

#ifdef TIMER_WITHOUT_SORT
	/* timer queue is unsorted: scan all timers for the earliest deadline */
	clock_gettime(CLOCK_MONOTONIC, &now);

	sched_timespecclear(&r->root_wait);
	TAILQ_FOREACH(task, &r->root_timer, task_node) {
		if (!sched_timespecisset(&r->root_wait))
			r->root_wait = TASK_TS(task);
		else if (sched_timespeccmp(&TASK_TS(task), &r->root_wait, -) < 0)
			r->root_wait = TASK_TS(task);
	}

	if (TAILQ_FIRST(&r->root_timer)) {
		/* convert the absolute deadline into a relative wait */
		m = r->root_wait;
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#else
	/* sorted timers: only the head matters; skip if eventLo work pending */
	if (!TAILQ_FIRST(&r->root_eventlo) && (task = TAILQ_FIRST(&r->root_timer))) {
		clock_gettime(CLOCK_MONOTONIC, &now);

		m = TASK_TS(task);
		sched_timespecsub(&m, &now, &mtmp);
		r->root_wait = mtmp;
	} else {
		/* set wait INFTIM */
		sched_timespecinf(&r->root_wait);
	}
#endif
	/* if present member of eventLo, set NOWAIT */
	if (TAILQ_FIRST(&r->root_eventlo))
		sched_timespecclear(&r->root_wait);

	/* NOTE(review): root_wait counts as INFTIM only when BOTH fields are
	 * -1 (the && below); a wait with just tv_sec == -1 falls through to
	 * the root_poll branch — confirm that is intended */
	if (r->root_wait.tv_sec != -1 && r->root_wait.tv_nsec != -1)
		timeout = &r->root_wait;
	else if (sched_timespecisinf(&r->root_poll))
		timeout = NULL;
	else
		timeout = &r->root_poll;
	if ((en = kevent(r->root_kq, NULL, 0, res, KQ_EVENTS, timeout)) == -1) {
		if (r->root_hooks.hook_exec.exception) {
			if (r->root_hooks.hook_exec.exception(r, NULL))
				return NULL;
		} else
			LOGERR;
#ifdef NDEBUG
		/* kevent no exit by error, if non-debug version */
		goto retry;
#else
		/* diagnostic exit from scheduler if kevent error occur */
		return NULL;
#endif
	}

	/* reuse 'now' as a zero timeout for the EV_DELETE calls below */
	now.tv_sec = now.tv_nsec = 0;
	/* Go and catch the cat into pipes ... */
	for (i = 0; i < en; i++) {
		/* re-post each fired event with EV_DELETE to deregister it */
		memcpy(evt, &res[i], sizeof evt);
		evt->flags = EV_DELETE;
		/* Put read/write task to ready queue */
		switch (res[i].filter) {
		case EVFILT_READ:
			/* find the read task whose fd matches the event's udata */
			TAILQ_FOREACH(task, &r->root_read, task_node) {
				if (TASK_FD(task) != ((intptr_t) res[i].udata))
					continue;
				/* remove read handle */
				io = ROOT_DATA(task->task_root);
				FD_CLR(TASK_FD(task), &io->rfd);

#ifdef HAVE_LIBPTHREAD
				pthread_mutex_lock(&r->root_mtx[taskREAD]);
#endif
				TAILQ_REMOVE(&r->root_read, task, task_node);
#ifdef HAVE_LIBPTHREAD
				pthread_mutex_unlock(&r->root_mtx[taskREAD]);
#endif
				/* EOF + exception hook: hook decides drop (unuse) vs dispatch (ready) */
				if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
					if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
						task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
					} else {
						task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					}
				} else {
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				break;
			}
			break;
		case EVFILT_WRITE:
			/* same handling as EVFILT_READ, against the write queue/wfd */
			TAILQ_FOREACH(task, &r->root_write, task_node) {
				if (TASK_FD(task) != ((intptr_t) res[i].udata))
					continue;
				/* remove write handle */
				io = ROOT_DATA(task->task_root);
				FD_CLR(TASK_FD(task), &io->wfd);

#ifdef HAVE_LIBPTHREAD
				pthread_mutex_lock(&r->root_mtx[taskWRITE]);
#endif
				TAILQ_REMOVE(&r->root_write, task, task_node);
#ifdef HAVE_LIBPTHREAD
				pthread_mutex_unlock(&r->root_mtx[taskWRITE]);
#endif
				if (r->root_hooks.hook_exec.exception && res[i].flags & EV_EOF) {
					if (r->root_hooks.hook_exec.exception(r, (void*) EV_EOF)) {
						task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
						TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
					} else {
						task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
						TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
						pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
					}
				} else {
					task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
					TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
					pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
				}
				break;
			}
			break;
		}
		/* deregister the consumed event (zero timeout, non-blocking) */
		if (kevent(r->root_kq, evt, 1, NULL, 0, &now) == -1) {
			if (r->root_hooks.hook_exec.exception) {
				if (r->root_hooks.hook_exec.exception(r, NULL))
					return NULL;
			} else
				LOGERR;
		}
	}

	/* timer update & put in ready queue */
	clock_gettime(CLOCK_MONOTONIC, &now);

	TAILQ_FOREACH(task, &r->root_timer, task_node)
		if (sched_timespeccmp(&now, &TASK_TS(task), -) >= 0) {
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskTIMER]);
#endif
			TAILQ_REMOVE(&r->root_timer, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskTIMER]);
#endif
			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		}

	/* put eventlo priority task to ready queue, if there is no ready task or
		reach max missed fetch-rotate */
	if ((task = TAILQ_FIRST(&r->root_eventlo))) {
		if (!TAILQ_FIRST(&r->root_ready) || r->root_eventlo_miss > MAX_EVENTLO_MISS) {
			r->root_eventlo_miss = 0;

#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskEVENTLO]);
#endif
			TAILQ_REMOVE(&r->root_eventlo, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskEVENTLO]);
#endif
			task->task_type = taskREADY;
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
			TAILQ_INSERT_TAIL(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
			pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
		} else
			r->root_eventlo_miss++;
	} else
		r->root_eventlo_miss = 0;

	/* OK, lets get ready task !!! */
	task = TAILQ_FIRST(&r->root_ready);
	if (!(task))
		return NULL;

#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskREADY]);
#endif
	TAILQ_REMOVE(&r->root_ready, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskREADY]);
#endif
	task->task_type = taskUNUSE;
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_lock(&r->root_mtx[taskUNUSE]);
#endif
	TAILQ_INSERT_TAIL(&r->root_unuse, task, task_node);
#ifdef HAVE_LIBPTHREAD
	pthread_mutex_unlock(&r->root_mtx[taskUNUSE]);
#endif
	return task;
}
540:
/*
 * sched_hook_exception() - Default EXCEPTION hook
 *
 * @root = root task
 * @arg = custom handling: if arg == EV_EOF or other value; default: arg == NULL log errno
 * return: <0 errors and 0 ok
 */
void *
sched_hook_exception(void *root, void *arg)
{
	sched_root_task_t *r = root;

	if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
		return NULL;

	/* non-NULL arg selects custom handling: EV_EOF is benign,
	 * anything else raises a scheduler error */
	if (arg)
		return (arg == (void*) EV_EOF) ? NULL : (void*) -1;

	/* delegate to the user error hook when one is installed */
	if (r->root_hooks.hook_root.error)
		return (r->root_hooks.hook_root.error(root, (void*) ((intptr_t) errno)));

	/* default case! */
	LOGERR;
	return NULL;
}
571:
572: /*
573: * sched_hook_condition() - Default CONDITION hook
574: *
575: * @root = root task
576: * @arg = killState from schedRun()
577: * return: NULL kill scheduler loop or !=NULL ok
578: */
579: void *
580: sched_hook_condition(void *root, void *arg)
581: {
582: sched_root_task_t *r = root;
583:
584: if (!r || !ROOT_DATA(r) || !ROOT_DATLEN(r))
585: return NULL;
586:
587: return (void*) (r->root_cond - *(intptr_t*) arg);
588: }
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>