/* embedaddon/libevent/event.c, revision 1.1.1.1.2.1 */
/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

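/* Record whether clock_gettime(CLOCK_MONOTONIC) works on this system,
 * so timekeeping can prefer a clock that is immune to wall-clock jumps. */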
static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
#endif
}

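/* Fetch the current time into *tp: use the base's cached time if the cache
 * is valid, else the monotonic clock if available, else gettimeofday(). */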
static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	return (evutil_gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
		    base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}

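/*
 * Tear down an event base: delete all remaining non-internal events,
 * deallocate the backend, and free the active queues.
 */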
void
event_base_free(struct event_base *base)
{
	int i, n_deleted = 0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

#if 0
	/* Right now, reinit always takes effect, since even if the
	   backend doesn't require it, the signal socketpair code does.
	 */
	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);
#endif

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (npriorities == base->nactivequeues)
		return (0);

	if (base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
	    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}

/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not thread safe; the event is scheduled only once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			evutil_timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}

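/*
 * Prepare an event structure for use.  Illustrative usage (the names
 * my_callback/my_arg are examples, not part of this file):
 *
 *	struct event ev;
 *	event_set(&ev, fd, EV_READ|EV_PERSIST, my_callback, my_arg);
 *	event_base_set(base, &ev);	(optional: bind to a specific base)
 *	event_add(&ev, NULL);		(NULL timeout: wait for I/O only)
 */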
void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

/*
 * Set the priority of an event.  Changing the priority fails if the
 * event is already active.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);
	}

	return (flags & event);
}

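/*
 * Register an event with its base: add I/O or signal events to the
 * backend, and (re)schedule the timeout if tv is given.  tv is a
 * relative interval; it is converted to an absolute deadline here.
 */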
int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
	    "event_add: event: %p, %s%s%scall %p",
	    ev,
	    ev->ev_events & EV_READ ? "EV_READ " : " ",
	    ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
	    tv ? "EV_TIMEOUT " : " ",
	    ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		evutil_timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
		    "event_add: timeout in %ld seconds, call %p",
		    tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}

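/*
 * Remove an event from every queue it is on.  Safe to call from within
 * the event's own callback: any pending callback repetitions are aborted
 * via ev_pncalls.
 */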
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
	    ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

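/*
 * Mark an event as active: record the triggering result flags (EV_READ,
 * EV_TIMEOUT, ...) and how many times the callback should run, then put
 * the event on its priority's active queue.
 */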
void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

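/*
 * Compute how long the backend may block: the interval until the earliest
 * scheduled timeout, zero if that timeout has already expired, or NULL
 * (wait indefinitely) if no timeouts are pending.
 */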
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active, wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		return (0);
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using a
 * monotonic clock.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
	    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of each node without destroying
	 * the heap property, because we apply the same offset to every
	 * element in order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}

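/* Activate every event whose timeout has expired. */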
void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
		    ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

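/*
 * Take an event off the given queue (EVLIST_INSERTED, EVLIST_ACTIVE or
 * EVLIST_TIMEOUT) and update the base's event counts (internal events
 * are not counted).
 */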
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

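/*
 * Put an event on the given queue and update the base's event counts;
 * double insertion is tolerated only for active events.
 */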
void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}