Annotation of embedaddon/libevent/event.c, revision 1.1.1.1
1.1 misho 1: /*
2: * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. The name of the author may not be used to endorse or promote products
14: * derived from this software without specific prior written permission.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26: */
27: #ifdef HAVE_CONFIG_H
28: #include "config.h"
29: #endif
30:
31: #ifdef WIN32
32: #define WIN32_LEAN_AND_MEAN
33: #include <windows.h>
34: #undef WIN32_LEAN_AND_MEAN
35: #endif
36: #include <sys/types.h>
37: #ifdef HAVE_SYS_TIME_H
38: #include <sys/time.h>
39: #else
40: #include <sys/_libevent_time.h>
41: #endif
42: #include <sys/queue.h>
43: #include <stdio.h>
44: #include <stdlib.h>
45: #ifndef WIN32
46: #include <unistd.h>
47: #endif
48: #include <errno.h>
49: #include <signal.h>
50: #include <string.h>
51: #include <assert.h>
52: #include <time.h>
53:
54: #include "event.h"
55: #include "event-internal.h"
56: #include "evutil.h"
57: #include "log.h"
58:
/* Backend implementations, each compiled in only when the configure
 * script detected support for it.  Every backend exports one const
 * struct eventop describing its init/add/del/dispatch/dealloc hooks. */
#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference: event_base_new() walks this NULL-terminated
 * table and uses the first backend whose init() succeeds. */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};
106:
/* Global state */
struct event_base *current_base = NULL;	/* base used by the non-_base API */
extern struct event_base *evsignal_base;	/* base that owns signal handling */
static int use_monotonic;	/* nonzero once detect_monotonic() succeeds */

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);
126:
/*
 * Probe once for a usable monotonic clock; on success set the
 * file-scope use_monotonic flag so gettime() prefers CLOCK_MONOTONIC.
 * A no-op on platforms without clock_gettime()/CLOCK_MONOTONIC.
 */
static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec probe;

	if (clock_gettime(CLOCK_MONOTONIC, &probe) != -1)
		use_monotonic = 1;
#endif
}
137:
/*
 * Fetch the current time into *tp, preferring (in order): the per-base
 * cached timestamp (valid while callbacks are being dispatched), the
 * monotonic clock if one was detected, and finally gettimeofday().
 * Returns 0 on success, -1 if the clock call fails.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	/* tv_sec != 0 marks the cache as valid; event_base_loop()
	 * clears/refreshes it around each dispatch. */
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		/* Convert nanoseconds to microseconds for timeval. */
		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	return (evutil_gettimeofday(tp, NULL));
}
161:
162: struct event_base *
163: event_init(void)
164: {
165: struct event_base *base = event_base_new();
166:
167: if (base != NULL)
168: current_base = base;
169:
170: return (base);
171: }
172:
/*
 * Allocate and initialize a new event_base: reset the deprecated
 * global signal state, pick a clock source, set up the timeout heap
 * and registered-event queue, select the best available backend from
 * eventops[], and create one active-event priority queue.  Failures
 * are reported through event_err()/event_errx().
 */
struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	/* -1 == no signal socketpair created yet */
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	/* Try each backend in preference order; the first init() that
	 * succeeds wins. */
	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
			   base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}
212:
/*
 * Tear down a base (NULL means the global current_base): delete every
 * non-internal event still registered, release the backend, the
 * priority queues, and the timeout heap, then free the base itself.
 * Internal events are left for the backend's dealloc hook to clean up.
 */
void
event_base_free(struct event_base *base)
{
	int i, n_deleted=0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		/* Grab the successor first: event_del() unlinks ev. */
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	/* After backend teardown every queue must be empty. */
	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}
272:
/* Reinitialize the event base after a fork: rebuild the backend state
 * and re-register every inserted event with it.  Returns 0 on success
 * (or if the backend needs no reinit), -1 if any re-add failed. */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	/* Re-register everything still on the inserted queue with the
	 * freshly created backend. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}
312:
313: int
314: event_priority_init(int npriorities)
315: {
316: return event_base_priority_init(current_base, npriorities);
317: }
318:
319: int
320: event_base_priority_init(struct event_base *base, int npriorities)
321: {
322: int i;
323:
324: if (base->event_count_active)
325: return (-1);
326:
327: if (npriorities == base->nactivequeues)
328: return (0);
329:
330: if (base->nactivequeues) {
331: for (i = 0; i < base->nactivequeues; ++i) {
332: free(base->activequeues[i]);
333: }
334: free(base->activequeues);
335: }
336:
337: /* Allocate our priority queues */
338: base->nactivequeues = npriorities;
339: base->activequeues = (struct event_list **)
340: calloc(base->nactivequeues, sizeof(struct event_list *));
341: if (base->activequeues == NULL)
342: event_err(1, "%s: calloc", __func__);
343:
344: for (i = 0; i < base->nactivequeues; ++i) {
345: base->activequeues[i] = malloc(sizeof(struct event_list));
346: if (base->activequeues[i] == NULL)
347: event_err(1, "%s: malloc", __func__);
348: TAILQ_INIT(base->activequeues[i]);
349: }
350:
351: return (0);
352: }
353:
354: int
355: event_haveevents(struct event_base *base)
356: {
357: return (base->event_count > 0);
358: }
359:
360: /*
361: * Active events are stored in priority queues. Lower priorities are always
362: * process before higher priorities. Low priority events can starve high
363: * priority ones.
364: */
365:
/*
 * Run the callbacks of the highest-priority (lowest-numbered)
 * non-empty active queue.  Only that single queue is drained per call;
 * the caller re-polls before lower-priority queues get a turn.  Must
 * only be called when at least one event is active (asserted below).
 */
static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	/* Find the first non-empty queue, i.e. the best priority. */
	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		/* Persistent events stay registered; one-shot events
		 * are fully deleted before their callback runs. */
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work: event_del() can zero the
		 * local counter through ev_pncalls to abort this loop. */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}
401:
402: /*
403: * Wait continously for events. We exit only if no events are left.
404: */
405:
406: int
407: event_dispatch(void)
408: {
409: return (event_loop(0));
410: }
411:
/*
 * Run the event loop on an explicit base with default flags; see
 * event_base_loop() for the return values.
 */
int
event_base_dispatch(struct event_base *event_base)
{
	return event_base_loop(event_base, 0);
}
417:
418: const char *
419: event_base_get_method(struct event_base *base)
420: {
421: assert(base);
422: return (base->evsel->name);
423: }
424:
/* Timer callback installed by event_(base_)loopexit(): flags the base
 * so event_base_loop() terminates at the top of its next iteration. */
static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}
431:
432: /* not thread safe */
433: int
434: event_loopexit(const struct timeval *tv)
435: {
436: return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
437: current_base, tv));
438: }
439:
440: int
441: event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
442: {
443: return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
444: event_base, tv));
445: }
446:
447: /* not thread safe */
448: int
449: event_loopbreak(void)
450: {
451: return (event_base_loopbreak(current_base));
452: }
453:
454: int
455: event_base_loopbreak(struct event_base *event_base)
456: {
457: if (event_base == NULL)
458: return (-1);
459:
460: event_base->event_break = 1;
461: return (0);
462: }
463:
464:
465:
466: /* not thread safe */
467:
468: int
469: event_loop(int flags)
470: {
471: return event_base_loop(current_base, flags);
472: }
473:
/*
 * Core dispatch loop: repeatedly compute the next timeout, block in
 * the backend's dispatch hook, then fire expired timeouts and active
 * callbacks.  flags may contain EVLOOP_ONCE and/or EVLOOP_NONBLOCK.
 * Returns 0 on requested termination, 1 when no events remain, and
 * -1 on backend or signal-callback error.
 */
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	/* Tell the signal machinery which base owns signal delivery. */
	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache so dispatch reads the real clock */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		/* Cache the post-dispatch time so all callbacks in this
		 * round share one consistent timestamp via gettime(). */
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
560:
/* Container for a one-shot event scheduled via event_(base_)once();
 * heap-allocated there and freed by event_once_cb() after firing. */

struct event_once {
	struct event ev;		/* the underlying libevent event */

	void (*cb)(int, short, void *);	/* user callback to invoke once */
	void *arg;			/* opaque argument passed to cb */
};
569:
/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	/* Run the user's callback, then release the wrapper that
	 * event_base_once() allocated; the event never fires again. */
	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}
580:
581: /* not threadsafe, event scheduled once. */
582: int
583: event_once(int fd, short events,
584: void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
585: {
586: return event_base_once(current_base, fd, events, callback, arg, tv);
587: }
588:
/* Schedule callback/arg to run exactly once, when fd becomes readable
 * or writable or when the timeout elapses (NULL tv == immediately on
 * the next loop iteration).  EV_SIGNAL is rejected, as is any request
 * without a usable EV_TIMEOUT/EV_READ/EV_WRITE combination.  Returns 0
 * on success, -1 on error; the internal wrapper frees itself. */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	/* Pure timeout: exactly EV_TIMEOUT, no I/O bits. */
	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			/* zero timeout == fire on the next iteration */
			evutil_timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		/* Nothing was registered, so the wrapper is ours to free. */
		free(eonce);
		return (res);
	}

	return (0);
}
635:
/*
 * Initialize an event structure (no registration happens here — call
 * event_add() for that).  The event is bound to the global
 * current_base; use event_base_set() to rebind it before the first
 * add.  events is a bitmask (EV_READ/EV_WRITE/... per event.h).
 */
void
event_set(struct event *ev, int fd, short events,
	  void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if(current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}
658:
659: int
660: event_base_set(struct event_base *base, struct event *ev)
661: {
662: /* Only innocent events may be assigned to a different base */
663: if (ev->ev_flags != EVLIST_INIT)
664: return (-1);
665:
666: ev->ev_base = base;
667: ev->ev_pri = base->nactivequeues/2;
668:
669: return (0);
670: }
671:
672: /*
673: * Set's the priority of an event - if an event is already scheduled
674: * changing the priority is going to fail.
675: */
676:
677: int
678: event_priority_set(struct event *ev, int pri)
679: {
680: if (ev->ev_flags & EVLIST_ACTIVE)
681: return (-1);
682: if (pri < 0 || pri >= ev->ev_base->nactivequeues)
683: return (-1);
684:
685: ev->ev_pri = pri;
686:
687: return (0);
688: }
689:
690: /*
691: * Checks if a specific event is pending or scheduled.
692: */
693:
/*
 * Report which of the requested `event` bits (EV_TIMEOUT, EV_READ,
 * EV_WRITE, EV_SIGNAL) are currently pending or active on ev.  If
 * EV_TIMEOUT is both requested and pending and tv is non-NULL, *tv
 * receives the expiry time remapped onto the wall clock.
 */
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		/* ev_timeout is on gettime()'s (possibly monotonic)
		 * clock; compute the remaining delta... */
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);
	}

	return (flags & event);
}
720:
/*
 * Register ev with its base: add any I/O/signal interest to the
 * backend and, if tv is non-NULL, (re)schedule a timeout of tv from
 * now.  Memory for the timeout heap is reserved up front so that no
 * state changes on failure.  Returns 0 on success, -1 on error.
 */
int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	/* Only tell the backend about events not already inserted/active. */
	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		evutil_timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %ld seconds, call %p",
			 tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}
798:
/*
 * Unregister ev from its base: cancel any pending timeout, remove it
 * from the active queue, and withdraw I/O/signal interest from the
 * backend.  Safe to call from inside the event's own callback (the
 * ev_pncalls trick aborts the remaining invocations).  Returns 0, -1
 * on an unbound event, or the backend's del() result.
 */
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	/* Only inserted events are known to the backend. */
	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}
838:
/*
 * Mark ev active with result bits res, scheduling its callback to run
 * ncalls times.  If the event is already active, only the new result
 * bits are merged in — the call count is left unchanged.
 */
void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
853:
/*
 * Compute how long the backend may sleep: *tv_p becomes the delta
 * until the earliest scheduled timeout, zero if that timeout is
 * already due, or NULL when no timeouts exist and the backend may
 * block indefinitely.  Returns 0 on success, -1 on clock failure.
 */
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	/* Already expired: ask for a zero (non-blocking) wait. */
	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		return (0);
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
	return (0);
}
883:
884: /*
885: * Determines if the time is running backwards by comparing the current
886: * time against the last time we checked. Not needed when using clock
887: * monotonic.
888: */
889:
/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.  If the clock stepped back, every scheduled timeout is
 * shifted by the same offset so relative deadlines stay correct.
 */
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}
924:
/*
 * Pop every expired timeout off the heap, fully delete the event from
 * the I/O queues, and make it active with EV_TIMEOUT so its callback
 * runs in this dispatch round.
 */
void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	/* Heap top is always the earliest deadline; stop at the first
	 * one still in the future. */
	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}
948:
/*
 * Unlink ev from one of the base's bookkeeping queues (inserted list,
 * active list, or timeout heap), clearing the matching flag bit.
 * Fatal error if the event is not actually on that queue.  Internal
 * events are excluded from the public event_count.
 */
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
			   ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
976:
/*
 * Link ev onto one of the base's bookkeeping queues, setting the
 * matching flag bit.  Re-activating an already-active event is a
 * silent no-op; any other double insertion is a fatal error.
 * Internal events are excluded from the public event_count.
 */
void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
			   ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev,ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
1010:
1011: /* Functions for debugging */
1012:
/* Return the compile-time libevent version string (VERSION macro). */
const char *
event_get_version(void)
{
	return (VERSION);
}
1018:
1019: /*
1020: * No thread-safe interface needed - the information should be the same
1021: * for all threads.
1022: */
1023:
const char *
event_get_method(void)
{
	/* Name of the backend chosen by the global current_base. */
	return (current_base->evsel->name);
}
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>