2 * Amanda, The Advanced Maryland Automatic Network Disk Archiver
3 * Copyright (c) 1999 University of Maryland at College Park
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of U.M. not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. U.M. makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
16 * U.M. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL U.M.
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 * Authors: the Amanda Development Team. Its members are listed in a
24 * file named AUTHORS, in the root directory of this distribution.
27 * $Id: event.c,v 1.24 2006/06/16 10:55:05 martinea Exp $
29 * Event handler. Serializes different kinds of events to allow for
30 * a uniform interface, central state storage, and centralized
31 * interdependency logic.
/*
 * event_debug(i, ...): forward the varargs to dbprintf() only when the
 * requested level (i) is at or below the global debug_event threshold.
 * NOTE(review): the closing of this do/while wrapper is not visible in
 * this chunk.
 */
39 #define event_debug(i, ...) do { \
40 if ((i) <= debug_event) { \
41 dbprintf(__VA_ARGS__); \
46 * The opaque handle passed back to the caller. This is typedefed to
47 * event_handle_t in our header file.
/*
 * Per-event state. A handle lives either on the active queue (eventq) or,
 * after release, on the free-handle cache. The meaning of 'data' depends
 * on 'type': fd for EV_READFD/EV_WRITEFD, signal number for EV_SIG,
 * interval in seconds for EV_TIME, wait id for EV_WAIT (as used in the
 * dispatch code below).
 */
50 event_fn_t fn; /* function to call when this fires */
51 void *arg; /* argument to pass to previous function */
52 event_type_t type; /* type of event */
53 event_id_t data; /* type data */
54 time_t lastfired; /* timestamp of last fired (EV_TIME only) */
55 LIST_ENTRY(event_handle) le; /* queue handle */
59 * eventq is a queue of currently active events.
60 * cache is a queue of unused handles. We keep a few around to avoid
61 * malloc overhead when doing a lot of register/releases.
/* Both queues share this shape: a BSD-style LIST head plus a length count
 * (qlength, referenced throughout the loop code below). */
64 LIST_HEAD(, event_handle) listhead;
67 LIST_HEAD_INITIALIZER(eventq.listhead), 0
69 LIST_HEAD_INITIALIZER(eventq.listhead), 0
/* Thin wrappers over the <sys/queue.h> LIST macros; eventq_add inserts at
 * the head, so iteration order is most-recently-registered first. */
71 #define eventq_first(q) LIST_FIRST(&q.listhead)
72 #define eventq_next(eh) LIST_NEXT(eh, le)
73 #define eventq_add(q, eh) LIST_INSERT_HEAD(&q.listhead, eh, le);
74 #define eventq_remove(eh) LIST_REMOVE(eh, le);
77 * How many items we can have in the handle cache before we start
83 * A table of currently set signal handlers.
/* Indexed by signal number (see sigtable[eh->data] uses below). 'score'
 * is bumped from the signal handler and consumed by the dispatch loop. */
85 static struct sigtabent {
86 event_handle_t *handle; /* handle for this signal */
87 int score; /* number of signals recvd since last checked */
88 void (*oldhandler)(int);/* old handler (for unsetting) */
/* Internal helpers; none of these are part of the public event API. */
91 static const char *event_type2str(event_type_t);
/* fire(eh): invoke the handle's callback with its registered argument. */
92 #define fire(eh) (*(eh)->fn)((eh)->arg)
93 static void signal_handler(int);
94 static event_handle_t *gethandle(void);
95 static void puthandle(event_handle_t *);
96 static int event_loop_wait (event_handle_t *, const int);
99 * Add a new event. See the comment in event.h for what the arguments
/*
 * Validates 'data' according to 'type' before queueing:
 *   - EV_READFD/EV_WRITEFD: fd must fit in an fd_set (select() limit);
 *   - EV_SIG: signal must be in range and not already registered;
 *   - anything at or past EV_DEAD is rejected outright.
 * All validation failures go through error(), which does not return here.
 */
109 event_handle_t *handle;
111 if ((type == EV_READFD) || (type == EV_WRITEFD)) {
112 /* make sure we aren't given a high fd that will overflow a fd_set */
113 if (data >= (int)FD_SETSIZE) {
114 error(_("event_register: Invalid file descriptor %lu"), data);
117 #if !defined(__lint) /* Global checking knows that these are never called */
118 } else if (type == EV_SIG) {
119 /* make sure signals are within range */
121 error(_("event_register: Invalid signal %lu"), data);
/* only one handle may own a given signal slot at a time */
124 if (sigtable[data].handle != NULL) {
125 error(_("event_register: signal %lu already registered"), data);
128 } else if (type >= EV_DEAD) {
129 error(_("event_register: Invalid event type %d"), type);
/* obtain a handle (recycled from the cache when available) */
134 handle = gethandle();
/* -1 marks "never fired"; the loop replaces it with the current time */
139 handle->lastfired = -1;
140 eventq_add(eventq, handle);
143 event_debug(1, _("event: register: %p->data=%lu, type=%s\n"),
144 handle, handle->data, event_type2str(handle->type));
149 * Mark an event to be released. Because we may be traversing the queue
150 * when this is called, we must wait until later to actually remove
155 event_handle_t *handle)
158 assert(handle != NULL);
160 event_debug(1, _("event: release (mark): %p data=%lu, type=%s\n"),
161 handle, handle->data,
162 event_type2str(handle->type));
/* double-release is a programming error */
163 assert(handle->type != EV_DEAD);
166 * For signal events, we need to specially remove them from the
167 * signal event table.
169 if (handle->type == EV_SIG) {
170 struct sigtabent *se = &sigtable[handle->data];
172 assert(se->handle == handle);
/* restore whatever disposition was in place before we registered */
173 signal((int)handle->data, se->oldhandler);
179 * Decrement the qlength now since this is no longer a real
185 * Mark it as dead and leave it for the loop to remove.
187 handle->type = EV_DEAD;
191 * Fire all EV_WAIT events waiting on the specified id.
200 event_debug(1, _("event: wakeup: enter (%lu)\n"), id);
/* linear scan: fire every EV_WAIT handle whose data matches the id */
202 for (eh = eventq_first(eventq); eh != NULL; eh = eventq_next(eh)) {
204 if (eh->type == EV_WAIT && eh->data == id) {
205 event_debug(1, _("event: wakeup: %p id=%lu\n"), eh, id);
215 * The event loop. We need to be specially careful here with adds and
216 * deletes. Since adds and deletes will often happen while this is running,
217 * we need to make sure we don't end up referencing a dead event handle.
/* event_loop: run the dispatcher without waiting on any specific handle
 * (NULL wait_eh); dontblock is forwarded to event_loop_wait(). */
223 event_loop_wait((event_handle_t *)NULL, dontblock);
/* event_wait: block (dontblock=0) until the given handle fires; returns
 * event_loop_wait()'s result. */
232 return event_loop_wait(eh, 0);
236 * The event loop. We need to be specially careful here with adds and
237 * deletes. Since adds and deletes will often happen while this is running,
238 * we need to make sure we don't end up referencing a dead event handle.
/*
 * Core dispatcher: build select() fd sets and a timeout from the active
 * queue, wait, then fire the ready handles. Returns nonzero when the
 * specific wait_eh handle fired (see event_wait_fired below).
 * NOTE(review): several interior lines of this function are not visible
 * in this chunk; comments below describe only what the visible code shows.
 */
242 event_handle_t *wait_eh,
/* re-entrancy guard; asserted below on entry and exit */
246 static int entry = 0;
248 SELECT_ARG_TYPE readfds, writefds, errfds, werrfds;
249 struct timeval timeout, *tvptr;
250 int ntries, maxfd, rc;
253 event_handle_t *eh, *nexteh;
254 struct sigtabent *se;
/* set when wait_eh itself fires; it is also the loop's exit signal */
255 int event_wait_fired = 0;
258 event_debug(1, _("event: loop: enter: dontblock=%d, qlength=%d, eh=%p\n"),
259 dontblock, eventq.qlength, wait_eh);
262 * If we have no events, we have nothing to do
264 if (eventq.qlength == 0)
268 * We must not be entered twice
270 assert(++entry == 1);
275 * Save a copy of the current time once, to reduce syscall load
278 curtime = time(NULL);
281 if (debug_event >= 1) {
282 event_debug(1, _("event: loop: dontblock=%d, qlength=%d eh=%p\n"),
283 dontblock, eventq.qlength, wait_eh);
284 for (eh = eventq_first(eventq); eh != NULL; eh = eventq_next(eh)) {
285 event_debug(1, _("%p): %s data=%lu fn=%p arg=%p\n"),
286 eh, event_type2str(eh->type), eh->data, eh->fn,
291 * Set ourselves up with no timeout initially.
297 * If we can block, initially set the tvptr to NULL. If
298 * we come across timeout events in the loop below, they
299 * will set it to an appropriate buffer. If we don't
300 * see any timeout events, then tvptr will remain NULL
301 * and the select will properly block indefinitely.
303 * If we can't block, set it to point to the timeout buf above.
311 * Rebuild the select bitmasks each time.
/* see_event starts true when we are not waiting on a specific handle;
 * otherwise it becomes true only if wait_eh is still on the queue */
318 see_event = (wait_eh == (event_handle_t *)NULL);
320 * Run through each event handle and setup the events.
321 * We save our next pointer early in case we GC some dead
324 for (eh = eventq_first(eventq); eh != NULL; eh = nexteh) {
325 nexteh = eventq_next(eh);
330 * Read fds just get set into the select bitmask
333 FD_SET((int)eh->data, &readfds);
334 FD_SET((int)eh->data, &errfds);
335 maxfd = max(maxfd, (int)eh->data);
336 see_event |= (eh == wait_eh);
340 * Likewise with write fds
343 FD_SET((int)eh->data, &writefds);
344 FD_SET((int)eh->data, &errfds);
345 maxfd = max(maxfd, (int)eh->data);
346 see_event |= (eh == wait_eh);
350 * Only set signals that aren't already set to avoid unnecessary
354 se = &sigtable[eh->data];
355 see_event |= (eh == wait_eh);
357 if (se->handle == eh)
360 /* no previous handle */
361 assert(se->handle == NULL);
/* install our counting handler, remembering the old one for release */
365 se->oldhandler = signal((int)eh->data, signal_handler);
370 * Compute the timeout for this select
373 /* if we're not supposed to block, then leave it at 0 */
/* first sighting of this timer: anchor it at the current time */
377 if (eh->lastfired == -1)
378 eh->lastfired = curtime;
/* seconds remaining until this timer is due */
380 interval = (long)(eh->data - (curtime - eh->lastfired));
385 timeout.tv_sec = min(timeout.tv_sec, interval);
387 /* this is the first timeout */
389 timeout.tv_sec = interval;
391 see_event |= (eh == wait_eh);
395 * Wait events are processed immediately by event_wakeup()
398 see_event |= (eh == wait_eh);
/* NOTE(review): early-exit path; its guarding condition is not visible
 * in this chunk */
416 assert(--entry == 0);
424 _("event: select: dontblock=%d, maxfd=%d, timeout=%ld\n"),
426 tvptr != NULL ? timeout.tv_sec : -1);
/* the actual wait: NULL tvptr blocks indefinitely */
427 rc = select(maxfd + 1, &readfds, &writefds, &errfds, tvptr);
428 event_debug(1, _("event: select returns %d\n"), rc);
431 * Select errors can mean many things. Interrupted events should
432 * not be fatal, since they could be delivered signals which still
433 * need to have their events fired.
436 if (errno != EINTR) {
438 error(_("select failed: %s"), strerror(errno));
443 /* proceed if errno == EINTR, we may have caught a signal */
445 /* contents cannot be trusted */
452 * Grab the current time again for use in timed events.
454 curtime = time(NULL);
457 * We need to copy the errfds into werrfds, so file descriptors
458 * that are being polled for both reading and writing have
459 * both of their poll events 'see' the error.
461 memcpy(&werrfds, &errfds, SIZEOF(werrfds));
464 * Now run through the events and fire the ones that are ready.
465 * Don't handle file descriptor events if the select failed.
467 for (eh = eventq_first(eventq); eh != NULL; eh = eventq_next(eh)) {
472 * Read fds: just fire the event if set in the bitmask
475 if (FD_ISSET((int)eh->data, &readfds) ||
476 FD_ISSET((int)eh->data, &errfds)) {
477 FD_CLR((int)eh->data, &readfds);
478 FD_CLR((int)eh->data, &errfds);
480 if(eh == wait_eh) event_wait_fired = 1;
485 * Write fds: same as Read fds
488 if (FD_ISSET((int)eh->data, &writefds) ||
489 FD_ISSET((int)eh->data, &werrfds)) {
490 FD_CLR((int)eh->data, &writefds);
491 FD_CLR((int)eh->data, &werrfds);
493 if(eh == wait_eh) event_wait_fired = 1;
498 * Signal events: check the score for fires, and run the
499 * event if we got one.
502 se = &sigtable[eh->data];
504 assert(se->handle == eh);
507 if(eh == wait_eh) event_wait_fired = 1;
512 * Timed events: check the interval elapsed since last fired,
513 * and set it off if greater or equal to requested interval.
516 if (eh->lastfired == -1)
517 eh->lastfired = curtime;
518 if ((curtime - eh->lastfired) >= (time_t)eh->data) {
519 eh->lastfired = curtime;
521 if(eh == wait_eh) event_wait_fired = 1;
526 * Wait events are handled immediately by event_wakeup()
527 * Dead events are handled by the pre-select loop.
/* keep looping while blocking is allowed, events remain, and the awaited
 * handle has not yet fired */
538 } while (!dontblock && eventq.qlength > 0 && event_wait_fired == 0);
540 assert(--entry == 0);
542 return (event_wait_fired == 1);
546 * Generic signal handler. Used to count caught signals for the event
/* Only bumps the per-signal score; the dispatch loop consumes it later. */
554 assert((signo >= 0) && ((size_t)signo < (size_t)(sizeof(sigtable) / sizeof(sigtable[0]))));
555 sigtable[signo].score++;
559 * Return a new handle. Take from the handle cache if not empty. Otherwise,
562 static event_handle_t *
/* prefer a recycled handle from the cache */
567 if ((eh = eventq_first(cache)) != NULL) {
568 assert(cache.qlength > 0);
/* cache empty: fall back to a fresh allocation */
573 assert(cache.qlength == 0);
574 return (alloc(SIZEOF(*eh)));
578 * Free a handle. If there's space in the handle cache, put it there.
579 * Otherwise, free it.
/* NOTE(review): the free() path inside this branch is not visible here. */
586 if (cache.qlength > CACHEDEPTH) {
590 eventq_add(cache, eh);
595 * Convert an event type into a string
/* Static lookup table built with the X(s) stringizing macro. */
601 static const struct {
605 #define X(s) { s, stringize(s) }
/* linear scan; unknown types fall through to the sentinel string */
616 for (i = 0; i < (size_t)(sizeof(event_types) / sizeof(event_types[0])); i++)
617 if (type == event_types[i].type)
618 return (event_types[i].name);
619 return (_("BOGUS EVENT TYPE"));