2 * Amanda, The Advanced Maryland Automatic Network Disk Archiver
3 * Copyright (c) 1999 University of Maryland at College Park
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of U.M. not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. U.M. makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
16 * U.M. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL U.M.
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 * Authors: the Amanda Development Team. Its members are listed in a
24 * file named AUTHORS, in the root directory of this distribution.
27 * $Id: event.c,v 1.24 2006/06/16 10:55:05 martinea Exp $
29 * Event handler. Serializes different kinds of events to allow for
30 * a uniform interface, central state storage, and centralized
31 * interdependency logic.
/*
 * event_debug(i, x): emit the printf-style payload x only when the
 * requested level i is at or below the global debug_event threshold.
 * NOTE(review): extract is incomplete -- the actual debug-output call and
 * the closing "} } while (0)" of this macro are missing from the listing.
 */
39 #define event_debug(i,x) do { \
40 if ((i) <= debug_event) { \
/*
 * Per-event state record (struct event_handle, opener/closer missing from
 * this extract).  One is allocated per event_register() call; `le` links
 * it into either the active eventq or the free-handle cache.
 * NOTE(review): `data` is overloaded by type -- fd for EV_READFD/EV_WRITEFD,
 * signal number for EV_SIG, interval seconds for EV_TIME, wakeup id for
 * EV_WAIT (as used throughout event_loop_wait below).
 */
46 * The opaque handle passed back to the caller. This is typedefed to
47 * event_handle_t in our header file.
50 event_fn_t fn; /* function to call when this fires */
51 void *arg; /* argument to pass to previous function */
52 event_type_t type; /* type of event */
53 event_id_t data; /* type data */
54 time_t lastfired; /* timestamp of last fired (EV_TIME only) */
55 LIST_ENTRY(event_handle) le; /* queue handle */
/*
 * Handle queues: a BSD <sys/queue.h>-style LIST plus a qlength counter
 * (the ", 0" in the initializers).  `eventq` holds live events, `cache`
 * holds recycled handles.  The eventq_* macros below wrap the LIST_*
 * operations; note eventq_add inserts at the HEAD, so newest-registered
 * events are visited first by the iteration macros.
 * NOTE(review): the struct wrapper around listhead/qlength and the two
 * variable definitions themselves are missing from this extract.
 */
59 * eventq is a queue of currently active events.
60 * cache is a queue of unused handles. We keep a few around to avoid
61 * malloc overhead when doing a lot of register/releases.
64 LIST_HEAD(, event_handle) listhead;
67 LIST_HEAD_INITIALIZER(eventq.listhead), 0
69 LIST_HEAD_INITIALIZER(eventq.listhead), 0
71 #define eventq_first(q) LIST_FIRST(&q.listhead)
72 #define eventq_next(eh) LIST_NEXT(eh, le)
73 #define eventq_add(q, eh) LIST_INSERT_HEAD(&q.listhead, eh, le);
74 #define eventq_remove(eh) LIST_REMOVE(eh, le);
/*
 * Signal bookkeeping: sigtable[] is indexed by signal number and records
 * the registered handle, a `score` incremented by signal_handler() each
 * time the signal arrives, and the previous disposition so event_release()
 * can restore it via signal().
 * NOTE(review): the CACHEDEPTH constant and the array dimension/closer of
 * sigtable are missing from this extract.
 */
77 * How many items we can have in the handle cache before we start
83 * A table of currently set signal handlers.
85 static struct sigtabent {
86 event_handle_t *handle; /* handle for this signal */
87 int score; /* number of signals recvd since last checked */
88 void (*oldhandler)(int);/* old handler (for unsetting) */
/*
 * File-local prototypes.  fire(eh) invokes the event's callback with its
 * registered argument; event_loop_wait() is the single implementation
 * behind both event_loop() and event_wait().
 */
91 static const char *event_type2str(event_type_t);
92 #define fire(eh) (*(eh)->fn)((eh)->arg)
93 static void signal_handler(int);
94 static event_handle_t *gethandle(void);
95 static void puthandle(event_handle_t *);
96 static int event_loop_wait (event_handle_t *, const int);
/*
 * event_register (fragment): validate the (data, type) pair, allocate a
 * handle, link it onto eventq, and debug-log the registration.
 * NOTE(review): the function signature, several statements (e.g. the
 * sigtable[] hookup for EV_SIG between original lines 125-128) and the
 * closing braces are missing from this extract.
 */
99 * Add a new event. See the comment in event.h for what the arguments
109 event_handle_t *handle;
/* EV_READFD/EV_WRITEFD: the fd must fit in an fd_set, a hard select() limit */
111 if ((type == EV_READFD) || (type == EV_WRITEFD)) {
112 /* make sure we aren't given a high fd that will overflow a fd_set */
113 if (data >= FD_SETSIZE) {
114 error("event_register: Invalid file descriptor %lu", data);
117 #if !defined(__lint) /* Global checking knows that these are never called */
118 } else if (type == EV_SIG) {
119 /* make sure signals are within range */
121 error("event_register: Invalid signal %lu", data);
/* only one handler may be registered per signal at a time */
124 if (sigtable[data].handle != NULL) {
125 error("event_register: signal %lu already registered", data);
128 } else if (type >= EV_DEAD) {
129 error("event_register: Invalid event type %d", type);
/*
 * Allocate (possibly from the free-handle cache) and enqueue.
 * lastfired == -1 marks "never fired yet" for the EV_TIME logic
 * in event_loop_wait().
 */
134 handle = gethandle();
139 handle->lastfired = -1;
140 eventq_add(eventq, handle);
143 event_debug(1, ("%s: event: register: %p->data=%lu, type=%s\n",
144 debug_prefix_time(NULL), handle, handle->data,
145 event_type2str(handle->type)));
/*
 * event_release (fragment): deferred deletion.  The handle is only marked
 * EV_DEAD here; the event loop garbage-collects dead handles, so releasing
 * while the queue is being traversed is safe.  For EV_SIG the previous
 * signal disposition is restored immediately.
 * NOTE(review): the signature line, the sigtable handle-clearing and
 * qlength decrement statements, and closing braces are missing from
 * this extract.
 */
150 * Mark an event to be released. Because we may be traversing the queue
151 * when this is called, we must wait until later to actually remove
156 event_handle_t *handle)
159 assert(handle != NULL);
161 event_debug(1, ("%s: event: release (mark): %p data=%lu, type=%s\n",
162 debug_prefix_time(NULL), handle, handle->data,
163 event_type2str(handle->type)));
/* releasing twice (or releasing an already-collected handle) is a bug */
164 assert(handle->type != EV_DEAD);
167 * For signal events, we need to specially remove them from the
168 * signal event table.
170 if (handle->type == EV_SIG) {
171 struct sigtabent *se = &sigtable[handle->data];
173 assert(se->handle == handle);
/* put back whatever handler was installed before event_register() */
174 signal((int)handle->data, se->oldhandler);
180 * Decrement the qlength now since this is no longer a real
186 * Mark it as dead and leave it for the loop to remove.
188 handle->type = EV_DEAD;
/*
 * event_wakeup (fragment): walk eventq and fire every EV_WAIT handle whose
 * data matches the given id.  EV_WAIT events are never handled inside
 * event_loop_wait() itself -- this is their only firing path.
 * NOTE(review): the signature, the fire() call inside the match branch,
 * and the function's return are missing from this extract.
 */
192 * Fire all EV_WAIT events waiting on the specified id.
201 event_debug(1, ("%s: event: wakeup: enter (%lu)\n",
202 debug_prefix_time(NULL), id));
204 for (eh = eventq_first(eventq); eh != NULL; eh = eventq_next(eh)) {
206 if (eh->type == EV_WAIT && eh->data == id) {
207 event_debug(1, ("%s: event: wakeup: %p id=%lu\n",
208 debug_prefix_time(NULL), eh, id));
/*
 * event_loop / event_wait (fragments): thin public wrappers over
 * event_loop_wait().  event_loop() runs all events (no specific handle to
 * wait for, honoring dontblock); event_wait() blocks until the given
 * handle eh fires (dontblock forced to 0).
 * NOTE(review): both wrapper signatures and braces are missing from this
 * extract; only the forwarding calls are visible.
 */
218 * The event loop. We need to be specially careful here with adds and
219 * deletes. Since adds and deletes will often happen while this is running,
220 * we need to make sure we don't end up referencing a dead event handle.
226 event_loop_wait((event_handle_t *)NULL, dontblock);
235 return event_loop_wait(eh, 0);
/*
 * event_loop_wait (fragment): the core select() loop.
 *
 * Per iteration: snapshot time, rebuild the fd_sets/signal handlers/timeout
 * from eventq (GC'ing EV_DEAD handles), select(), then fire every ready
 * READFD/WRITEFD/SIG/TIME event.  Loops until dontblock is set, the queue
 * drains, or -- when wait_eh is non-NULL -- that specific handle fires
 * (tracked via event_wait_fired, which is also the return value).
 *
 * NOTE(review): this extract is missing many interior lines (branch
 * openers/closers, the EV_DEAD GC branch, fire() calls, the do{ opener,
 * fd_set zeroing, tvptr setup).  The comments below describe only what the
 * visible lines establish.
 */
239 * The event loop. We need to be specially careful here with adds and
240 * deletes. Since adds and deletes will often happen while this is running,
241 * we need to make sure we don't end up referencing a dead event handle.
245 event_handle_t *wait_eh,
/* static reentrancy guard: checked with assert(++entry == 1) below */
249 static int entry = 0;
251 SELECT_ARG_TYPE readfds, writefds, errfds, werrfds;
252 struct timeval timeout, *tvptr;
253 int ntries, maxfd, rc;
256 event_handle_t *eh, *nexteh;
257 struct sigtabent *se;
258 int event_wait_fired = 0;
261 event_debug(1, ("%s: event: loop: enter: dontblock=%d, qlength=%d, eh=%p\n",
262 debug_prefix_time(NULL),
263 dontblock, eventq.qlength, wait_eh));
266 * If we have no events, we have nothing to do
268 if (eventq.qlength == 0)
272 * We must not be entered twice
/* not reentrant: firing callbacks must not call back into the loop */
274 assert(++entry == 1);
279 * Save a copy of the current time once, to reduce syscall load
282 curtime = time(NULL);
/* debug dump of every queued handle before each pass */
285 if (debug_event >= 1) {
286 event_debug(1, ("%s: event: loop: dontblock=%d, qlength=%d eh=%p\n",
287 debug_prefix_time(NULL), dontblock, eventq.qlength,
289 for (eh = eventq_first(eventq); eh != NULL; eh = eventq_next(eh)) {
290 event_debug(1, ("%s: %p): %s data=%lu fn=%p arg=%p\n",
291 debug_prefix_time(NULL), eh,
292 event_type2str(eh->type), eh->data, eh->fn,
297 * Set ourselves up with no timeout initially.
303 * If we can block, initially set the tvptr to NULL. If
304 * we come across timeout events in the loop below, they
305 * will set it to an appropriate buffer. If we don't
306 * see any timeout events, then tvptr will remain NULL
307 * and the select will properly block indefinitely.
309 * If we can't block, set it to point to the timeout buf above.
317 * Rebuild the select bitmasks each time.
/*
 * see_event: sanity flag that wait_eh (if given) is still present in the
 * queue; starts true when there is nothing particular to wait for, and is
 * OR-ed in as each handle is visited below.
 */
324 see_event = (wait_eh == (event_handle_t *)NULL);
326 * Run through each event handle and setup the events.
327 * We save our next pointer early in case we GC some dead
/* nexteh saved up front so the dead-handle GC can't invalidate iteration */
330 for (eh = eventq_first(eventq); eh != NULL; eh = nexteh) {
331 nexteh = eventq_next(eh);
336 * Read fds just get set into the select bitmask
/* fd is watched for both readability and (via errfds) error conditions */
339 FD_SET((int)eh->data, &readfds);
340 FD_SET((int)eh->data, &errfds);
341 maxfd = max(maxfd, (int)eh->data);
342 see_event |= (eh == wait_eh);
346 * Likewise with write fds
349 FD_SET((int)eh->data, &writefds);
350 FD_SET((int)eh->data, &errfds);
351 maxfd = max(maxfd, (int)eh->data);
352 see_event |= (eh == wait_eh);
356 * Only set signals that aren't already set to avoid unnecessary
360 se = &sigtable[eh->data];
361 see_event |= (eh == wait_eh);
363 if (se->handle == eh)
366 /* no previous handle */
367 assert(se->handle == NULL);
/* install our counter handler, remember the old one for event_release() */
371 se->oldhandler = signal((int)eh->data, signal_handler);
376 * Compute the timeout for this select
379 /* if we're not supposed to block, then leave it at 0 */
/* lastfired == -1 means "never fired": start counting from now */
383 if (eh->lastfired == -1)
384 eh->lastfired = curtime;
/* seconds remaining until this EV_TIME event is due */
386 interval = (long)(eh->data - (curtime - eh->lastfired));
/* select timeout is the soonest-due EV_TIME interval */
391 timeout.tv_sec = min(timeout.tv_sec, interval);
393 /* this is the first timeout */
395 timeout.tv_sec = interval;
397 see_event |= (eh == wait_eh);
401 * Wait events are processed immediately by event_wakeup()
404 see_event |= (eh == wait_eh);
/* NOTE(review): this early-exit path (presumably "wait_eh vanished from
 * the queue") must release the reentrancy guard before returning */
422 assert(--entry == 0);
430 ("%s: event: select: dontblock=%d, maxfd=%d, timeout=%ld\n",
431 debug_prefix_time(NULL), dontblock, maxfd,
432 tvptr != NULL ? timeout.tv_sec : -1));
433 rc = select(maxfd + 1, &readfds, &writefds, &errfds, tvptr);
434 event_debug(1, ("%s: event: select returns %d\n",
435 debug_prefix_time(NULL), rc));
438 * Select errors can mean many things. Interrupted events should
439 * not be fatal, since they could be delivered signals which still
440 * need to have their events fired.
443 if (errno != EINTR) {
/* NOTE(review): the visible path dies on any non-EINTR select failure;
 * the missing lines around here likely include a retry (ntries) */
445 error("select failed: %s", strerror(errno));
450 /* proceed if errno == EINTR, we may have caught a signal */
452 /* contents cannot be trusted */
459 * Grab the current time again for use in timed events.
461 curtime = time(NULL);
464 * We need to copy the errfds into werrfds, so file descriptors
465 * that are being polled for both reading and writing have
466 * both of their poll events 'see' the error.
468 memcpy(&werrfds, &errfds, SIZEOF(werrfds));
471 * Now run through the events and fire the ones that are ready.
472 * Don't handle file descriptor events if the select failed.
474 for (eh = eventq_first(eventq); eh != NULL; eh = eventq_next(eh)) {
479 * Read fds: just fire the event if set in the bitmask
/* clear the bits so another handle on the same fd doesn't double-fire */
482 if (FD_ISSET((int)eh->data, &readfds) ||
483 FD_ISSET((int)eh->data, &errfds)) {
484 FD_CLR((int)eh->data, &readfds);
485 FD_CLR((int)eh->data, &errfds);
487 if(eh == wait_eh) event_wait_fired = 1;
492 * Write fds: same as Read fds
495 if (FD_ISSET((int)eh->data, &writefds) ||
496 FD_ISSET((int)eh->data, &werrfds)) {
497 FD_CLR((int)eh->data, &writefds);
498 FD_CLR((int)eh->data, &werrfds);
500 if(eh == wait_eh) event_wait_fired = 1;
505 * Signal events: check the score for fires, and run the
506 * event if we got one.
509 se = &sigtable[eh->data];
511 assert(se->handle == eh);
514 if(eh == wait_eh) event_wait_fired = 1;
519 * Timed events: check the interval elapsed since last fired,
520 * and set it off if greater or equal to requested interval.
523 if (eh->lastfired == -1)
524 eh->lastfired = curtime;
525 if ((curtime - eh->lastfired) >= (time_t)eh->data) {
526 eh->lastfired = curtime;
528 if(eh == wait_eh) event_wait_fired = 1;
533 * Wait events are handled immediately by event_wakeup()
534 * Dead events are handled by the pre-select loop.
/* keep looping while blocking is allowed, events remain, and the awaited
 * handle (if any) has not yet fired */
545 } while (!dontblock && eventq.qlength > 0 && event_wait_fired == 0);
547 assert(--entry == 0);
549 return (event_wait_fired == 1);
/*
 * signal_handler (fragment): installed for every EV_SIG registration.
 * Simply bumps the per-signal score; event_loop_wait() polls the score
 * after select() returns and fires the matching event.
 * NOTE(review): signature and braces are missing from this extract.
 */
553 * Generic signal handler. Used to count caught signals for the event
561 assert((signo >= 0) && ((size_t)signo < (size_t)(sizeof(sigtable) / sizeof(sigtable[0]))));
562 sigtable[signo].score++;
/*
 * gethandle (fragment): pop a recycled handle from the cache if one is
 * available, otherwise alloc() a fresh one.  Pairs with puthandle().
 * NOTE(review): the lines that unlink the cached handle and return it,
 * and the braces, are missing from this extract.
 */
566 * Return a new handle. Take from the handle cache if not empty. Otherwise,
569 static event_handle_t *
574 if ((eh = eventq_first(cache)) != NULL) {
575 assert(cache.qlength > 0);
/* cache empty: fall through to a fresh allocation */
580 assert(cache.qlength == 0);
581 return (alloc(SIZEOF(*eh)));
/*
 * puthandle (fragment): recycle a released handle into the cache unless
 * the cache already holds more than CACHEDEPTH entries, in which case it
 * is freed (the free() call is among the lines missing from this extract).
 */
585 * Free a handle. If there's space in the handle cache, put it there.
586 * Otherwise, free it.
593 if (cache.qlength > CACHEDEPTH) {
597 eventq_add(cache, eh);
/*
 * event_type2str (fragment): linear lookup of the type in a static
 * {type, name} table (built with the X(s)/stringize macro); returns a
 * fixed sentinel string for unknown types.  Used only for debug output.
 * NOTE(review): the table contents, the function signature, and the
 * closing brace are missing from this extract.
 */
602 * Convert an event type into a string
608 static const struct {
612 #define X(s) { s, stringize(s) }
623 for (i = 0; i < (size_t)(sizeof(event_types) / sizeof(event_types[0])); i++)
624 if (type == event_types[i].type)
625 return (event_types[i].name);
626 return ("BOGUS EVENT TYPE");