/* $OpenBSD: event.c,v 1.38 2015/01/06 23:11:23 bluhm Exp $ */

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <netdb.h>
#include <asr.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

extern const struct eventop selectops;
extern const struct eventop pollops;
extern const struct eventop kqops;

/* In order of preference */
static const struct eventop *eventops[] = {
	&kqops,
	&pollops,
	&selectops,
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

static void
detect_monotonic(void)
{
	struct timespec	ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
}

static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}

	return (gettimeofday(tp, NULL));
}
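
/*
 * Within one dispatch iteration, base->tv_cache holds the time sampled
 * right after the backend's dispatch returned, so every callback in that
 * iteration observes the same timestamp.  Outside of dispatch the cache
 * is cleared and the monotonic clock (or gettimeofday() as a fallback)
 * is consulted directly.
 */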

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}
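
/*
 * Typical single-base usage, as an illustrative sketch only (the fd and
 * callback names below are invented for the example): initialize the
 * library, describe an event, add it, and enter the dispatch loop.
 *
 *	struct event rd_ev;
 *
 *	event_init();
 *	event_set(&rd_ev, sock_fd, EV_READ|EV_PERSIST, on_readable, NULL);
 *	event_add(&rd_ev, NULL);
 *	event_dispatch();
 */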

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (!issetugid() && getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}

void
event_base_free(struct event_base *base)
{
	int i, n_deleted = 0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

#if 0
	/* Right now, reinit always takes effect, since even if the
	   backend doesn't require it, the signal socketpair code does.
	 */
	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);
#endif

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}
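
/*
 * Fork sketch (illustrative only; error handling omitted): a child
 * process that keeps using a base created before fork() must rebuild the
 * kernel-side state (kqueue descriptor, signal socketpair) before it
 * dispatches again.
 *
 *	struct event_base *base = event_init();
 *	...
 *	if (fork() == 0) {
 *		event_reinit(base);
 *		event_dispatch();
 *	}
 */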

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (npriorities == base->nactivequeues)
		return (0);

	if (base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}
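
/*
 * Illustrative sketch of the two ways to leave a running loop (the
 * timeout value is an arbitrary example): event_loopexit() schedules a
 * one-shot timeout that sets event_gotterm, so callbacks already active
 * in the current iteration still run; event_loopbreak() makes the loop
 * stop as soon as the callback that called it returns.
 *
 *	struct timeval half_sec = { 0, 500000 };
 *
 *	event_loopexit(&half_sec);	// exit roughly half a second from now
 *	event_loopbreak();		// or: stop after the current callback
 */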


/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
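
/*
 * Dispatch-flag sketch (illustrative; both flags come from event.h):
 * EVLOOP_ONCE blocks until at least one event becomes active and its
 * callbacks have run, then returns; EVLOOP_NONBLOCK only checks for
 * events that are already ready and never waits.
 *
 *	event_loop(EVLOOP_NONBLOCK);	// poll once, do not block
 *	event_loop(EVLOOP_ONCE);	// wait for one batch of callbacks
 */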

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not thread safe; the event is scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
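
/*
 * One-shot scheduling sketch (callback name invented for illustration):
 * run do_cleanup() once, two seconds from now, without owning a struct
 * event; the library allocates and frees the wrapper itself.
 *
 *	struct timeval two_sec = { 2, 0 };
 *
 *	event_once(-1, EV_TIMEOUT, do_cleanup, NULL, &two_sec);
 */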

void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

/*
 * Sets the priority of an event - if the event is already active,
 * changing the priority will fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}
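
/*
 * Priority sketch (illustrative; fd and callback are placeholders): after
 * event_base_priority_init(base, 3) the valid priorities are 0..2 and
 * lower numbers are serviced first.  Set the priority between event_set()
 * and event_add().
 *
 *	event_set(&ev, fd, EV_READ, cb, NULL);
 *	event_priority_set(&ev, 0);	// most urgent queue
 *	event_add(&ev, NULL);
 */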

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}
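
/*
 * Query sketch (illustrative): check whether an event still has a timeout
 * pending and, if so, obtain its expiry remapped to the gettimeofday()
 * clock.
 *
 *	struct timeval when;
 *
 *	if (event_pending(&ev, EV_TIMEOUT, &when))
 *		;	// "when" holds the absolute expiry time
 */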

int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %lld seconds, call %p",
			 (long long)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}
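
/*
 * Rescheduling sketch (illustrative; conn_ev is a placeholder): calling
 * event_add() again with a timeout on an event that already has one
 * simply replaces the old deadline, which is the usual way to push back
 * an idle timer on every bit of activity.
 *
 *	struct timeval idle = { 30, 0 };
 *
 *	event_add(&conn_ev, &idle);	// (re)arm the 30 second idle timeout
 */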

int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %lld seconds", (long long)tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
	    __func__));
	timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}
64071 |
} |
879 |
|
|
|
880 |
|
|
void |
881 |
|
|
timeout_process(struct event_base *base) |
882 |
|
|
{ |
883 |
|
127716 |
struct timeval now; |
884 |
|
|
struct event *ev; |
885 |
|
|
|
886 |
✓✓ |
63858 |
if (min_heap_empty(&base->timeheap)) |
887 |
|
62552 |
return; |
888 |
|
|
|
889 |
|
1306 |
gettime(base, &now); |
890 |
|
|
|
891 |
✓✓ |
6509640 |
while ((ev = min_heap_top(&base->timeheap))) { |
892 |
✓✓✓✓ ✓✓ |
6508992 |
if (timercmp(&ev->ev_timeout, &now, >)) |
893 |
|
|
break; |
894 |
|
|
|
895 |
|
|
/* delete this event from the I/O queues */ |
896 |
|
3253514 |
event_del(ev); |
897 |
|
|
|
898 |
|
|
event_debug(("timeout_process: call %p", |
899 |
|
|
ev->ev_callback)); |
900 |
|
3253514 |
event_active(ev, EV_TIMEOUT, 1); |
901 |
|
|
} |
902 |
|
65164 |
} |
903 |
|
|
|
904 |
|
|
void |
905 |
|
|
event_queue_remove(struct event_base *base, struct event *ev, int queue) |
906 |
|
|
{ |
907 |
✗✓ |
25369080 |
if (!(ev->ev_flags & queue)) |
908 |
|
|
event_errx(1, "%s: %p(fd %d) not on queue %x", __func__, |
909 |
|
|
ev, ev->ev_fd, queue); |
910 |
|
|
|
911 |
✓✓ |
12684540 |
if (~ev->ev_flags & EVLIST_INTERNAL) |
912 |
|
12684432 |
base->event_count--; |
913 |
|
|
|
914 |
|
12684540 |
ev->ev_flags &= ~queue; |
915 |
✓✓✓✗
|
12684540 |
switch (queue) { |
916 |
|
|
case EVLIST_INSERTED: |
917 |
✓✓ |
367521 |
TAILQ_REMOVE(&base->eventqueue, ev, ev_next); |
918 |
|
122507 |
break; |
919 |
|
|
case EVLIST_ACTIVE: |
920 |
|
3381277 |
base->event_count_active--; |
921 |
✓✓ |
10143831 |
TAILQ_REMOVE(base->activequeues[ev->ev_pri], |
922 |
|
|
ev, ev_active_next); |
923 |
|
3381277 |
break; |
924 |
|
|
case EVLIST_TIMEOUT: |
925 |
|
9180756 |
min_heap_erase(&base->timeheap, ev); |
926 |
|
9180756 |
break; |
927 |
|
|
default: |
928 |
|
|
event_errx(1, "%s: unknown queue %x", __func__, queue); |
929 |
|
|
} |
930 |
|
12684540 |
} |
931 |
|
|
|
932 |
|
|
void |
933 |
|
|
event_queue_insert(struct event_base *base, struct event *ev, int queue) |
934 |
|
|
{ |
935 |
✗✓ |
25369104 |
if (ev->ev_flags & queue) { |
936 |
|
|
/* Double insertion is possible for active events */ |
937 |
|
|
if (queue & EVLIST_ACTIVE) |
938 |
|
|
return; |
939 |
|
|
|
940 |
|
|
event_errx(1, "%s: %p(fd %d) already on queue %x", __func__, |
941 |
|
|
ev, ev->ev_fd, queue); |
942 |
|
|
} |
943 |
|
|
|
944 |
✓✓ |
12684552 |
if (~ev->ev_flags & EVLIST_INTERNAL) |
945 |
|
12684432 |
base->event_count++; |
946 |
|
|
|
947 |
|
12684552 |
ev->ev_flags |= queue; |
948 |
✓✓✓✗
|
12684552 |
switch (queue) { |
949 |
|
|
case EVLIST_INSERTED: |
950 |
|
122519 |
TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next); |
951 |
|
122519 |
break; |
952 |
|
|
case EVLIST_ACTIVE: |
953 |
|
3381277 |
base->event_count_active++; |
954 |
|
3381277 |
TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri], |
955 |
|
|
ev,ev_active_next); |
956 |
|
3381277 |
break; |
957 |
|
|
case EVLIST_TIMEOUT: { |
958 |
|
9180756 |
min_heap_push(&base->timeheap, ev); |
959 |
|
9180756 |
break; |
960 |
|
|
} |
961 |
|
|
default: |
962 |
|
|
event_errx(1, "%s: unknown queue %x", __func__, queue); |
963 |
|
|
} |
964 |
|
12684552 |
} |
965 |
|
|
|
966 |
|
|
/* Functions for debugging */ |
967 |
|
|
|
968 |
|
|
const char * |
969 |
|
|
event_get_version(void) |
970 |
|
|
{ |
971 |
|
|
return (_EVENT_VERSION); |
972 |
|
|
} |
973 |
|
|
|
974 |
|
|
/* |
975 |
|
|
* No thread-safe interface needed - the information should be the same |
976 |
|
|
* for all threads. |
977 |
|
|
*/ |
978 |
|
|
|
979 |
|
|
const char * |
980 |
|
|
event_get_method(void) |
981 |
|
|
{ |
982 |
|
18 |
return (current_base->evsel->name); |
983 |
|
|
} |
984 |
|
|
|
985 |
|
|
|
986 |
|
|
/* |
987 |
|
|
* Libevent glue for ASR. |
988 |
|
|
*/ |
989 |
|
|
struct event_asr { |
990 |
|
|
struct event ev; |
991 |
|
|
struct asr_query *async; |
992 |
|
|
void (*cb)(struct asr_result *, void *); |
993 |
|
|
void *arg; |
994 |
|
|
}; |
995 |
|
|
|
996 |
|
|
static void |
997 |
|
|
event_asr_dispatch(int fd __attribute__((__unused__)), |
998 |
|
|
short ev __attribute__((__unused__)), void *arg) |
999 |
|
|
{ |
1000 |
|
|
struct event_asr *eva = arg; |
1001 |
|
|
struct asr_result ar; |
1002 |
|
|
struct timeval tv; |
1003 |
|
|
|
1004 |
|
|
event_del(&eva->ev); |
1005 |
|
|
|
1006 |
|
|
if (asr_run(eva->async, &ar)) { |
1007 |
|
|
eva->cb(&ar, eva->arg); |
1008 |
|
|
free(eva); |
1009 |
|
|
} else { |
1010 |
|
|
event_set(&eva->ev, ar.ar_fd, |
1011 |
|
|
ar.ar_cond == ASR_WANT_READ ? EV_READ : EV_WRITE, |
1012 |
|
|
event_asr_dispatch, eva); |
1013 |
|
|
tv.tv_sec = ar.ar_timeout / 1000; |
1014 |
|
|
tv.tv_usec = (ar.ar_timeout % 1000) * 1000; |
1015 |
|
|
event_add(&eva->ev, &tv); |
1016 |
|
|
} |
1017 |
|
|
} |
1018 |
|
|
|
1019 |
|
|
struct event_asr * |
1020 |
|
|
event_asr_run(struct asr_query *async, void (*cb)(struct asr_result *, void *), |
1021 |
|
|
void *arg) |
1022 |
|
|
{ |
1023 |
|
|
struct event_asr *eva; |
1024 |
|
|
struct timeval tv; |
1025 |
|
|
|
1026 |
|
|
eva = calloc(1, sizeof *eva); |
1027 |
|
|
if (eva == NULL) |
1028 |
|
|
return (NULL); |
1029 |
|
|
eva->async = async; |
1030 |
|
|
eva->cb = cb; |
1031 |
|
|
eva->arg = arg; |
1032 |
|
|
tv.tv_sec = 0; |
1033 |
|
|
tv.tv_usec = 0; |
1034 |
|
|
evtimer_set(&eva->ev, event_asr_dispatch, eva); |
1035 |
|
|
evtimer_add(&eva->ev, &tv); |
1036 |
|
|
return (eva); |
1037 |
|
|
} |
1038 |
|
|
|
1039 |
|
|
void |
1040 |
|
|
event_asr_abort(struct event_asr *eva) |
1041 |
|
|
{ |
1042 |
|
|
asr_abort(eva->async); |
1043 |
|
|
event_del(&eva->ev); |
1044 |
|
|
free(eva); |
1045 |
|
|
} |