/*	$OpenBSD: event.c,v 1.38 2015/01/06 23:11:23 bluhm Exp $	*/

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <netdb.h>
#include <asr.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

extern const struct eventop selectops;
extern const struct eventop pollops;
extern const struct eventop kqops;

/* In order of preference */
static const struct eventop *eventops[] = {
	&kqops,
	&pollops,
	&selectops,
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

static void
detect_monotonic(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
}

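/*
 * Return the current time in *tp.  Uses the time cached by the event
 * loop when one is set, otherwise reads the monotonic clock if it is
 * available, and falls back to gettimeofday().
 */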
static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}

	return (gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

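/*
 * Allocate and set up a new event base.  The first backend in eventops[]
 * that initializes successfully (kqueue, then poll, then select) is used,
 * and a single active event queue is allocated by default.
 */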
struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (!issetugid() && getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}

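/*
 * Tear down an event base: delete the events still attached to it,
 * release the backend, the timeout heap and the active queues, and
 * finally free the base itself.
 */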
void
event_base_free(struct event_base *base)
{
	int i, n_deleted=0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

#if 0
	/* Right now, reinit always takes effect, since even if the
	   backend doesn't require it, the signal socketpair code does.
	 */
	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);
#endif

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

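/*
 * Resize the set of active event queues to npriorities.  This can only be
 * done while no events are active; any existing queues are freed and a new
 * array of empty queues is allocated.
 */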
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (npriorities == base->nactivequeues)
		return (0);

	if (base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
	    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}


/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

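/*
 * Run the event loop on a base.  Each iteration corrects for clock jumps,
 * computes how long the backend may block, dispatches on the backend,
 * activates expired timeouts and then runs the callbacks of active events.
 * The loop ends when no events remain, when loopexit/loopbreak has been
 * requested, or after one pass with EVLOOP_ONCE / EVLOOP_NONBLOCK.
 */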
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}

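/*
 * Initialize an event structure with its file descriptor, event mask,
 * callback and callback argument.  The event is bound to current_base and
 * given the middle priority; event_base_set() and event_priority_set() may
 * change that before the event is added.
 */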
void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

/*
 * Sets the priority of an event - if an event is already scheduled
 * changing the priority is going to fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}

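/*
 * Register an event with its base.  I/O and signal events are handed to
 * the backend and put on the inserted queue; a non-NULL tv (re)schedules
 * the event's timeout on the timeout heap, but only if the earlier steps
 * succeeded.
 */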
int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
		    1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %lld seconds, call %p",
			 (long long)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}

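/*
 * Remove an event from every queue it is on (timeout heap, active queue,
 * inserted queue).  The backend is only asked to drop the event if it had
 * been inserted; an event that was never added returns -1.
 */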
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

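/*
 * Make an event active with result flags res; the event loop will invoke
 * its callback ncalls times.  Activating an already active event simply
 * ORs the new result flags into the pending ones.
 */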
void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

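/*
 * Compute how long the backend may block in *tv_p: NULL when no timeouts
 * are pending, zero when the earliest timeout has already expired, and
 * otherwise the interval until the earliest timeout fires.
 */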
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %lld seconds", (long long)tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
	    __func__));
	timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}

void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
		    ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (_EVENT_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}


/*
 * Libevent glue for ASR.
 */
struct event_asr {
	struct event	 ev;
	struct asr_query *async;
	void		(*cb)(struct asr_result *, void *);
	void		*arg;
};

static void
event_asr_dispatch(int fd __attribute__((__unused__)),
    short ev __attribute__((__unused__)), void *arg)
{
	struct event_asr *eva = arg;
	struct asr_result ar;
	struct timeval tv;

	event_del(&eva->ev);

	if (asr_run(eva->async, &ar)) {
		eva->cb(&ar, eva->arg);
		free(eva);
	} else {
		event_set(&eva->ev, ar.ar_fd,
		    ar.ar_cond == ASR_WANT_READ ? EV_READ : EV_WRITE,
		    event_asr_dispatch, eva);
		tv.tv_sec = ar.ar_timeout / 1000;
		tv.tv_usec = (ar.ar_timeout % 1000) * 1000;
		event_add(&eva->ev, &tv);
	}
}

struct event_asr *
event_asr_run(struct asr_query *async, void (*cb)(struct asr_result *, void *),
    void *arg)
{
	struct event_asr *eva;
	struct timeval tv;

	eva = calloc(1, sizeof *eva);
	if (eva == NULL)
		return (NULL);
	eva->async = async;
	eva->cb = cb;
	eva->arg = arg;
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	evtimer_set(&eva->ev, event_asr_dispatch, eva);
	evtimer_add(&eva->ev, &tv);
	return (eva);
}

void
event_asr_abort(struct event_asr *eva)
{
	asr_abort(eva->async);
	event_del(&eva->ev);
	free(eva);
}