1 |
|
|
/* $OpenBSD: rde.c,v 1.108 2017/01/24 04:24:25 benno Exp $ */ |
2 |
|
|
|
3 |
|
|
/* |
4 |
|
|
* Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org> |
5 |
|
|
* Copyright (c) 2004 Esben Norby <norby@openbsd.org> |
6 |
|
|
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org> |
7 |
|
|
* |
8 |
|
|
* Permission to use, copy, modify, and distribute this software for any |
9 |
|
|
* purpose with or without fee is hereby granted, provided that the above |
10 |
|
|
* copyright notice and this permission notice appear in all copies. |
11 |
|
|
* |
12 |
|
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
13 |
|
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
14 |
|
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
15 |
|
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
16 |
|
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
17 |
|
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
18 |
|
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
19 |
|
|
*/ |
20 |
|
|
|
21 |
|
|
#include <sys/types.h> |
22 |
|
|
#include <sys/socket.h> |
23 |
|
|
#include <sys/queue.h> |
24 |
|
|
#include <netinet/in.h> |
25 |
|
|
#include <arpa/inet.h> |
26 |
|
|
#include <err.h> |
27 |
|
|
#include <errno.h> |
28 |
|
|
#include <stdlib.h> |
29 |
|
|
#include <signal.h> |
30 |
|
|
#include <string.h> |
31 |
|
|
#include <pwd.h> |
32 |
|
|
#include <unistd.h> |
33 |
|
|
#include <event.h> |
34 |
|
|
|
35 |
|
|
#include "ospf.h" |
36 |
|
|
#include "ospfd.h" |
37 |
|
|
#include "ospfe.h" |
38 |
|
|
#include "log.h" |
39 |
|
|
#include "rde.h" |
40 |
|
|
|
41 |
|
|
/* event/imsg handlers and shutdown */
void		 rde_sig_handler(int sig, short, void *);
__dead void	 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

/* ctl summary replies */
void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);

/* RDE neighbor table management */
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

/* per-neighbor LSA request list (Link State Request tracking) */
void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

/* AS-external route redistribution and LSA origination */
struct iface	*rde_asext_lookup(u_int32_t, int);
void		 rde_asext_get(struct kroute *);
void		 rde_asext_put(struct kroute *);
void		 rde_asext_free(void);
struct lsa	*orig_asext_lsa(struct kroute *, u_int32_t, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;	/* running / staged config */
struct imsgev		*iev_ospfe;			/* pipe to the OSPF engine */
struct imsgev		*iev_main;			/* pipe to the parent process */
struct rde_nbr		*nbrself;			/* pseudo-neighbor for self-originated LSAs */
struct lsa_tree		 asext_tree;			/* AS-external (type 5) LSA database */
72 |
|
|
|
73 |
|
|
/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	/* a graceful termination request shuts the RDE down */
	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}

	/* only SIGINT/SIGTERM are registered with this handler */
	fatalx("unexpected signal");
}
90 |
|
|
|
91 |
|
|
/*
 * route decision engine
 *
 * Fork the RDE child process.  In the parent this returns the child's
 * pid immediately; the child chroots, drops privileges, wires up the
 * imsg pipes to the parent and to the OSPF engine, and runs the
 * libevent loop until shutdown.
 */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct area		*area;
	struct iface		*iface;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		/* child: continue below as the RDE */
		break;
	default:
		/* parent: hand the child's pid back to the caller */
		return (pid);
	}

	/* cleanup a bit */
	kif_clear();

	rdeconf = xconf;

	if ((pw = getpwnam(OSPFD_USER)) == NULL)
		fatal("getpwnam");

	/* confine the process to the unprivileged user's home directory */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	/*
	 * XXX needed with fork+exec
	 * log_init(debug, LOG_DAEMON);
	 * log_setverbose(verbose);
	 */

	ospfd_process = PROC_RDE_ENGINE;
	log_procinit(log_procnames[ospfd_process]);

	/* drop group and user privileges; order matters (groups first) */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio flock rpath cpath wpath", NULL) == -1)
		fatal("pledge");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends this process does not use */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			md_list_clr(&iface->auth_md_list);

	/* the redistribute list is only used by the parent process */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	/* main loop; only returns once event_loopexit() was called */
	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
206 |
|
|
|
207 |
|
|
/*
 * Tear down all RDE state (pipes, timers, routing table, areas, LSA
 * databases, neighbor table) and exit the process.  Never returns.
 */
__dead void
rde_shutdown(void)
{
	struct area	*a;
	struct vertex	*v, *nv;

	/* close pipes */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	close(iev_ospfe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	/* free the AS-external LSA tree; fetch the successor before freeing */
	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, &asext_tree, v);
		vertex_free(v);
	}
	rde_asext_free();
	rde_nbr_free();

	free(iev_ospfe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
241 |
|
|
|
242 |
|
|
/*
 * Convenience wrapper: queue an imsg of the given type on the pipe to
 * the OSPF engine.  Returns the result of imsg_compose_event().
 */
int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}
249 |
|
|
|
250 |
|
|
/* ARGSUSED */
/*
 * Event handler for the pipe from the OSPF engine.  Drains all queued
 * imsgs and dispatches them: neighbor lifecycle, database exchange
 * (DD/LS_REQ/LS_UPD/LS_MAXAGE) and the various ctl show requests.
 * On EOF the event is removed and the event loop is told to exit.
 */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, error, shut = 0, verbose;
	u_int16_t		 l;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* monotonic timestamp used for the MIN_LS_ARRIVAL checks below */
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			/* the peerid doubles as the neighbor hash key */
			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->state = state;
			/* once fully adjacent, pending LS requests are moot */
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_NEIGHBOR_CAPA:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(u_int8_t))
				fatalx("invalid size of OE request");
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;
			nbr->capa_options = *(u_int8_t *)imsg.data;
			break;
		case IMSG_AREA_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");

			/* here the peerid carries the area id */
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				if (area->id.s_addr == imsg.hdr.peerid)
					break;
			}
			if (area == NULL)
				break;
			memcpy(&state, imsg.data, sizeof(state));
			area->active = state;
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* snapshot the LSA DB for database exchange */
			lsa_snap(nbr);

			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
			    0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* walk the list of LSA headers in the DD packet */
			buf = imsg.data;
			error = 0;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				/* AS-external LSAs are not allowed in stub areas */
				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
				    nbr->area->stub) {
					error = 1;
					break;
				}
				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0 && !error)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			if (!error)
				imsg_compose_event(iev_ospfe, IMSG_DD_END,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			else
				imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* answer each requested LSA with a full LS update */
			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    ntohl(req_hdr.type), req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					log_debug("rde_dispatch_imsg: "
					    "requested LSA not found");
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* take a private copy of the received LSA */
			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					/* flooded too recently; drop it */
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				/* lsa_add takes ownership unless delayed */
				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table we should reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			/* don't flush LSAs while neighbors are still loading */
			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
		case IMSG_CTL_SHOW_DB_OPAQ:
			/* optional payload: an area id to restrict the dump */
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					/* stub areas carry no external LSAs */
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			/* external routes are not tied to an area */
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_setverbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
629 |
|
|
|
630 |
|
|
/* ARGSUSED */
/*
 * Event handler for the pipe from the parent process.  Handles kernel
 * route redistribution (NETWORK_ADD/DEL) and the staged reconfiguration
 * protocol (RECONF_CONF .. RECONF_END).  The reconfig messages arrive
 * in order: conf first, then areas, each followed by its redistribute
 * entries and interfaces; `narea` is static so IMSG_RECONF_REDIST and
 * IMSG_RECONF_IFACE attach to the most recently received area.
 */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct kroute		 rr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct redistribute	*nred;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_get(&rr);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_put(&rr);
			break;
		case IMSG_RECONF_CONF:
			/* start of a new staged configuration */
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			/* the copied list heads are stale; reinitialize */
			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);
			SIMPLEQ_INIT(&narea->redist_list);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_REDIST:
			if ((nred= malloc(sizeof(struct redistribute))) == NULL)
				fatal(NULL);
			memcpy(nred, imsg.data, sizeof(struct redistribute));

			/* attach to the most recently received area */
			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			TAILQ_INIT(&niface->auth_md_list);
			RB_INIT(&niface->lsa_tree);

			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			/* merge_config disposes of nconf */
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
745 |
|
|
|
746 |
|
|
/*
 * Dump one area's LSA databases to a ctl client identified by pid:
 * first the area header, then each interface with its link-local LSAs,
 * then the area-scoped LSAs.
 */
void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
	    area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
		    0, pid, -1, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}
765 |
|
|
|
766 |
|
|
/* Return the configured router id (network byte order, from rdeconf). */
u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}
771 |
|
|
|
772 |
|
|
struct area * |
773 |
|
|
rde_backbone_area(void) |
774 |
|
|
{ |
775 |
|
|
struct in_addr id; |
776 |
|
|
|
777 |
|
|
id.s_addr = INADDR_ANY; |
778 |
|
|
|
779 |
|
|
return (area_find(rdeconf, id)); |
780 |
|
|
} |
781 |
|
|
|
782 |
|
|
/*
 * Push the current set of valid nexthops for route r to the parent as
 * one IMSG_KROUTE_CHANGE message (one struct kroute per nexthop).  If
 * no usable nexthop remains the route is deleted from the kernel
 * instead.
 */
void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct ibuf		*wbuf;

	/* silently give up if the imsg buffer cannot be created */
	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		if (rn->connected)
			/* skip self-originated routes */
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix.s_addr = r->prefix.s_addr;
		kr.nexthop.s_addr = rn->nexthop.s_addr;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0) {
		/* no valid nexthop or self originated, so remove */
		ibuf_free(wbuf);
		rde_send_delete_kroute(r);
		return;
	}
	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}
819 |
|
|
|
820 |
|
|
void |
821 |
|
|
rde_send_delete_kroute(struct rt_node *r) |
822 |
|
|
{ |
823 |
|
|
struct kroute kr; |
824 |
|
|
|
825 |
|
|
bzero(&kr, sizeof(kr)); |
826 |
|
|
kr.prefix.s_addr = r->prefix.s_addr; |
827 |
|
|
kr.prefixlen = r->prefixlen; |
828 |
|
|
|
829 |
|
|
imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1, |
830 |
|
|
&kr, sizeof(kr)); |
831 |
|
|
} |
832 |
|
|
|
833 |
|
|
/*
 * Send the global "show summary" statistics (router id, SPF timers,
 * area count, external LSA count/checksum, uptime) to the ctl client
 * identified by pid.
 */
void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	/* sum of LSA checksums acts as a quick database fingerprint */
	RB_FOREACH(v, lsa_tree, &asext_tree) {
		sumctl.num_ext_lsa++;
		sumctl.ext_lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	gettimeofday(&now, NULL);
	/* guard against wall clock stepping backwards past start time */
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	sumctl.rfc1583compat = rdeconf->rfc1583compat;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}
866 |
|
|
|
867 |
|
|
/*
 * Send per-area "show summary" statistics (interface count, fully
 * adjacent neighbor count, LSA count/checksum, SPF run count) to the
 * ctl client identified by pid.
 */
void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	/* count only real, fully adjacent neighbors (not ourselves) */
	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree) {
		sumareactl.num_lsa++;
		sumareactl.lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}
896 |
|
|
|
897 |
|
|
/* hash bucket: singly-linked list of RDE neighbors */
LIST_HEAD(rde_nbr_head, rde_nbr);

/* neighbor hash table, keyed by peerid; sized in rde_nbr_init() */
struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;	/* table size - 1 (power of two) */
} rdenbrtable;

/* bucket for a given peerid; relies on hashtbl size being a power of two */
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
906 |
|
|
|
907 |
|
|
/*
 * Allocate the neighbor hash table (rounded up to a power of two so
 * RDE_NBR_HASH can mask instead of modulo) and insert the permanent
 * pseudo-neighbor `nbrself` used for self-originated LSAs.
 */
void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	/* round hashsize up to the next power of two */
	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}
934 |
|
|
|
935 |
|
|
/*
 * Release the resources allocated by rde_nbr_init(): the pseudo self
 * neighbor and the hash bucket array.  Neighbors still linked into the
 * buckets are not freed here (they go through rde_nbr_del()).
 */
void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}
941 |
|
|
|
942 |
|
|
struct rde_nbr * |
943 |
|
|
rde_nbr_find(u_int32_t peerid) |
944 |
|
|
{ |
945 |
|
|
struct rde_nbr_head *head; |
946 |
|
|
struct rde_nbr *nbr; |
947 |
|
|
|
948 |
|
|
head = RDE_NBR_HASH(peerid); |
949 |
|
|
|
950 |
|
|
LIST_FOREACH(nbr, head, hash) { |
951 |
|
|
if (nbr->peerid == peerid) |
952 |
|
|
return (nbr); |
953 |
|
|
} |
954 |
|
|
|
955 |
|
|
return (NULL); |
956 |
|
|
} |
957 |
|
|
|
958 |
|
|
struct rde_nbr * |
959 |
|
|
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new) |
960 |
|
|
{ |
961 |
|
|
struct rde_nbr_head *head; |
962 |
|
|
struct rde_nbr *nbr; |
963 |
|
|
struct area *area; |
964 |
|
|
struct iface *iface; |
965 |
|
|
|
966 |
|
|
if (rde_nbr_find(peerid)) |
967 |
|
|
return (NULL); |
968 |
|
|
if ((area = area_find(rdeconf, new->area_id)) == NULL) |
969 |
|
|
fatalx("rde_nbr_new: unknown area"); |
970 |
|
|
|
971 |
|
|
LIST_FOREACH(iface, &area->iface_list, entry) { |
972 |
|
|
if (iface->ifindex == new->ifindex) |
973 |
|
|
break; |
974 |
|
|
} |
975 |
|
|
if (iface == NULL) |
976 |
|
|
fatalx("rde_nbr_new: unknown interface"); |
977 |
|
|
|
978 |
|
|
if ((nbr = calloc(1, sizeof(*nbr))) == NULL) |
979 |
|
|
fatal("rde_nbr_new"); |
980 |
|
|
|
981 |
|
|
memcpy(nbr, new, sizeof(*nbr)); |
982 |
|
|
nbr->peerid = peerid; |
983 |
|
|
nbr->area = area; |
984 |
|
|
nbr->iface = iface; |
985 |
|
|
|
986 |
|
|
TAILQ_INIT(&nbr->req_list); |
987 |
|
|
|
988 |
|
|
head = RDE_NBR_HASH(peerid); |
989 |
|
|
LIST_INSERT_HEAD(head, nbr, hash); |
990 |
|
|
LIST_INSERT_HEAD(&area->nbr_list, nbr, entry); |
991 |
|
|
|
992 |
|
|
return (nbr); |
993 |
|
|
} |
994 |
|
|
|
995 |
|
|
void |
996 |
|
|
rde_nbr_iface_del(struct iface *iface) |
997 |
|
|
{ |
998 |
|
|
struct rde_nbr_head *head; |
999 |
|
|
struct rde_nbr *nbr, *xnbr; |
1000 |
|
|
u_int32_t i; |
1001 |
|
|
|
1002 |
|
|
for (i = 0; i <= rdenbrtable.hashmask; i++) { |
1003 |
|
|
head = &rdenbrtable.hashtbl[i]; |
1004 |
|
|
LIST_FOREACH_SAFE(nbr, head, hash, xnbr) { |
1005 |
|
|
if (nbr->iface == iface) |
1006 |
|
|
rde_nbr_del(nbr); |
1007 |
|
|
} |
1008 |
|
|
} |
1009 |
|
|
} |
1010 |
|
|
|
1011 |
|
|
void |
1012 |
|
|
rde_nbr_del(struct rde_nbr *nbr) |
1013 |
|
|
{ |
1014 |
|
|
if (nbr == NULL) |
1015 |
|
|
return; |
1016 |
|
|
|
1017 |
|
|
rde_req_list_free(nbr); |
1018 |
|
|
|
1019 |
|
|
LIST_REMOVE(nbr, entry); |
1020 |
|
|
LIST_REMOVE(nbr, hash); |
1021 |
|
|
|
1022 |
|
|
free(nbr); |
1023 |
|
|
} |
1024 |
|
|
|
1025 |
|
|
int |
1026 |
|
|
rde_nbr_loading(struct area *area) |
1027 |
|
|
{ |
1028 |
|
|
struct rde_nbr *nbr; |
1029 |
|
|
int checkall = 0; |
1030 |
|
|
|
1031 |
|
|
if (area == NULL) { |
1032 |
|
|
area = LIST_FIRST(&rdeconf->area_list); |
1033 |
|
|
checkall = 1; |
1034 |
|
|
} |
1035 |
|
|
|
1036 |
|
|
while (area != NULL) { |
1037 |
|
|
LIST_FOREACH(nbr, &area->nbr_list, entry) { |
1038 |
|
|
if (nbr->self) |
1039 |
|
|
continue; |
1040 |
|
|
if (nbr->state & NBR_STA_XCHNG || |
1041 |
|
|
nbr->state & NBR_STA_LOAD) |
1042 |
|
|
return (1); |
1043 |
|
|
} |
1044 |
|
|
if (!checkall) |
1045 |
|
|
break; |
1046 |
|
|
area = LIST_NEXT(area, entry); |
1047 |
|
|
} |
1048 |
|
|
|
1049 |
|
|
return (0); |
1050 |
|
|
} |
1051 |
|
|
|
1052 |
|
|
struct rde_nbr * |
1053 |
|
|
rde_nbr_self(struct area *area) |
1054 |
|
|
{ |
1055 |
|
|
struct rde_nbr *nbr; |
1056 |
|
|
|
1057 |
|
|
LIST_FOREACH(nbr, &area->nbr_list, entry) |
1058 |
|
|
if (nbr->self) |
1059 |
|
|
return (nbr); |
1060 |
|
|
|
1061 |
|
|
/* this may not happen */ |
1062 |
|
|
fatalx("rde_nbr_self: area without self"); |
1063 |
|
|
return (NULL); |
1064 |
|
|
} |
1065 |
|
|
|
1066 |
|
|
/* |
1067 |
|
|
* LSA req list |
1068 |
|
|
*/ |
1069 |
|
|
void |
1070 |
|
|
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa) |
1071 |
|
|
{ |
1072 |
|
|
struct rde_req_entry *le; |
1073 |
|
|
|
1074 |
|
|
if ((le = calloc(1, sizeof(*le))) == NULL) |
1075 |
|
|
fatal("rde_req_list_add"); |
1076 |
|
|
|
1077 |
|
|
TAILQ_INSERT_TAIL(&nbr->req_list, le, entry); |
1078 |
|
|
le->type = lsa->type; |
1079 |
|
|
le->ls_id = lsa->ls_id; |
1080 |
|
|
le->adv_rtr = lsa->adv_rtr; |
1081 |
|
|
} |
1082 |
|
|
|
1083 |
|
|
int |
1084 |
|
|
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr) |
1085 |
|
|
{ |
1086 |
|
|
struct rde_req_entry *le; |
1087 |
|
|
|
1088 |
|
|
TAILQ_FOREACH(le, &nbr->req_list, entry) { |
1089 |
|
|
if ((lsa_hdr->type == le->type) && |
1090 |
|
|
(lsa_hdr->ls_id == le->ls_id) && |
1091 |
|
|
(lsa_hdr->adv_rtr == le->adv_rtr)) |
1092 |
|
|
return (1); |
1093 |
|
|
} |
1094 |
|
|
return (0); |
1095 |
|
|
} |
1096 |
|
|
|
1097 |
|
|
void |
1098 |
|
|
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr) |
1099 |
|
|
{ |
1100 |
|
|
struct rde_req_entry *le; |
1101 |
|
|
|
1102 |
|
|
TAILQ_FOREACH(le, &nbr->req_list, entry) { |
1103 |
|
|
if ((lsa_hdr->type == le->type) && |
1104 |
|
|
(lsa_hdr->ls_id == le->ls_id) && |
1105 |
|
|
(lsa_hdr->adv_rtr == le->adv_rtr)) { |
1106 |
|
|
TAILQ_REMOVE(&nbr->req_list, le, entry); |
1107 |
|
|
free(le); |
1108 |
|
|
return; |
1109 |
|
|
} |
1110 |
|
|
} |
1111 |
|
|
} |
1112 |
|
|
|
1113 |
|
|
void |
1114 |
|
|
rde_req_list_free(struct rde_nbr *nbr) |
1115 |
|
|
{ |
1116 |
|
|
struct rde_req_entry *le; |
1117 |
|
|
|
1118 |
|
|
while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) { |
1119 |
|
|
TAILQ_REMOVE(&nbr->req_list, le, entry); |
1120 |
|
|
free(le); |
1121 |
|
|
} |
1122 |
|
|
} |
1123 |
|
|
|
1124 |
|
|
/* |
1125 |
|
|
* as-external LSA handling |
1126 |
|
|
*/ |
1127 |
|
|
/*
 * One redistributed kernel route together with the ls_id its as-ext
 * LSA was originated with.  The ls_id can differ from the prefix when
 * routes overlap; see the conflict resolution in rde_asext_get().
 */
struct asext_node {
	RB_ENTRY(asext_node)	 entry;
	struct kroute		 r;	/* the kernel route itself */
	u_int32_t		 ls_id;	/* ls_id used in the LSA header */
};

static __inline int	 asext_compare(struct asext_node *, struct asext_node *);
struct asext_node	*asext_find(u_int32_t, u_int8_t);

/* tree of all currently redistributed as-ext routes, keyed by prefix/len */
RB_HEAD(asext_tree, asext_node)		ast;
RB_PROTOTYPE(asext_tree, asext_node, entry, asext_compare)
RB_GENERATE(asext_tree, asext_node, entry, asext_compare)
1139 |
|
|
|
1140 |
|
|
static __inline int |
1141 |
|
|
asext_compare(struct asext_node *a, struct asext_node *b) |
1142 |
|
|
{ |
1143 |
|
|
if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr)) |
1144 |
|
|
return (-1); |
1145 |
|
|
if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr)) |
1146 |
|
|
return (1); |
1147 |
|
|
if (a->r.prefixlen < b->r.prefixlen) |
1148 |
|
|
return (-1); |
1149 |
|
|
if (a->r.prefixlen > b->r.prefixlen) |
1150 |
|
|
return (1); |
1151 |
|
|
return (0); |
1152 |
|
|
} |
1153 |
|
|
|
1154 |
|
|
struct asext_node * |
1155 |
|
|
asext_find(u_int32_t addr, u_int8_t prefixlen) |
1156 |
|
|
{ |
1157 |
|
|
struct asext_node a; |
1158 |
|
|
|
1159 |
|
|
a.r.prefix.s_addr = addr; |
1160 |
|
|
a.r.prefixlen = prefixlen; |
1161 |
|
|
|
1162 |
|
|
return (RB_FIND(asext_tree, &ast, &a)); |
1163 |
|
|
} |
1164 |
|
|
|
1165 |
|
|
struct iface * |
1166 |
|
|
rde_asext_lookup(u_int32_t prefix, int plen) |
1167 |
|
|
{ |
1168 |
|
|
struct area *area; |
1169 |
|
|
struct iface *iface; |
1170 |
|
|
|
1171 |
|
|
LIST_FOREACH(area, &rdeconf->area_list, entry) { |
1172 |
|
|
LIST_FOREACH(iface, &area->iface_list, entry) { |
1173 |
|
|
if ((iface->addr.s_addr & iface->mask.s_addr) == |
1174 |
|
|
(prefix & iface->mask.s_addr) && (plen == -1 || |
1175 |
|
|
iface->mask.s_addr == prefixlen2mask(plen))) |
1176 |
|
|
return (iface); |
1177 |
|
|
} |
1178 |
|
|
} |
1179 |
|
|
return (NULL); |
1180 |
|
|
} |
1181 |
|
|
|
1182 |
|
|
/*
 * Originate (or refresh) the AS-external LSA for the kernel route kr.
 * Prefixes already covered by a (stub) net LSA are skipped.  Because
 * the LSA ls_id must be unique, overlapping prefixes may force the
 * less specific route onto an alternate ls_id; when that happens the
 * displaced LSA is re-originated as well.
 */
void
rde_asext_get(struct kroute *kr)
{
	struct asext_node	*an, *oan;
	struct vertex		*v;
	struct lsa		*lsa;
	u_int32_t		 mask;

	if (rde_asext_lookup(kr->prefix.s_addr, kr->prefixlen)) {
		/* already announced as (stub) net LSA */
		log_debug("rde_asext_get: %s/%d is net LSA",
		    inet_ntoa(kr->prefix), kr->prefixlen);
		return;
	}

	/* remember the route, or refresh it if already tracked */
	an = asext_find(kr->prefix.s_addr, kr->prefixlen);
	if (an == NULL) {
		if ((an = calloc(1, sizeof(*an))) == NULL)
			fatal("rde_asext_get");
		bcopy(kr, &an->r, sizeof(*kr));
		an->ls_id = kr->prefix.s_addr;
		RB_INSERT(asext_tree, &ast, an);
	} else {
		/* the bcopy does not change the lookup key so it is safe */
		bcopy(kr, &an->r, sizeof(*kr));
	}

	/*
	 * ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a unique ls_id needs to be found.
	 * The algorithm will change the ls_id of the less specific
	 * route. E.g. in the case of 10.0.0.0/16 and 10.0.0.0/24
	 * 10.0.0.0/24 will get the 10.0.0.0 ls_id and 10.0.0.0/16
	 * will change the ls_id to 10.0.255.255 and see if that is unique.
	 */
	oan = an;
	mask = prefixlen2mask(oan->r.prefixlen);
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
	    rdeconf->rtr_id.s_addr);
	/* loop until the chosen ls_id no longer collides with another LSA */
	while (v && v->lsa->data.asext.mask != mask) {
		/* conflict needs to be resolved. change less specific lsa */
		if (ntohl(v->lsa->data.asext.mask) < ntohl(mask)) {
			/* lsa to insert is more specific, fix other lsa */
			mask = v->lsa->data.asext.mask;
			oan = asext_find(v->lsa->hdr.ls_id & mask,
			    mask2prefixlen(mask));
			if (oan == NULL)
				fatalx("as-ext LSA DB corrupted");
		}
		/* oan is less specific and needs new ls_id */
		if (oan->ls_id == oan->r.prefix.s_addr)
			/* first try: use the broadcast-style address */
			oan->ls_id |= ~mask;
		else {
			/* subsequent tries: walk downwards one address */
			u_int32_t	tmp = ntohl(oan->ls_id);
			oan->ls_id = htonl(tmp - 1);
			if (oan->ls_id == oan->r.prefix.s_addr) {
				/* wrapped back to the start: give up */
				log_warnx("prefix %s/%d can not be "
				    "redistributed, no unique ls_id found.",
				    inet_ntoa(kr->prefix), kr->prefixlen);
				RB_REMOVE(asext_tree, &ast, an);
				free(an);
				return;
			}
		}
		mask = prefixlen2mask(oan->r.prefixlen);
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
	}

	/* originate/refresh the LSA for the new route */
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	lsa = orig_asext_lsa(kr, an->ls_id, DEFAULT_AGE);
	lsa_merge(nbrself, lsa, v);

	/* if another route had to move to a new ls_id, re-originate it */
	if (oan != an) {
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
		lsa = orig_asext_lsa(&oan->r, oan->ls_id, DEFAULT_AGE);
		lsa_merge(nbrself, lsa, v);
	}
}
1263 |
|
|
|
1264 |
|
|
void |
1265 |
|
|
rde_asext_put(struct kroute *kr) |
1266 |
|
|
{ |
1267 |
|
|
struct asext_node *an; |
1268 |
|
|
struct vertex *v; |
1269 |
|
|
struct lsa *lsa; |
1270 |
|
|
|
1271 |
|
|
/* |
1272 |
|
|
* just try to remove the LSA. If the prefix is announced as |
1273 |
|
|
* stub net LSA asext_find() will fail and nothing will happen. |
1274 |
|
|
*/ |
1275 |
|
|
an = asext_find(kr->prefix.s_addr, kr->prefixlen); |
1276 |
|
|
if (an == NULL) { |
1277 |
|
|
log_debug("rde_asext_put: NO SUCH LSA %s/%d", |
1278 |
|
|
inet_ntoa(kr->prefix), kr->prefixlen); |
1279 |
|
|
return; |
1280 |
|
|
} |
1281 |
|
|
|
1282 |
|
|
/* inherit metric and ext_tag from the current LSA, |
1283 |
|
|
* some routers don't like to get withdraws that are |
1284 |
|
|
* different from what they have in their table. |
1285 |
|
|
*/ |
1286 |
|
|
v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id, |
1287 |
|
|
rdeconf->rtr_id.s_addr); |
1288 |
|
|
if (v != NULL) { |
1289 |
|
|
kr->metric = ntohl(v->lsa->data.asext.metric); |
1290 |
|
|
kr->ext_tag = ntohl(v->lsa->data.asext.ext_tag); |
1291 |
|
|
} |
1292 |
|
|
|
1293 |
|
|
/* remove by reflooding with MAX_AGE */ |
1294 |
|
|
lsa = orig_asext_lsa(kr, an->ls_id, MAX_AGE); |
1295 |
|
|
lsa_merge(nbrself, lsa, v); |
1296 |
|
|
|
1297 |
|
|
RB_REMOVE(asext_tree, &ast, an); |
1298 |
|
|
free(an); |
1299 |
|
|
} |
1300 |
|
|
|
1301 |
|
|
void |
1302 |
|
|
rde_asext_free(void) |
1303 |
|
|
{ |
1304 |
|
|
struct asext_node *an, *nan; |
1305 |
|
|
|
1306 |
|
|
for (an = RB_MIN(asext_tree, &ast); an != NULL; an = nan) { |
1307 |
|
|
nan = RB_NEXT(asext_tree, &ast, an); |
1308 |
|
|
RB_REMOVE(asext_tree, &ast, an); |
1309 |
|
|
free(an); |
1310 |
|
|
} |
1311 |
|
|
} |
1312 |
|
|
|
1313 |
|
|
/*
 * Build an AS-external (type 5) LSA for the kernel route kr, using the
 * given ls_id and age (DEFAULT_AGE to announce, MAX_AGE to withdraw).
 * Returns a freshly allocated LSA; the caller passes ownership on to
 * lsa_merge().  All multi-byte header/body fields are stored in
 * network byte order.
 */
struct lsa *
orig_asext_lsa(struct kroute *kr, u_int32_t ls_id, u_int16_t age)
{
	struct lsa	*lsa;
	struct iface	*iface;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(kr->prefix), kr->prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.opts = area_ospf_options(NULL);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	/* update of seqnum is done by lsa_merge */
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	lsa->hdr.ls_id = ls_id;
	lsa->data.asext.mask = prefixlen2mask(kr->prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * in other cases we may announce the true nexthop if the
	 * nexthop is reachable via an OSPF enabled interface but only
	 * broadcast & NBMA interfaces are considered in that case.
	 * It does not make sense to announce the nexthop of a point-to-point
	 * link since the traffic has to go through this box anyway.
	 * Some implementations actually check that there are multiple
	 * neighbors on the particular segment, we skip that check.
	 */
	iface = rde_asext_lookup(kr->nexthop.s_addr, -1);
	if (kr->flags & F_CONNECTED)
		lsa->data.asext.fw_addr = 0;
	else if (iface && (iface->type == IF_TYPE_BROADCAST ||
	    iface->type == IF_TYPE_NBMA))
		lsa->data.asext.fw_addr = kr->nexthop.s_addr;
	else
		lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(kr->metric);
	lsa->data.asext.ext_tag = htonl(kr->ext_tag);

	/* checksum over the whole LSA, with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1367 |
|
|
|
1368 |
|
|
/* |
1369 |
|
|
* summary LSA stuff |
1370 |
|
|
*/ |
1371 |
|
|
/*
 * Originate or refresh the summary LSA for route rte in the given
 * area.  A series of guard checks filters out routes that must not be
 * summarized into this area; only then is a type 3 (network) or
 * type 4 (router) summary LSA generated and merged.
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct rt_nexthop	*rn;
	struct rt_node		*nr;
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* route is invalid, lsa_remove_invalid_sums() will do the cleanup */
	if (rte->cost >= LS_INFINITY)
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* nexthop check, nexthop part of area -> no summary */
	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
		if (rn->invalid)
			continue;
		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
		if (nr && nr->area.s_addr == area->id.s_addr)
			continue;
		/* found a usable nexthop outside this area */
		break;
	}
	if (rn == NULL)
		/* all nexthops belong to this area or are invalid */
		return;

	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		if (area->stub)
			/* do not redistribute type 4 LSA into stub areas */
			return;
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find_area(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	/* a newly originated LSA only shows up after the merge */
	if (v == NULL)
		v = lsa_find_area(area, type, rte->prefix.s_addr,
		    rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}
1435 |
|
|
|
1436 |
|
|
/*
 * Build a summary LSA (type 3 or 4, selected by "type") for route rte
 * in the given area.  With invalid set the LSA is aged out (MAX_AGE)
 * to withdraw it.  Returns a freshly allocated LSA; ownership passes
 * to lsa_merge().  Multi-byte fields are in network byte order.
 */
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.opts = area_ospf_options(area);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;		/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	/* checksum over the whole LSA, with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}