/*	$OpenBSD: rde.c,v 1.72 2017/08/12 16:27:50 benno Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

#define MINIMUM(a, b)	(((a) < (b)) ? (a) : (b))

void		 rde_sig_handler(int sig, short, void *);
__dead void	 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct lsa	*rde_asext_get(struct kroute *);
struct lsa	*rde_asext_put(struct kroute *);

int		 comp_asext(struct lsa *, struct lsa *);
struct lsa	*orig_asext_lsa(struct kroute *, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
struct lsa	*orig_intra_lsa_net(struct area *, struct iface *,
		    struct vertex *);
struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
void		 append_prefix_lsa(struct lsa **, u_int16_t *,
		    struct lsa_prefix *);

/* A 32-bit value != any ifindex.
 * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
#define	LS_ID_INTRA_RTR	0x01000000

/* Tree of prefixes with global scope on a given link,
 * see orig_intra_lsa_*() */
struct prefix_node {
	RB_ENTRY(prefix_node)	 entry;
	struct lsa_prefix	*prefix;
};
RB_HEAD(prefix_tree, prefix_node);
RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
int		 prefix_compare(struct prefix_node *, struct prefix_node *);
void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
struct imsgev		*iev_ospfe;
struct imsgev		*iev_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	/*
	 * XXX needed with fork+exec
	 * log_init(debug, LOG_DAEMON);
	 * log_setverbose(verbose);
	 */

	ospfd_process = PROC_RDE_ENGINE;
	log_procinit(log_procnames[ospfd_process]);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

__dead void
rde_shutdown(void)
{
	struct area	*a;

	/* close pipes */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	close(iev_ospfe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	free(iev_ospfe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}

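/*
 * Imsg dispatch: rde_dispatch_imsg() handles messages arriving from the
 * OSPF engine (ospfe) process, rde_dispatch_parent() those coming from
 * the parent (main) process.
 */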
253 |
|
|
/* ARGSUSED */ |
254 |
|
|
void |
255 |
|
|
rde_dispatch_imsg(int fd, short event, void *bula) |
256 |
|
|
{ |
257 |
|
|
struct imsgev *iev = bula; |
258 |
|
|
struct imsgbuf *ibuf = &iev->ibuf; |
259 |
|
|
struct imsg imsg; |
260 |
|
|
struct in_addr aid; |
261 |
|
|
struct ls_req_hdr req_hdr; |
262 |
|
|
struct lsa_hdr lsa_hdr, *db_hdr; |
263 |
|
|
struct rde_nbr rn, *nbr; |
264 |
|
|
struct timespec tp; |
265 |
|
|
struct lsa *lsa; |
266 |
|
|
struct area *area; |
267 |
|
|
struct vertex *v; |
268 |
|
|
char *buf; |
269 |
|
|
ssize_t n; |
270 |
|
|
time_t now; |
271 |
|
|
int r, state, self, shut = 0, verbose; |
272 |
|
|
u_int16_t l; |
273 |
|
|
|
274 |
|
|
if (event & EV_READ) { |
275 |
|
|
if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) |
276 |
|
|
fatal("imsg_read error"); |
277 |
|
|
if (n == 0) /* connection closed */ |
278 |
|
|
shut = 1; |
279 |
|
|
} |
280 |
|
|
if (event & EV_WRITE) { |
281 |
|
|
if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN) |
282 |
|
|
fatal("msgbuf_write"); |
283 |
|
|
if (n == 0) /* connection closed */ |
284 |
|
|
shut = 1; |
285 |
|
|
} |
286 |
|
|
|
287 |
|
|
clock_gettime(CLOCK_MONOTONIC, &tp); |
288 |
|
|
now = tp.tv_sec; |
289 |
|
|
|
290 |
|
|
for (;;) { |
291 |
|
|
if ((n = imsg_get(ibuf, &imsg)) == -1) |
292 |
|
|
fatal("rde_dispatch_imsg: imsg_get error"); |
293 |
|
|
if (n == 0) |
294 |
|
|
break; |
295 |
|
|
|
296 |
|
|
switch (imsg.hdr.type) { |
297 |
|
|
case IMSG_NEIGHBOR_UP: |
298 |
|
|
if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn)) |
299 |
|
|
fatalx("invalid size of OE request"); |
300 |
|
|
memcpy(&rn, imsg.data, sizeof(rn)); |
301 |
|
|
|
302 |
|
|
if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL) |
303 |
|
|
fatalx("rde_dispatch_imsg: " |
304 |
|
|
"neighbor already exists"); |
305 |
|
|
break; |
306 |
|
|
case IMSG_NEIGHBOR_DOWN: |
307 |
|
|
rde_nbr_del(rde_nbr_find(imsg.hdr.peerid)); |
308 |
|
|
break; |
309 |
|
|
case IMSG_NEIGHBOR_CHANGE: |
310 |
|
|
if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state)) |
311 |
|
|
fatalx("invalid size of OE request"); |
312 |
|
|
memcpy(&state, imsg.data, sizeof(state)); |
313 |
|
|
|
314 |
|
|
nbr = rde_nbr_find(imsg.hdr.peerid); |
315 |
|
|
if (nbr == NULL) |
316 |
|
|
break; |
317 |
|
|
|
318 |
|
|
if (state != nbr->state && |
319 |
|
|
(nbr->state & NBR_STA_FULL || |
320 |
|
|
state & NBR_STA_FULL)) { |
321 |
|
|
nbr->state = state; |
322 |
|
|
area_track(nbr->area, state); |
323 |
|
|
orig_intra_area_prefix_lsas(nbr->area); |
324 |
|
|
} |
325 |
|
|
|
326 |
|
|
nbr->state = state; |
327 |
|
|
if (nbr->state & NBR_STA_FULL) |
328 |
|
|
rde_req_list_free(nbr); |
329 |
|
|
break; |
330 |
|
|
case IMSG_DB_SNAPSHOT: |
331 |
|
|
nbr = rde_nbr_find(imsg.hdr.peerid); |
332 |
|
|
if (nbr == NULL) |
333 |
|
|
break; |
334 |
|
|
|
335 |
|
|
lsa_snap(nbr, imsg.hdr.peerid); |
336 |
|
|
|
337 |
|
|
imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid, |
338 |
|
|
0, -1, NULL, 0); |
339 |
|
|
break; |
340 |
|
|
case IMSG_DD: |
341 |
|
|
nbr = rde_nbr_find(imsg.hdr.peerid); |
342 |
|
|
if (nbr == NULL) |
343 |
|
|
break; |
344 |
|
|
|
345 |
|
|
buf = imsg.data; |
346 |
|
|
for (l = imsg.hdr.len - IMSG_HEADER_SIZE; |
347 |
|
|
l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) { |
348 |
|
|
memcpy(&lsa_hdr, buf, sizeof(lsa_hdr)); |
349 |
|
|
buf += sizeof(lsa_hdr); |
350 |
|
|
|
351 |
|
|
v = lsa_find(nbr->iface, lsa_hdr.type, |
352 |
|
|
lsa_hdr.ls_id, lsa_hdr.adv_rtr); |
353 |
|
|
if (v == NULL) |
354 |
|
|
db_hdr = NULL; |
355 |
|
|
else |
356 |
|
|
db_hdr = &v->lsa->hdr; |
357 |
|
|
|
358 |
|
|
if (lsa_newer(&lsa_hdr, db_hdr) > 0) { |
359 |
|
|
/* |
360 |
|
|
* only request LSAs that are |
361 |
|
|
* newer or missing |
362 |
|
|
*/ |
363 |
|
|
rde_req_list_add(nbr, &lsa_hdr); |
364 |
|
|
imsg_compose_event(iev_ospfe, IMSG_DD, |
365 |
|
|
imsg.hdr.peerid, 0, -1, &lsa_hdr, |
366 |
|
|
sizeof(lsa_hdr)); |
367 |
|
|
} |
368 |
|
|
} |
369 |
|
|
if (l != 0) |
370 |
|
|
log_warnx("rde_dispatch_imsg: peerid %u, " |
371 |
|
|
"trailing garbage in Database Description " |
372 |
|
|
"packet", imsg.hdr.peerid); |
373 |
|
|
|
374 |
|
|
imsg_compose_event(iev_ospfe, IMSG_DD_END, |
375 |
|
|
imsg.hdr.peerid, 0, -1, NULL, 0); |
376 |
|
|
break; |
377 |
|
|
case IMSG_LS_REQ: |
378 |
|
|
nbr = rde_nbr_find(imsg.hdr.peerid); |
379 |
|
|
if (nbr == NULL) |
380 |
|
|
break; |
381 |
|
|
|
382 |
|
|
buf = imsg.data; |
383 |
|
|
for (l = imsg.hdr.len - IMSG_HEADER_SIZE; |
384 |
|
|
l >= sizeof(req_hdr); l -= sizeof(req_hdr)) { |
385 |
|
|
memcpy(&req_hdr, buf, sizeof(req_hdr)); |
386 |
|
|
buf += sizeof(req_hdr); |
387 |
|
|
|
388 |
|
|
if ((v = lsa_find(nbr->iface, |
389 |
|
|
req_hdr.type, req_hdr.ls_id, |
390 |
|
|
req_hdr.adv_rtr)) == NULL) { |
391 |
|
|
imsg_compose_event(iev_ospfe, |
392 |
|
|
IMSG_LS_BADREQ, imsg.hdr.peerid, |
393 |
|
|
0, -1, NULL, 0); |
394 |
|
|
continue; |
395 |
|
|
} |
396 |
|
|
imsg_compose_event(iev_ospfe, IMSG_LS_UPD, |
397 |
|
|
imsg.hdr.peerid, 0, -1, v->lsa, |
398 |
|
|
ntohs(v->lsa->hdr.len)); |
399 |
|
|
} |
400 |
|
|
if (l != 0) |
401 |
|
|
log_warnx("rde_dispatch_imsg: peerid %u, " |
402 |
|
|
"trailing garbage in LS Request " |
403 |
|
|
"packet", imsg.hdr.peerid); |
404 |
|
|
break; |
405 |
|
|
case IMSG_LS_UPD: |
406 |
|
|
nbr = rde_nbr_find(imsg.hdr.peerid); |
407 |
|
|
if (nbr == NULL) |
408 |
|
|
break; |
409 |
|
|
|
410 |
|
|
lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE); |
411 |
|
|
if (lsa == NULL) |
412 |
|
|
fatal(NULL); |
413 |
|
|
memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE); |
414 |
|
|
|
415 |
|
|
if (!lsa_check(nbr, lsa, |
416 |
|
|
imsg.hdr.len - IMSG_HEADER_SIZE)) { |
417 |
|
|
free(lsa); |
418 |
|
|
break; |
419 |
|
|
} |
420 |
|
|
|
421 |
|
|
v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id, |
422 |
|
|
lsa->hdr.adv_rtr); |
423 |
|
|
if (v == NULL) |
424 |
|
|
db_hdr = NULL; |
425 |
|
|
else |
426 |
|
|
db_hdr = &v->lsa->hdr; |
427 |
|
|
|
428 |
|
|
if (nbr->self) { |
429 |
|
|
lsa_merge(nbr, lsa, v); |
430 |
|
|
/* lsa_merge frees the right lsa */ |
431 |
|
|
break; |
432 |
|
|
} |
433 |
|
|
|
434 |
|
|
r = lsa_newer(&lsa->hdr, db_hdr); |
435 |
|
|
if (r > 0) { |
436 |
|
|
/* new LSA newer than DB */ |
437 |
|
|
if (v && v->flooded && |
438 |
|
|
v->changed + MIN_LS_ARRIVAL >= now) { |
439 |
|
|
free(lsa); |
440 |
|
|
break; |
441 |
|
|
} |
442 |
|
|
|
443 |
|
|
rde_req_list_del(nbr, &lsa->hdr); |
444 |
|
|
|
445 |
|
|
self = lsa_self(lsa); |
446 |
|
|
if (self) { |
447 |
|
|
if (v == NULL) |
448 |
|
|
/* LSA is no longer announced, |
449 |
|
|
* remove by premature aging. */ |
450 |
|
|
lsa_flush(nbr, lsa); |
451 |
|
|
else |
452 |
|
|
lsa_reflood(v, lsa); |
453 |
|
|
} else if (lsa_add(nbr, lsa)) |
454 |
|
|
/* delayed lsa, don't flood yet */ |
455 |
|
|
break; |
456 |
|
|
|
457 |
|
|
/* flood and perhaps ack LSA */ |
458 |
|
|
imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD, |
459 |
|
|
imsg.hdr.peerid, 0, -1, lsa, |
460 |
|
|
ntohs(lsa->hdr.len)); |
461 |
|
|
|
462 |
|
|
/* reflood self originated LSA */ |
463 |
|
|
if (self && v) |
464 |
|
|
imsg_compose_event(iev_ospfe, |
465 |
|
|
IMSG_LS_FLOOD, v->peerid, 0, -1, |
466 |
|
|
v->lsa, ntohs(v->lsa->hdr.len)); |
467 |
|
|
/* new LSA was not added so free it */ |
468 |
|
|
if (self) |
469 |
|
|
free(lsa); |
470 |
|
|
} else if (r < 0) { |
471 |
|
|
/* |
472 |
|
|
* point 6 of "The Flooding Procedure" |
473 |
|
|
* We are violating the RFC here because |
474 |
|
|
* it does not make sense to reset a session |
475 |
|
|
* because an equal LSA is already in the table. |
476 |
|
|
* Only if the LSA sent is older than the one |
477 |
|
|
* in the table we should reset the session. |
478 |
|
|
*/ |
479 |
|
|
if (rde_req_list_exists(nbr, &lsa->hdr)) { |
480 |
|
|
imsg_compose_event(iev_ospfe, |
481 |
|
|
IMSG_LS_BADREQ, imsg.hdr.peerid, |
482 |
|
|
0, -1, NULL, 0); |
483 |
|
|
free(lsa); |
484 |
|
|
break; |
485 |
|
|
} |
486 |
|
|
|
487 |
|
|
/* lsa no longer needed */ |
488 |
|
|
free(lsa); |
489 |
|
|
|
490 |
|
|
/* new LSA older than DB */ |
491 |
|
|
if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM && |
492 |
|
|
ntohs(db_hdr->age) == MAX_AGE) |
493 |
|
|
/* seq-num wrap */ |
494 |
|
|
break; |
495 |
|
|
|
496 |
|
|
if (v->changed + MIN_LS_ARRIVAL >= now) |
497 |
|
|
break; |
498 |
|
|
|
499 |
|
|
/* directly send current LSA, no ack */ |
500 |
|
|
imsg_compose_event(iev_ospfe, IMSG_LS_UPD, |
501 |
|
|
imsg.hdr.peerid, 0, -1, v->lsa, |
502 |
|
|
ntohs(v->lsa->hdr.len)); |
503 |
|
|
} else { |
504 |
|
|
/* LSA equal send direct ack */ |
505 |
|
|
imsg_compose_event(iev_ospfe, IMSG_LS_ACK, |
506 |
|
|
imsg.hdr.peerid, 0, -1, &lsa->hdr, |
507 |
|
|
sizeof(lsa->hdr)); |
508 |
|
|
free(lsa); |
509 |
|
|
} |
510 |
|
|
break; |
511 |
|
|
case IMSG_LS_MAXAGE: |
512 |
|
|
nbr = rde_nbr_find(imsg.hdr.peerid); |
513 |
|
|
if (nbr == NULL) |
514 |
|
|
break; |
515 |
|
|
|
516 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE + |
517 |
|
|
sizeof(struct lsa_hdr)) |
518 |
|
|
fatalx("invalid size of OE request"); |
519 |
|
|
memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr)); |
520 |
|
|
|
521 |
|
|
if (rde_nbr_loading(nbr->area)) |
522 |
|
|
break; |
523 |
|
|
|
524 |
|
|
v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id, |
525 |
|
|
lsa_hdr.adv_rtr); |
526 |
|
|
if (v == NULL) |
527 |
|
|
db_hdr = NULL; |
528 |
|
|
else |
529 |
|
|
db_hdr = &v->lsa->hdr; |
530 |
|
|
|
531 |
|
|
/* |
532 |
|
|
* only delete LSA if the one in the db is not newer |
533 |
|
|
*/ |
534 |
|
|
if (lsa_newer(db_hdr, &lsa_hdr) <= 0) |
535 |
|
|
lsa_del(nbr, &lsa_hdr); |
536 |
|
|
break; |
537 |
|
|
case IMSG_CTL_SHOW_DATABASE: |
538 |
|
|
case IMSG_CTL_SHOW_DB_EXT: |
539 |
|
|
case IMSG_CTL_SHOW_DB_LINK: |
540 |
|
|
case IMSG_CTL_SHOW_DB_NET: |
541 |
|
|
case IMSG_CTL_SHOW_DB_RTR: |
542 |
|
|
case IMSG_CTL_SHOW_DB_INTRA: |
543 |
|
|
case IMSG_CTL_SHOW_DB_SELF: |
544 |
|
|
case IMSG_CTL_SHOW_DB_SUM: |
545 |
|
|
case IMSG_CTL_SHOW_DB_ASBR: |
546 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE && |
547 |
|
|
imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) { |
548 |
|
|
log_warnx("rde_dispatch_imsg: wrong imsg len"); |
549 |
|
|
break; |
550 |
|
|
} |
551 |
|
|
if (imsg.hdr.len == IMSG_HEADER_SIZE) { |
552 |
|
|
LIST_FOREACH(area, &rdeconf->area_list, entry) { |
553 |
|
|
rde_dump_area(area, imsg.hdr.type, |
554 |
|
|
imsg.hdr.pid); |
555 |
|
|
} |
556 |
|
|
lsa_dump(&asext_tree, imsg.hdr.type, |
557 |
|
|
imsg.hdr.pid); |
558 |
|
|
} else { |
559 |
|
|
memcpy(&aid, imsg.data, sizeof(aid)); |
560 |
|
|
if ((area = area_find(rdeconf, aid)) != NULL) { |
561 |
|
|
rde_dump_area(area, imsg.hdr.type, |
562 |
|
|
imsg.hdr.pid); |
563 |
|
|
if (!area->stub) |
564 |
|
|
lsa_dump(&asext_tree, |
565 |
|
|
imsg.hdr.type, |
566 |
|
|
imsg.hdr.pid); |
567 |
|
|
} |
568 |
|
|
} |
569 |
|
|
imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0, |
570 |
|
|
imsg.hdr.pid, -1, NULL, 0); |
571 |
|
|
break; |
572 |
|
|
case IMSG_CTL_SHOW_RIB: |
573 |
|
|
LIST_FOREACH(area, &rdeconf->area_list, entry) { |
574 |
|
|
imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, |
575 |
|
|
0, imsg.hdr.pid, -1, area, sizeof(*area)); |
576 |
|
|
|
577 |
|
|
rt_dump(area->id, imsg.hdr.pid, RIB_RTR); |
578 |
|
|
rt_dump(area->id, imsg.hdr.pid, RIB_NET); |
579 |
|
|
} |
580 |
|
|
aid.s_addr = 0; |
581 |
|
|
rt_dump(aid, imsg.hdr.pid, RIB_EXT); |
582 |
|
|
|
583 |
|
|
imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0, |
584 |
|
|
imsg.hdr.pid, -1, NULL, 0); |
585 |
|
|
break; |
586 |
|
|
case IMSG_CTL_SHOW_SUM: |
587 |
|
|
rde_send_summary(imsg.hdr.pid); |
588 |
|
|
LIST_FOREACH(area, &rdeconf->area_list, entry) |
589 |
|
|
rde_send_summary_area(area, imsg.hdr.pid); |
590 |
|
|
imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0, |
591 |
|
|
imsg.hdr.pid, -1, NULL, 0); |
592 |
|
|
break; |
593 |
|
|
case IMSG_IFINFO: |
594 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE + |
595 |
|
|
sizeof(int)) |
596 |
|
|
fatalx("IFINFO imsg with wrong len"); |
597 |
|
|
|
598 |
|
|
nbr = rde_nbr_find(imsg.hdr.peerid); |
599 |
|
|
if (nbr == NULL) |
600 |
|
|
fatalx("IFINFO imsg with bad peerid"); |
601 |
|
|
memcpy(&nbr->iface->state, imsg.data, sizeof(int)); |
602 |
|
|
|
603 |
|
|
/* Resend LSAs if interface state changes. */ |
604 |
|
|
orig_intra_area_prefix_lsas(nbr->area); |
605 |
|
|
break; |
606 |
|
|
case IMSG_CTL_LOG_VERBOSE: |
607 |
|
|
/* already checked by ospfe */ |
608 |
|
|
memcpy(&verbose, imsg.data, sizeof(verbose)); |
609 |
|
|
log_setverbose(verbose); |
610 |
|
|
break; |
611 |
|
|
default: |
612 |
|
|
log_debug("rde_dispatch_imsg: unexpected imsg %d", |
613 |
|
|
imsg.hdr.type); |
614 |
|
|
break; |
615 |
|
|
} |
616 |
|
|
imsg_free(&imsg); |
617 |
|
|
} |
618 |
|
|
if (!shut) |
619 |
|
|
imsg_event_add(iev); |
620 |
|
|
else { |
621 |
|
|
/* this pipe is dead, so remove the event handler */ |
622 |
|
|
event_del(&iev->ev); |
623 |
|
|
event_loopexit(NULL); |
624 |
|
|
} |
625 |
|
|
} |
626 |
|
|
|
627 |
|
|
/* ARGSUSED */ |
628 |
|
|
void |
629 |
|
|
rde_dispatch_parent(int fd, short event, void *bula) |
630 |
|
|
{ |
631 |
|
|
static struct area *narea; |
632 |
|
|
struct area *area; |
633 |
|
|
struct iface *iface, *ifp; |
634 |
|
|
struct ifaddrchange *ifc; |
635 |
|
|
struct iface_addr *ia, *nia; |
636 |
|
|
struct imsg imsg; |
637 |
|
|
struct kroute kr; |
638 |
|
|
struct imsgev *iev = bula; |
639 |
|
|
struct imsgbuf *ibuf = &iev->ibuf; |
640 |
|
|
struct lsa *lsa; |
641 |
|
|
struct vertex *v; |
642 |
|
|
ssize_t n; |
643 |
|
|
int shut = 0, wasvalid; |
644 |
|
|
unsigned int ifindex; |
645 |
|
|
|
646 |
|
|
if (event & EV_READ) { |
647 |
|
|
if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) |
648 |
|
|
fatal("imsg_read error"); |
649 |
|
|
if (n == 0) /* connection closed */ |
650 |
|
|
shut = 1; |
651 |
|
|
} |
652 |
|
|
if (event & EV_WRITE) { |
653 |
|
|
if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN) |
654 |
|
|
fatal("msgbuf_write"); |
655 |
|
|
if (n == 0) /* connection closed */ |
656 |
|
|
shut = 1; |
657 |
|
|
} |
658 |
|
|
|
659 |
|
|
for (;;) { |
660 |
|
|
if ((n = imsg_get(ibuf, &imsg)) == -1) |
661 |
|
|
fatal("rde_dispatch_parent: imsg_get error"); |
662 |
|
|
if (n == 0) |
663 |
|
|
break; |
664 |
|
|
|
665 |
|
|
switch (imsg.hdr.type) { |
666 |
|
|
case IMSG_NETWORK_ADD: |
667 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) { |
668 |
|
|
log_warnx("rde_dispatch_parent: " |
669 |
|
|
"wrong imsg len"); |
670 |
|
|
break; |
671 |
|
|
} |
672 |
|
|
memcpy(&kr, imsg.data, sizeof(kr)); |
673 |
|
|
|
674 |
|
|
if ((lsa = rde_asext_get(&kr)) != NULL) { |
675 |
|
|
v = lsa_find(NULL, lsa->hdr.type, |
676 |
|
|
lsa->hdr.ls_id, lsa->hdr.adv_rtr); |
677 |
|
|
|
678 |
|
|
lsa_merge(nbrself, lsa, v); |
679 |
|
|
} |
680 |
|
|
break; |
681 |
|
|
case IMSG_NETWORK_DEL: |
682 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) { |
683 |
|
|
log_warnx("rde_dispatch_parent: " |
684 |
|
|
"wrong imsg len"); |
685 |
|
|
break; |
686 |
|
|
} |
687 |
|
|
memcpy(&kr, imsg.data, sizeof(kr)); |
688 |
|
|
|
689 |
|
|
if ((lsa = rde_asext_put(&kr)) != NULL) { |
690 |
|
|
v = lsa_find(NULL, lsa->hdr.type, |
691 |
|
|
lsa->hdr.ls_id, lsa->hdr.adv_rtr); |
692 |
|
|
|
693 |
|
|
/* |
694 |
|
|
* if v == NULL no LSA is in the table and |
695 |
|
|
* nothing has to be done. |
696 |
|
|
*/ |
697 |
|
|
if (v) |
698 |
|
|
lsa_merge(nbrself, lsa, v); |
699 |
|
|
else |
700 |
|
|
free(lsa); |
701 |
|
|
} |
702 |
|
|
break; |
703 |
|
|
case IMSG_IFINFO: |
704 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE + |
705 |
|
|
sizeof(struct iface)) |
706 |
|
|
fatalx("IFINFO imsg with wrong len"); |
707 |
|
|
|
708 |
|
|
ifp = imsg.data; |
709 |
|
|
iface = if_find(ifp->ifindex); |
710 |
|
|
if (iface == NULL) |
711 |
|
|
fatalx("interface lost in rde"); |
712 |
|
|
|
713 |
|
|
wasvalid = (iface->flags & IFF_UP) && |
714 |
|
|
LINK_STATE_IS_UP(iface->linkstate); |
715 |
|
|
|
716 |
|
|
if_update(iface, ifp->mtu, ifp->flags, ifp->if_type, |
717 |
|
|
ifp->linkstate, ifp->baudrate); |
718 |
|
|
|
719 |
|
|
/* Resend LSAs if interface state changes. */ |
720 |
|
|
if (wasvalid != (iface->flags & IFF_UP) && |
721 |
|
|
LINK_STATE_IS_UP(iface->linkstate)) { |
722 |
|
|
area = area_find(rdeconf, iface->area_id); |
723 |
|
|
if (!area) |
724 |
|
|
fatalx("interface lost area"); |
725 |
|
|
orig_intra_area_prefix_lsas(area); |
726 |
|
|
} |
727 |
|
|
break; |
728 |
|
|
case IMSG_IFADD: |
729 |
|
|
if ((iface = malloc(sizeof(struct iface))) == NULL) |
730 |
|
|
fatal(NULL); |
731 |
|
|
memcpy(iface, imsg.data, sizeof(struct iface)); |
732 |
|
|
|
733 |
|
|
LIST_INIT(&iface->nbr_list); |
734 |
|
|
TAILQ_INIT(&iface->ls_ack_list); |
735 |
|
|
RB_INIT(&iface->lsa_tree); |
736 |
|
|
|
737 |
|
|
area = area_find(rdeconf, iface->area_id); |
738 |
|
|
LIST_INSERT_HEAD(&area->iface_list, iface, entry); |
739 |
|
|
break; |
740 |
|
|
case IMSG_IFDELETE: |
741 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE + |
742 |
|
|
sizeof(ifindex)) |
743 |
|
|
fatalx("IFDELETE imsg with wrong len"); |
744 |
|
|
|
745 |
|
|
memcpy(&ifindex, imsg.data, sizeof(ifindex)); |
746 |
|
|
iface = if_find(ifindex); |
747 |
|
|
if (iface == NULL) |
748 |
|
|
fatalx("interface lost in rde"); |
749 |
|
|
|
750 |
|
|
LIST_REMOVE(iface, entry); |
751 |
|
|
if_del(iface); |
752 |
|
|
break; |
753 |
|
|
case IMSG_IFADDRNEW: |
754 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE + |
755 |
|
|
sizeof(struct ifaddrchange)) |
756 |
|
|
fatalx("IFADDRNEW imsg with wrong len"); |
757 |
|
|
ifc = imsg.data; |
758 |
|
|
|
759 |
|
|
iface = if_find(ifc->ifindex); |
760 |
|
|
if (iface == NULL) |
761 |
|
|
fatalx("IFADDRNEW interface lost in rde"); |
762 |
|
|
|
763 |
|
|
if ((ia = calloc(1, sizeof(struct iface_addr))) == |
764 |
|
|
NULL) |
765 |
|
|
fatal("rde_dispatch_parent IFADDRNEW"); |
766 |
|
|
ia->addr = ifc->addr; |
767 |
|
|
ia->dstbrd = ifc->dstbrd; |
768 |
|
|
ia->prefixlen = ifc->prefixlen; |
769 |
|
|
|
770 |
|
|
TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry); |
771 |
|
|
area = area_find(rdeconf, iface->area_id); |
772 |
|
|
if (area) |
773 |
|
|
orig_intra_area_prefix_lsas(area); |
774 |
|
|
break; |
775 |
|
|
case IMSG_IFADDRDEL: |
776 |
|
|
if (imsg.hdr.len != IMSG_HEADER_SIZE + |
777 |
|
|
sizeof(struct ifaddrchange)) |
778 |
|
|
fatalx("IFADDRDEL imsg with wrong len"); |
779 |
|
|
ifc = imsg.data; |
780 |
|
|
|
781 |
|
|
iface = if_find(ifc->ifindex); |
782 |
|
|
if (iface == NULL) |
783 |
|
|
fatalx("IFADDRDEL interface lost in rde"); |
784 |
|
|
|
785 |
|
|
for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL; |
786 |
|
|
ia = nia) { |
787 |
|
|
nia = TAILQ_NEXT(ia, entry); |
788 |
|
|
|
789 |
|
|
if (IN6_ARE_ADDR_EQUAL(&ia->addr, |
790 |
|
|
&ifc->addr)) { |
791 |
|
|
TAILQ_REMOVE(&iface->ifa_list, ia, |
792 |
|
|
entry); |
793 |
|
|
free(ia); |
794 |
|
|
break; |
795 |
|
|
} |
796 |
|
|
} |
797 |
|
|
area = area_find(rdeconf, iface->area_id); |
798 |
|
|
if (area) |
799 |
|
|
orig_intra_area_prefix_lsas(area); |
800 |
|
|
break; |
801 |
|
|
case IMSG_RECONF_CONF: |
802 |
|
|
if ((nconf = malloc(sizeof(struct ospfd_conf))) == |
803 |
|
|
NULL) |
804 |
|
|
fatal(NULL); |
805 |
|
|
memcpy(nconf, imsg.data, sizeof(struct ospfd_conf)); |
806 |
|
|
|
807 |
|
|
LIST_INIT(&nconf->area_list); |
808 |
|
|
LIST_INIT(&nconf->cand_list); |
809 |
|
|
break; |
810 |
|
|
case IMSG_RECONF_AREA: |
811 |
|
|
if ((narea = area_new()) == NULL) |
812 |
|
|
fatal(NULL); |
813 |
|
|
memcpy(narea, imsg.data, sizeof(struct area)); |
814 |
|
|
|
815 |
|
|
LIST_INIT(&narea->iface_list); |
816 |
|
|
LIST_INIT(&narea->nbr_list); |
817 |
|
|
RB_INIT(&narea->lsa_tree); |
818 |
|
|
|
819 |
|
|
LIST_INSERT_HEAD(&nconf->area_list, narea, entry); |
820 |
|
|
break; |
821 |
|
|
case IMSG_RECONF_END: |
822 |
|
|
merge_config(rdeconf, nconf); |
823 |
|
|
nconf = NULL; |
824 |
|
|
break; |
825 |
|
|
default: |
826 |
|
|
log_debug("rde_dispatch_parent: unexpected imsg %d", |
827 |
|
|
imsg.hdr.type); |
828 |
|
|
break; |
829 |
|
|
} |
830 |
|
|
imsg_free(&imsg); |
831 |
|
|
} |
832 |
|
|
if (!shut) |
833 |
|
|
imsg_event_add(iev); |
834 |
|
|
else { |
835 |
|
|
/* this pipe is dead, so remove the event handler */ |
836 |
|
|
event_del(&iev->ev); |
837 |
|
|
event_loopexit(NULL); |
838 |
|
|
} |
839 |
|
|
} |
840 |
|
|
|
841 |
|
|
void |
842 |
|
|
rde_dump_area(struct area *area, int imsg_type, pid_t pid) |
843 |
|
|
{ |
844 |
|
|
struct iface *iface; |
845 |
|
|
|
846 |
|
|
/* dump header */ |
847 |
|
|
imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1, |
848 |
|
|
area, sizeof(*area)); |
849 |
|
|
|
850 |
|
|
/* dump link local lsa */ |
851 |
|
|
LIST_FOREACH(iface, &area->iface_list, entry) { |
852 |
|
|
imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE, |
853 |
|
|
0, pid, -1, iface, sizeof(*iface)); |
854 |
|
|
lsa_dump(&iface->lsa_tree, imsg_type, pid); |
855 |
|
|
} |
856 |
|
|
|
857 |
|
|
/* dump area lsa */ |
858 |
|
|
lsa_dump(&area->lsa_tree, imsg_type, pid); |
859 |
|
|
} |
860 |
|
|
|
861 |
|
|
u_int32_t |
862 |
|
|
rde_router_id(void) |
863 |
|
|
{ |
864 |
|
|
return (rdeconf->rtr_id.s_addr); |
865 |
|
|
} |
866 |
|
|
|
867 |
|
|
void |
868 |
|
|
rde_send_change_kroute(struct rt_node *r) |
869 |
|
|
{ |
870 |
|
|
int krcount = 0; |
871 |
|
|
struct kroute kr; |
872 |
|
|
struct rt_nexthop *rn; |
873 |
|
|
struct ibuf *wbuf; |
874 |
|
|
|
875 |
|
|
if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0, |
876 |
|
|
sizeof(kr))) == NULL) { |
877 |
|
|
return; |
878 |
|
|
} |
879 |
|
|
|
880 |
|
|
TAILQ_FOREACH(rn, &r->nexthop, entry) { |
881 |
|
|
if (rn->invalid) |
882 |
|
|
continue; |
883 |
|
|
krcount++; |
884 |
|
|
|
885 |
|
|
bzero(&kr, sizeof(kr)); |
886 |
|
|
kr.prefix = r->prefix; |
887 |
|
|
kr.nexthop = rn->nexthop; |
888 |
|
|
if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) || |
889 |
|
|
IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop)) |
890 |
|
|
kr.scope = rn->ifindex; |
891 |
|
|
kr.ifindex = rn->ifindex; |
892 |
|
|
kr.prefixlen = r->prefixlen; |
893 |
|
|
kr.ext_tag = r->ext_tag; |
894 |
|
|
imsg_add(wbuf, &kr, sizeof(kr)); |
895 |
|
|
} |
896 |
|
|
if (krcount == 0) |
897 |
|
|
fatalx("rde_send_change_kroute: no valid nexthop found"); |
898 |
|
|
|
899 |
|
|
imsg_close(&iev_main->ibuf, wbuf); |
900 |
|
|
imsg_event_add(iev_main); |
901 |
|
|
} |
902 |
|
|
|
903 |
|
|
void |
904 |
|
|
rde_send_delete_kroute(struct rt_node *r) |
905 |
|
|
{ |
906 |
|
|
struct kroute kr; |
907 |
|
|
|
908 |
|
|
bzero(&kr, sizeof(kr)); |
909 |
|
|
kr.prefix = r->prefix; |
910 |
|
|
kr.prefixlen = r->prefixlen; |
911 |
|
|
|
912 |
|
|
imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1, |
913 |
|
|
&kr, sizeof(kr)); |
914 |
|
|
} |
915 |
|
|
|
916 |
|
|
void |
917 |
|
|
rde_send_summary(pid_t pid) |
918 |
|
|
{ |
919 |
|
|
static struct ctl_sum sumctl; |
920 |
|
|
struct timeval now; |
921 |
|
|
struct area *area; |
922 |
|
|
struct vertex *v; |
923 |
|
|
|
924 |
|
|
bzero(&sumctl, sizeof(struct ctl_sum)); |
925 |
|
|
|
926 |
|
|
sumctl.rtr_id.s_addr = rde_router_id(); |
927 |
|
|
sumctl.spf_delay = rdeconf->spf_delay; |
928 |
|
|
sumctl.spf_hold_time = rdeconf->spf_hold_time; |
929 |
|
|
|
930 |
|
|
LIST_FOREACH(area, &rdeconf->area_list, entry) |
931 |
|
|
sumctl.num_area++; |
932 |
|
|
|
933 |
|
|
RB_FOREACH(v, lsa_tree, &asext_tree) |
934 |
|
|
sumctl.num_ext_lsa++; |
935 |
|
|
|
936 |
|
|
gettimeofday(&now, NULL); |
937 |
|
|
if (rdeconf->uptime < now.tv_sec) |
938 |
|
|
sumctl.uptime = now.tv_sec - rdeconf->uptime; |
939 |
|
|
else |
940 |
|
|
sumctl.uptime = 0; |
941 |
|
|
|
942 |
|
|
rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl, |
943 |
|
|
sizeof(sumctl)); |
944 |
|
|
} |
945 |
|
|
|
946 |
|
|
void |
947 |
|
|
rde_send_summary_area(struct area *area, pid_t pid) |
948 |
|
|
{ |
949 |
|
|
static struct ctl_sum_area sumareactl; |
950 |
|
|
struct iface *iface; |
951 |
|
|
struct rde_nbr *nbr; |
952 |
|
|
struct lsa_tree *tree = &area->lsa_tree; |
953 |
|
|
struct vertex *v; |
954 |
|
|
|
955 |
|
|
bzero(&sumareactl, sizeof(struct ctl_sum_area)); |
956 |
|
|
|
957 |
|
|
sumareactl.area.s_addr = area->id.s_addr; |
958 |
|
|
sumareactl.num_spf_calc = area->num_spf_calc; |
959 |
|
|
|
960 |
|
|
LIST_FOREACH(iface, &area->iface_list, entry) |
961 |
|
|
sumareactl.num_iface++; |
962 |
|
|
|
963 |
|
|
LIST_FOREACH(nbr, &area->nbr_list, entry) |
964 |
|
|
if (nbr->state == NBR_STA_FULL && !nbr->self) |
965 |
|
|
sumareactl.num_adj_nbr++; |
966 |
|
|
|
967 |
|
|
RB_FOREACH(v, lsa_tree, tree) |
968 |
|
|
sumareactl.num_lsa++; |
969 |
|
|
|
970 |
|
|
rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl, |
971 |
|
|
sizeof(sumareactl)); |
972 |
|
|
} |
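/*
 * RDE neighbor handling: neighbors are kept in a hash table indexed by
 * the peerid handed over by the ospfe process; nbrself is a dummy entry
 * used when merging self-originated LSAs.
 */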

LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]

void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	/* round the table size up to a power of two so RDE_NBR_HASH()
	 * can simply mask with hashmask */
	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	if ((iface = if_find(new->ifindex)) == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr	*nbr;
	int		 checkall = 0;

	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr	*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this may not happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
struct lsa *
rde_asext_get(struct kroute *kr)
{
	struct area		*area;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct in6_addr		 addr;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
					continue;

				inet6applymask(&addr, &ia->addr,
				    kr->prefixlen);
				if (!memcmp(&addr, &kr->prefix,
				    sizeof(addr)) && kr->prefixlen ==
				    ia->prefixlen) {
					/* already announced as Prefix LSA */
					log_debug("rde_asext_get: %s/%d is "
					    "part of prefix LSA",
					    log_in6addr(&kr->prefix),
					    kr->prefixlen);
					return (NULL);
				}
			}

	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(kr, DEFAULT_AGE));
}

struct lsa *
rde_asext_put(struct kroute *kr)
{
	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA lsa_find() will fail later and nothing will happen.
	 */

	/* remove by reflooding with MAX_AGE */
	return (orig_asext_lsa(kr, MAX_AGE));
}
1227 |
|
|
|
1228 |
|
|
/* |
1229 |
|
|
* summary LSA stuff |
1230 |
|
|
*/ |
1231 |
|
|
void |
1232 |
|
|
rde_summary_update(struct rt_node *rte, struct area *area) |
1233 |
|
|
{ |
1234 |
|
|
struct vertex *v = NULL; |
1235 |
|
|
//XXX struct lsa *lsa; |
1236 |
|
|
u_int16_t type = 0; |
1237 |
|
|
|
1238 |
|
|
/* first check if we actually need to announce this route */ |
1239 |
|
|
if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E)) |
1240 |
|
|
return; |
1241 |
|
|
/* never create summaries for as-ext LSA */ |
1242 |
|
|
if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT) |
1243 |
|
|
return; |
1244 |
|
|
/* no need for summary LSA in the originating area */ |
1245 |
|
|
if (rte->area.s_addr == area->id.s_addr) |
1246 |
|
|
return; |
1247 |
|
|
/* no need to originate inter-area routes to the backbone */ |
1248 |
|
|
if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY) |
1249 |
|
|
return; |
1250 |
|
|
/* TODO nexthop check, nexthop part of area -> no summary */ |
1251 |
|
|
if (rte->cost >= LS_INFINITY) |
1252 |
|
|
return; |
1253 |
|
|
/* TODO AS border router specific checks */ |
1254 |
|
|
/* TODO inter-area network route stuff */ |
1255 |
|
|
/* TODO intra-area stuff -- condense LSA ??? */ |
1256 |
|
|
|
1257 |
|
|
if (rte->d_type == DT_NET) { |
1258 |
|
|
type = LSA_TYPE_INTER_A_PREFIX; |
1259 |
|
|
} else if (rte->d_type == DT_RTR) { |
1260 |
|
|
type = LSA_TYPE_INTER_A_ROUTER; |
1261 |
|
|
} else |
1262 |
|
|
|
1263 |
|
|
#if 0 /* XXX a lot todo */ |
1264 |
|
|
/* update lsa but only if it was changed */ |
1265 |
|
|
v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id()); |
1266 |
|
|
lsa = orig_sum_lsa(rte, area, type, rte->invalid); |
1267 |
|
|
lsa_merge(rde_nbr_self(area), lsa, v); |
1268 |
|
|
|
1269 |
|
|
if (v == NULL) |
1270 |
|
|
v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id()); |
1271 |
|
|
#endif |
1272 |
|
|
|
1273 |
|
|
/* suppressed/deleted routes are not found in the second lsa_find */ |
1274 |
|
|
if (v) |
1275 |
|
|
v->cost = rte->cost; |
1276 |
|
|
} |

/*
 * Functions for self-originated LSAs
 */

/* Prefix LSAs have variable size. We have to be careful to copy the right
 * amount of bytes, and to realloc() the right amount of memory. */
void
append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
{
	struct lsa_prefix	*copy;
	unsigned int		 lsa_prefix_len;
	unsigned int		 new_len;
	char			*new_lsa;

	lsa_prefix_len = sizeof(struct lsa_prefix)
	    + LSA_PREFIXSIZE(prefix->prefixlen);

	new_len = *len + lsa_prefix_len;

	/* Make sure we have enough space for this prefix. */
	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
		fatalx("append_prefix_lsa");

	/* Append prefix to LSA. */
	copy = (struct lsa_prefix *)(new_lsa + *len);
	memcpy(copy, prefix, lsa_prefix_len);
	copy->metric = 0;

	*lsa = (struct lsa *)new_lsa;
	*len = new_len;
}

/* compare the raw prefix bytes first, then the prefix length */
int
prefix_compare(struct prefix_node *a, struct prefix_node *b)
{
	struct lsa_prefix	*p;
	struct lsa_prefix	*q;
	int			 i;
	int			 len;

	p = a->prefix;
	q = b->prefix;

	len = MINIMUM(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));

	i = memcmp(p + 1, q + 1, len);
	if (i)
		return (i);
	if (p->prefixlen < q->prefixlen)
		return (-1);
	if (p->prefixlen > q->prefixlen)
		return (1);
	return (0);
}
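
/*
 * prefix_tree_add() walks the prefixes advertised in a link LSA and
 * inserts the usable ones (not link-local, not marked NU or LA) into the
 * tree; a duplicate prefix is merged by OR-ing its option bits into the
 * existing node.
 */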
void
prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
{
	struct prefix_node	*old;
	struct prefix_node	*new;
	struct in6_addr		 addr;
	unsigned int		 len;
	unsigned int		 i;
	char			*cur_prefix;

	cur_prefix = (char *)(lsa + 1);

	for (i = 0; i < ntohl(lsa->numprefix); i++) {
		if ((new = calloc(1, sizeof(*new))) == NULL)
			fatal("prefix_tree_add");
		new->prefix = (struct lsa_prefix *)cur_prefix;

		len = sizeof(*new->prefix)
		    + LSA_PREFIXSIZE(new->prefix->prefixlen);

		bzero(&addr, sizeof(addr));
		memcpy(&addr, new->prefix + 1,
		    LSA_PREFIXSIZE(new->prefix->prefixlen));

		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
			old = RB_INSERT(prefix_tree, tree, new);
			if (old != NULL) {
				old->prefix->options |= new->prefix->options;
				free(new);
			}
		}

		cur_prefix = cur_prefix + len;
	}
}

RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)

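/*
 * Intra-area-prefix LSA origination: orig_intra_lsa_net() collects the
 * global prefixes advertised on a link this router is DR for, while
 * orig_intra_lsa_rtr() covers the router's own interface addresses; both
 * are driven by orig_intra_area_prefix_lsas().
 */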
1373 |
|
|
struct lsa * |
1374 |
|
|
orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old) |
1375 |
|
|
{ |
1376 |
|
|
struct lsa *lsa; |
1377 |
|
|
struct vertex *v; |
1378 |
|
|
struct rde_nbr *nbr; |
1379 |
|
|
struct prefix_node *node; |
1380 |
|
|
struct prefix_tree tree; |
1381 |
|
|
int num_full_nbr; |
1382 |
|
|
u_int16_t len; |
1383 |
|
|
u_int16_t numprefix; |
1384 |
|
|
|
1385 |
|
|
log_debug("orig_intra_lsa_net: area %s, interface %s", |
1386 |
|
|
inet_ntoa(area->id), iface->name); |
1387 |
|
|
|
1388 |
|
|
RB_INIT(&tree); |
1389 |
|
|
|
1390 |
|
|
if (iface->state & IF_STA_DR) { |
1391 |
|
|
num_full_nbr = 0; |
1392 |
|
|
LIST_FOREACH(nbr, &area->nbr_list, entry) { |
1393 |
|
|
if (nbr->self || |
1394 |
|
|
nbr->iface->ifindex != iface->ifindex || |
1395 |
|
|
(nbr->state & NBR_STA_FULL) == 0) |
1396 |
|
|
continue; |
1397 |
|
|
num_full_nbr++; |
1398 |
|
|
v = lsa_find(iface, htons(LSA_TYPE_LINK), |
1399 |
|
|
htonl(nbr->iface_id), nbr->id.s_addr); |
1400 |
|
|
if (v) |
1401 |
|
|
prefix_tree_add(&tree, &v->lsa->data.link); |
1402 |
|
|
} |
1403 |
|
|
if (num_full_nbr == 0) { |
1404 |
|
|
/* There are no adjacent neighbors on link. |
1405 |
|
|
* If a copy of this LSA already exists in DB, |
1406 |
|
|
* it needs to be flushed. orig_intra_lsa_rtr() |
1407 |
|
|
* will take care of prefixes configured on |
1408 |
|
|
* this interface. */ |
1409 |
|
|
if (!old) |
1410 |
|
|
return NULL; |
1411 |
|
|
} else { |
1412 |
|
|
/* Add our own prefixes configured for this link. */ |
1413 |
|
|
v = lsa_find(iface, htons(LSA_TYPE_LINK), |
1414 |
|
|
htonl(iface->ifindex), rde_router_id()); |
1415 |
|
|
if (v) |
1416 |
|
|
prefix_tree_add(&tree, &v->lsa->data.link); |
1417 |
|
|
} |
1418 |
|
|
/* Continue only if a copy of this LSA already exists in DB. |
1419 |
|
|
* It needs to be flushed. */ |
1420 |
|
|
} else if (!old) |
1421 |
|
|
return NULL; |
1422 |
|
|
|
1423 |
|
|
len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix); |
1424 |
|
|
if ((lsa = calloc(1, len)) == NULL) |
1425 |
|
|
fatal("orig_intra_lsa_net"); |
1426 |
|
|
|
1427 |
|
|
lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK); |
1428 |
|
|
lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex); |
1429 |
|
|
lsa->data.pref_intra.ref_adv_rtr = rde_router_id(); |
1430 |
|
|
|
1431 |
|
|
numprefix = 0; |
1432 |
|
|
RB_FOREACH(node, prefix_tree, &tree) { |
1433 |
|
|
append_prefix_lsa(&lsa, &len, node->prefix); |
1434 |
|
|
numprefix++; |
1435 |
|
|
} |
1436 |
|
|
|
1437 |
|
|
lsa->data.pref_intra.numprefix = htons(numprefix); |
1438 |
|
|
|
1439 |
|
|
while (!RB_EMPTY(&tree)) |
1440 |
|
|
free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree))); |
1441 |
|
|
|
1442 |
|
|
/* LSA header */ |
1443 |
|
|
/* If numprefix is zero, originate with MAX_AGE to flush LSA. */ |
1444 |
|
|
lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE); |
1445 |
|
|
lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX); |
1446 |
|
|
lsa->hdr.ls_id = htonl(iface->ifindex); |
1447 |
|
|
lsa->hdr.adv_rtr = rde_router_id(); |
1448 |
|
|
lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); |
1449 |
|
|
lsa->hdr.len = htons(len); |
1450 |
|
|
lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); |
1451 |
|
|
|
1452 |
|
|
return lsa; |
1453 |
|
|
} |
1454 |
|
|
|
1455 |
|
|
struct lsa * |
1456 |
|
|
orig_intra_lsa_rtr(struct area *area, struct vertex *old) |
1457 |
|
|
{ |
1458 |
|
|
char lsa_prefix_buf[sizeof(struct lsa_prefix) |
1459 |
|
|
+ sizeof(struct in6_addr)]; |
1460 |
|
|
struct lsa *lsa; |
1461 |
|
|
struct lsa_prefix *lsa_prefix; |
1462 |
|
|
struct in6_addr *prefix; |
1463 |
|
|
struct iface *iface; |
1464 |
|
|
struct iface_addr *ia; |
1465 |
|
|
struct rde_nbr *nbr; |
1466 |
|
|
u_int16_t len; |
1467 |
|
|
u_int16_t numprefix; |
1468 |
|
|
|
1469 |
|
|
len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix); |
1470 |
|
|
if ((lsa = calloc(1, len)) == NULL) |
1471 |
|
|
fatal("orig_intra_lsa_rtr"); |
1472 |
|
|
|
1473 |
|
|
lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER); |
1474 |
|
|
lsa->data.pref_intra.ref_ls_id = 0; |
1475 |
|
|
lsa->data.pref_intra.ref_adv_rtr = rde_router_id(); |
1476 |
|
|
|
1477 |
|
|
numprefix = 0; |
1478 |
|
|
LIST_FOREACH(iface, &area->iface_list, entry) { |
1479 |
|
|
if (!((iface->flags & IFF_UP) && |
1480 |
|
|
LINK_STATE_IS_UP(iface->linkstate))) |
1481 |
|
|
/* interface or link state down */ |
1482 |
|
|
continue; |
1483 |
|
|
if ((iface->state & IF_STA_DOWN) && |
1484 |
|
|
!(iface->cflags & F_IFACE_PASSIVE)) |
1485 |
|
|
/* passive interfaces stay in state DOWN */ |
1486 |
|
|
continue; |
1487 |
|
|
|
1488 |
|
|
/* Broadcast links with adjacencies are handled |
1489 |
|
|
* by orig_intra_lsa_net(), ignore. */ |
1490 |
|
|
if (iface->type == IF_TYPE_BROADCAST || |
1491 |
|
|
iface->type == IF_TYPE_NBMA) { |
1492 |
|
|
if (iface->state & IF_STA_WAITING) |
1493 |
|
|
/* Skip, we're still waiting for |
1494 |
|
|
* adjacencies to form. */ |
1495 |
|
|
continue; |
1496 |
|
|
|
1497 |
|
|
LIST_FOREACH(nbr, &area->nbr_list, entry) |
1498 |
|
|
if (!nbr->self && |
1499 |
|
|
nbr->iface->ifindex == iface->ifindex && |
1500 |
|
|
nbr->state & NBR_STA_FULL) |
1501 |
|
|
break; |
1502 |
|
|
if (nbr) |
1503 |
|
|
continue; |
1504 |
|
|
} |
1505 |
|
|
|
1506 |
|
|
lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf; |
1507 |
|
|
|
1508 |
|
|
TAILQ_FOREACH(ia, &iface->ifa_list, entry) { |
1509 |
|
|
if (IN6_IS_ADDR_LINKLOCAL(&ia->addr)) |
1510 |
|
|
continue; |
1511 |
|
|
|
1512 |
|
|
bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf)); |
1513 |
|
|
|
1514 |
|
|
if (iface->type == IF_TYPE_POINTOMULTIPOINT || |
1515 |
|
|
iface->state & IF_STA_LOOPBACK) { |
1516 |
|
|
lsa_prefix->prefixlen = 128; |
1517 |
|
|
} else { |
1518 |
|
|
lsa_prefix->prefixlen = ia->prefixlen; |
1519 |
|
|
lsa_prefix->metric = htons(iface->metric); |
1520 |
|
|
} |
1521 |
|
|
|
1522 |
|
|
if (lsa_prefix->prefixlen == 128) |
1523 |
|
|
lsa_prefix->options |= OSPF_PREFIX_LA; |
1524 |
|
|
|
1525 |
|
|
log_debug("orig_intra_lsa_rtr: area %s, interface %s: " |
1526 |
|
|
"%s/%d", inet_ntoa(area->id), |
1527 |
|
|
iface->name, log_in6addr(&ia->addr), |
1528 |
|
|
lsa_prefix->prefixlen); |
1529 |
|
|
|
1530 |
|
|
prefix = (struct in6_addr *)(lsa_prefix + 1); |
1531 |
|
|
inet6applymask(prefix, &ia->addr, |
1532 |
|
|
lsa_prefix->prefixlen); |
1533 |
|
|
append_prefix_lsa(&lsa, &len, lsa_prefix); |
1534 |
|
|
numprefix++; |
1535 |
|
|
} |
1536 |
|
|
|
1537 |
|
|
/* TOD: Add prefixes of directly attached hosts, too */ |
1538 |
|
|
/* TOD: Add prefixes for virtual links */ |
1539 |
|
|
} |
1540 |
|
|
|
1541 |
|
|
/* If no prefixes were included, continue only if a copy of this |
1542 |
|
|
* LSA already exists in DB. It needs to be flushed. */ |
1543 |
|
|
if (numprefix == 0 && !old) { |
1544 |
|
|
free(lsa); |
1545 |
|
|
return NULL; |
1546 |
|
|
} |
1547 |
|
|
|
1548 |
|
|
lsa->data.pref_intra.numprefix = htons(numprefix); |
1549 |
|
|
|
1550 |
|
|
/* LSA header */ |
1551 |
|
|
/* If numprefix is zero, originate with MAX_AGE to flush LSA. */ |
1552 |
|
|
lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE); |
1553 |
|
|
lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX); |
1554 |
|
|
lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR); |
1555 |
|
|
lsa->hdr.adv_rtr = rde_router_id(); |
1556 |
|
|
lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); |
1557 |
|
|
lsa->hdr.len = htons(len); |
1558 |
|
|
lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); |
1559 |
|
|
|
1560 |
|
|
return lsa; |
1561 |
|
|
} |
1562 |
|
|
|
1563 |
|
|
void |
1564 |
|
|
orig_intra_area_prefix_lsas(struct area *area) |
1565 |
|
|
{ |
1566 |
|
|
struct lsa *lsa; |
1567 |
|
|
struct vertex *old; |
1568 |
|
|
struct iface *iface; |
1569 |
|
|
|
1570 |
|
|
LIST_FOREACH(iface, &area->iface_list, entry) { |
1571 |
|
|
if (iface->type == IF_TYPE_BROADCAST || |
1572 |
|
|
iface->type == IF_TYPE_NBMA) { |
1573 |
|
|
old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX), |
1574 |
|
|
htonl(iface->ifindex), rde_router_id()); |
1575 |
|
|
lsa = orig_intra_lsa_net(area, iface, old); |
1576 |
|
|
if (lsa) |
1577 |
|
|
lsa_merge(rde_nbr_self(area), lsa, old); |
1578 |
|
|
} |
1579 |
|
|
} |
1580 |
|
|
|
1581 |
|
|
old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX), |
1582 |
|
|
htonl(LS_ID_INTRA_RTR), rde_router_id()); |
1583 |
|
|
lsa = orig_intra_lsa_rtr(area, old); |
1584 |
|
|
if (lsa) |
1585 |
|
|
lsa_merge(rde_nbr_self(area), lsa, old); |
1586 |
|
|
} |
1587 |
|
|
|
1588 |
|
|
int |
1589 |
|
|
comp_asext(struct lsa *a, struct lsa *b) |
1590 |
|
|
{ |
1591 |
|
|
/* compare prefixes, if they are equal or not */ |
1592 |
|
|
if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen) |
1593 |
|
|
return (-1); |
1594 |
|
|
return (memcmp( |
1595 |
|
|
(char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext), |
1596 |
|
|
(char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext), |
1597 |
|
|
LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen))); |
1598 |
|
|
} |
1599 |
|
|
|
1600 |
|
|
struct lsa * |
1601 |
|
|
orig_asext_lsa(struct kroute *kr, u_int16_t age) |
1602 |
|
|
{ |
1603 |
|
|
struct lsa *lsa; |
1604 |
|
|
u_int32_t ext_tag; |
1605 |
|
|
u_int16_t len, ext_off; |
1606 |
|
|
|
1607 |
|
|
len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) + |
1608 |
|
|
LSA_PREFIXSIZE(kr->prefixlen); |
1609 |
|
|
|
1610 |
|
|
/* |
1611 |
|
|
* nexthop -- on connected routes we are the nexthop, |
1612 |
|
|
* on all other cases we should announce the true nexthop |
1613 |
|
|
* unless that nexthop is outside of the ospf cloud. |
1614 |
|
|
* XXX for now we don't do this. |
1615 |
|
|
*/ |
1616 |
|
|
|
1617 |
|
|
ext_off = len; |
1618 |
|
|
if (kr->ext_tag) { |
1619 |
|
|
len += sizeof(ext_tag); |
1620 |
|
|
} |
1621 |
|
|
if ((lsa = calloc(1, len)) == NULL) |
1622 |
|
|
fatal("orig_asext_lsa"); |
1623 |
|
|
|
1624 |
|
|
log_debug("orig_asext_lsa: %s/%d age %d", |
1625 |
|
|
log_in6addr(&kr->prefix), kr->prefixlen, age); |
1626 |
|
|
|
1627 |
|
|
/* LSA header */ |
1628 |
|
|
lsa->hdr.age = htons(age); |
1629 |
|
|
lsa->hdr.type = htons(LSA_TYPE_EXTERNAL); |
1630 |
|
|
lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr; |
1631 |
|
|
lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); |
1632 |
|
|
lsa->hdr.len = htons(len); |
1633 |
|
|
|
1634 |
|
|
lsa->data.asext.prefix.prefixlen = kr->prefixlen; |
1635 |
|
|
memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext), |
1636 |
|
|
&kr->prefix, LSA_PREFIXSIZE(kr->prefixlen)); |
1637 |
|
|
|
1638 |
|
|
lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, lsa->hdr.type, |
1639 |
|
|
lsa->hdr.adv_rtr, comp_asext, lsa); |
1640 |
|
|
|
1641 |
|
|
if (age == MAX_AGE) { |
1642 |
|
|
/* inherit metric and ext_tag from the current LSA, |
1643 |
|
|
* some routers don't like to get withdraws that are |
1644 |
|
|
* different from what they have in their table. |
1645 |
|
|
*/ |
1646 |
|
|
struct vertex *v; |
1647 |
|
|
v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id, |
1648 |
|
|
lsa->hdr.adv_rtr); |
1649 |
|
|
if (v != NULL) { |
1650 |
|
|
kr->metric = ntohl(v->lsa->data.asext.metric); |
1651 |
|
|
if (kr->metric & LSA_ASEXT_T_FLAG) { |
1652 |
|
|
memcpy(&ext_tag, (char *)v->lsa + ext_off, |
1653 |
|
|
sizeof(ext_tag)); |
1654 |
|
|
kr->ext_tag = ntohl(ext_tag); |
1655 |
|
|
} |
1656 |
|
|
kr->metric &= LSA_METRIC_MASK; |
1657 |
|
|
} |
1658 |
|
|
} |
1659 |
|
|
|
1660 |
|
|
if (kr->ext_tag) { |
1661 |
|
|
lsa->data.asext.metric = htonl(kr->metric | LSA_ASEXT_T_FLAG); |
1662 |
|
|
ext_tag = htonl(kr->ext_tag); |
1663 |
|
|
memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag)); |
1664 |
|
|
} else { |
1665 |
|
|
lsa->data.asext.metric = htonl(kr->metric); |
1666 |
|
|
} |
1667 |
|
|
|
1668 |
|
|
lsa->hdr.ls_chksum = 0; |
1669 |
|
|
lsa->hdr.ls_chksum = |
1670 |
|
|
htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); |
1671 |
|
|
|
1672 |
|
|
return (lsa); |
1673 |
|
|
} |
1674 |
|
|
|
1675 |
|
|
struct lsa * |
1676 |
|
|
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid) |
1677 |
|
|
{ |
1678 |
|
|
#if 0 /* XXX a lot todo */ |
1679 |
|
|
struct lsa *lsa; |
1680 |
|
|
u_int16_t len; |
1681 |
|
|
|
1682 |
|
|
len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum); |
1683 |
|
|
if ((lsa = calloc(1, len)) == NULL) |
1684 |
|
|
fatal("orig_sum_lsa"); |
1685 |
|
|
|
1686 |
|
|
/* LSA header */ |
1687 |
|
|
lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE); |
1688 |
|
|
lsa->hdr.type = type; |
1689 |
|
|
lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr; |
1690 |
|
|
lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); |
1691 |
|
|
lsa->hdr.len = htons(len); |
1692 |
|
|
|
1693 |
|
|
/* prefix and mask */ |
1694 |
|
|
/* |
1695 |
|
|
* TODO ls_id must be unique, for overlapping routes this may |
1696 |
|
|
* not be true. In this case a hack needs to be done to |
1697 |
|
|
* make the ls_id unique. |
1698 |
|
|
*/ |
1699 |
|
|
lsa->hdr.ls_id = rte->prefix.s_addr; |
1700 |
|
|
if (type == LSA_TYPE_SUM_NETWORK) |
1701 |
|
|
lsa->data.sum.mask = prefixlen2mask(rte->prefixlen); |
1702 |
|
|
else |
1703 |
|
|
lsa->data.sum.mask = 0; /* must be zero per RFC */ |
1704 |
|
|
|
1705 |
|
|
lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK); |
1706 |
|
|
|
1707 |
|
|
lsa->hdr.ls_chksum = 0; |
1708 |
|
|
lsa->hdr.ls_chksum = |
1709 |
|
|
htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); |
1710 |
|
|
|
1711 |
|
|
return (lsa); |
1712 |
|
|
#endif |
1713 |
|
|
return NULL; |
1714 |
|
|
} |