/*	$OpenBSD: kroute.c,v 1.18 2017/07/24 11:00:01 friehm Exp $ */

/*
 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>

#include <arpa/inet.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "eigrpd.h"
#include "log.h"

static struct {
	uint32_t		rtseq;
	pid_t			pid;
	int			fib_sync;
	int			fd;
	struct event		ev;
	unsigned int		rdomain;
} kr_state;

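/*
 * Kernel routes are kept in a three-level hierarchy: nexthop entries
 * (kroute_node) hang off per-priority lists (kroute_priority), which in
 * turn hang off prefix nodes (kroute_prefix) stored in an RB-tree.  This
 * allows multiple nexthops (multipath) and multiple priorities per prefix.
 */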
struct kroute_node {
	TAILQ_ENTRY(kroute_node)	 entry;
	struct kroute_priority		*kprio;		/* back pointer */
	struct kroute			 r;
};

struct kroute_priority {
	TAILQ_ENTRY(kroute_priority)	 entry;
	struct kroute_prefix		*kp;		/* back pointer */
	uint8_t				 priority;
	TAILQ_HEAD(, kroute_node)	 nexthops;
};

struct kroute_prefix {
	RB_ENTRY(kroute_prefix)		 entry;
	int				 af;
	union eigrpd_addr		 prefix;
	uint8_t				 prefixlen;
	TAILQ_HEAD(plist, kroute_priority) priorities;
};
RB_HEAD(kroute_tree, kroute_prefix);
RB_PROTOTYPE(kroute_tree, kroute_prefix, entry, kroute_compare)

struct kif_addr {
	TAILQ_ENTRY(kif_addr)	 entry;
	struct kaddr		 a;
};

struct kif_node {
	RB_ENTRY(kif_node)	 entry;
	TAILQ_HEAD(, kif_addr)	 addrs;
	struct kif		 k;
};
RB_HEAD(kif_tree, kif_node);
RB_PROTOTYPE(kif_tree, kif_node, entry, kif_compare)

static void		 kr_dispatch_msg(int, short, void *);
static void		 kr_redist_remove(struct kroute *);
static int		 kr_redist_eval(struct kroute *);
static void		 kr_redistribute(struct kroute_prefix *);
static __inline int	 kroute_compare(struct kroute_prefix *,
			    struct kroute_prefix *);
static struct kroute_prefix *kroute_find_prefix(int, union eigrpd_addr *,
			    uint8_t);
static struct kroute_priority *kroute_find_prio(struct kroute_prefix *,
			    uint8_t);
static struct kroute_node *kroute_find_gw(struct kroute_priority *,
			    union eigrpd_addr *);
static struct kroute_node *kroute_insert(struct kroute *);
static int		 kroute_remove(struct kroute *);
static void		 kroute_clear(void);
static __inline int	 kif_compare(struct kif_node *, struct kif_node *);
static struct kif_node	*kif_find(unsigned short);
static struct kif_node	*kif_insert(unsigned short);
static int		 kif_remove(struct kif_node *);
static struct kif	*kif_update(unsigned short, int, struct if_data *,
			    struct sockaddr_dl *);
static int		 kif_validate(unsigned short);
static void		 protect_lo(void);
static uint8_t		 prefixlen_classful(in_addr_t);
static void		 get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
static void		 if_change(unsigned short, int, struct if_data *,
			    struct sockaddr_dl *);
static void		 if_newaddr(unsigned short, struct sockaddr *,
			    struct sockaddr *, struct sockaddr *);
static void		 if_deladdr(unsigned short, struct sockaddr *,
			    struct sockaddr *, struct sockaddr *);
static void		 if_announce(void *);
static int		 send_rtmsg_v4(int, int, struct kroute *);
static int		 send_rtmsg_v6(int, int, struct kroute *);
static int		 send_rtmsg(int, int, struct kroute *);
static int		 fetchtable(void);
static int		 fetchifs(void);
static int		 dispatch_rtmsg(void);
static int		 rtmsg_process(char *, size_t);
static int		 rtmsg_process_route(struct rt_msghdr *,
			    struct sockaddr *[RTAX_MAX]);

RB_GENERATE(kroute_tree, kroute_prefix, entry, kroute_compare)
RB_GENERATE(kif_tree, kif_node, entry, kif_compare)

static struct kroute_tree	 krt = RB_INITIALIZER(&krt);
static struct kif_tree		 kit = RB_INITIALIZER(&kit);

int
kif_init(void)
{
	if (fetchifs() == -1)
		return (-1);

	return (0);
}

int
kr_init(int fs, unsigned int rdomain)
{
	int		opt = 0, rcvbuf, default_rcvbuf;
	socklen_t	optlen;

	kr_state.fib_sync = fs;
	kr_state.rdomain = rdomain;

	if ((kr_state.fd = socket(AF_ROUTE,
	    SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)) == -1) {
		log_warn("%s: socket", __func__);
		return (-1);
	}

	/* not interested in my own messages */
	if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
	    &opt, sizeof(opt)) == -1)
		log_warn("%s: setsockopt(SO_USELOOPBACK)", __func__);

	/* grow receive buffer, don't wanna miss messages */
	optlen = sizeof(default_rcvbuf);
	if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
	    &default_rcvbuf, &optlen) == -1)
		log_warn("%s: getsockopt SOL_SOCKET SO_RCVBUF", __func__);
	else
		for (rcvbuf = MAX_RTSOCK_BUF;
		    rcvbuf > default_rcvbuf &&
		    setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
		    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
		    rcvbuf /= 2)
			;	/* nothing */

	kr_state.pid = getpid();
	kr_state.rtseq = 1;

	if (fetchtable() == -1)
		return (-1);

	protect_lo();

	event_set(&kr_state.ev, kr_state.fd, EV_READ | EV_PERSIST,
	    kr_dispatch_msg, NULL);
	event_add(&kr_state.ev, NULL);

	return (0);
}

void
kif_redistribute(void)
{
	struct kif_node		*kif;
	struct kif_addr		*ka;

	RB_FOREACH(kif, kif_tree, &kit) {
		main_imsg_compose_eigrpe(IMSG_IFINFO, 0, &kif->k,
		    sizeof(struct kif));
		TAILQ_FOREACH(ka, &kif->addrs, entry) {
			main_imsg_compose_eigrpe(IMSG_NEWADDR, 0, &ka->a,
			    sizeof(ka->a));
		}
	}
}

int
kr_change(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;
	int			 action = RTM_ADD;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL)
		kn = kroute_insert(kr);
	else {
		kprio = kroute_find_prio(kp, kr->priority);
		if (kprio == NULL)
			kn = kroute_insert(kr);
		else {
			kn = kroute_find_gw(kprio, &kr->nexthop);
			if (kn == NULL)
				kn = kroute_insert(kr);
			else
				action = RTM_CHANGE;
		}
	}

	/* send update */
	if (send_rtmsg(kr_state.fd, action, kr) == -1)
		return (-1);

	kn->r.flags |= F_EIGRPD_INSERTED;

	return (0);
}

int
kr_delete(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL)
		return (0);
	kprio = kroute_find_prio(kp, kr->priority);
	if (kprio == NULL)
		return (0);
	kn = kroute_find_gw(kprio, &kr->nexthop);
	if (kn == NULL)
		return (0);

	if (!(kn->r.flags & F_EIGRPD_INSERTED))
		return (0);

	if (send_rtmsg(kr_state.fd, RTM_DELETE, &kn->r) == -1)
		return (-1);

	if (kroute_remove(kr) == -1)
		return (-1);

	return (0);
}

void
kr_shutdown(void)
{
	kr_fib_decouple();
	kroute_clear();
	kif_clear();
}

void
kr_fib_couple(void)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	if (kr_state.fib_sync == 1)	/* already coupled */
		return;

	kr_state.fib_sync = 1;

	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (!(kn->r.flags & F_EIGRPD_INSERTED))
					continue;
				send_rtmsg(kr_state.fd, RTM_ADD, &kn->r);
			}

	log_info("kernel routing table coupled");
}

void
kr_fib_decouple(void)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	if (kr_state.fib_sync == 0)	/* already decoupled */
		return;

	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (!(kn->r.flags & F_EIGRPD_INSERTED))
					continue;

				send_rtmsg(kr_state.fd, RTM_DELETE, &kn->r);
			}

	kr_state.fib_sync = 0;

	log_info("kernel routing table decoupled");
}

/* ARGSUSED */
static void
kr_dispatch_msg(int fd, short event, void *bula)
{
	if (dispatch_rtmsg() == -1)
		event_loopexit(NULL);
}

void
kr_show_route(struct imsg *imsg)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;
	struct kroute		 kr;
	int			 flags;

	if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags)) {
		log_warnx("%s: wrong imsg len", __func__);
		return;
	}
	memcpy(&flags, imsg->data, sizeof(flags));
	RB_FOREACH(kp, kroute_tree, &krt)
		TAILQ_FOREACH(kprio, &kp->priorities, entry)
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (flags && !(kn->r.flags & flags))
					continue;

				kr = kn->r;
				if (kr.priority ==
				    eigrpd_conf->fib_priority_external)
					kr.flags |= F_CTL_EXTERNAL;
				main_imsg_compose_eigrpe(IMSG_CTL_KROUTE,
				    imsg->hdr.pid, &kr, sizeof(kr));
			}

	main_imsg_compose_eigrpe(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
}

void
kr_ifinfo(char *ifname, pid_t pid)
{
	struct kif_node		*kif;

	RB_FOREACH(kif, kif_tree, &kit)
		if (ifname == NULL || !strcmp(ifname, kif->k.ifname)) {
			main_imsg_compose_eigrpe(IMSG_CTL_IFINFO,
			    pid, &kif->k, sizeof(kif->k));
		}

	main_imsg_compose_eigrpe(IMSG_CTL_END, pid, NULL, 0);
}

static void
kr_redist_remove(struct kroute *kr)
{
	/* was the route redistributed? */
	if (!(kr->flags & F_REDISTRIBUTED))
		return;

	/* remove redistributed flag */
	kr->flags &= ~F_REDISTRIBUTED;
	main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, kr, sizeof(*kr));
}

static int
kr_redist_eval(struct kroute *kr)
{
	/* Only non-eigrpd routes are considered for redistribution. */
	if (!(kr->flags & F_KERNEL))
		goto dont_redistribute;

	/* Dynamic routes are not redistributable. */
	if (kr->flags & F_DYNAMIC)
		goto dont_redistribute;

	/* filter-out non-redistributable addresses */
	if (bad_addr(kr->af, &kr->prefix) ||
	    (kr->af == AF_INET6 && IN6_IS_SCOPE_EMBED(&kr->prefix.v6)))
		goto dont_redistribute;

	/* interface is not up and running so don't announce */
	if (kr->flags & F_DOWN)
		goto dont_redistribute;

	/*
	 * Consider networks with nexthop loopback as not redistributable
	 * unless it is a reject or blackhole route.
	 */
	switch (kr->af) {
	case AF_INET:
		if (kr->nexthop.v4.s_addr == htonl(INADDR_LOOPBACK) &&
		    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
			goto dont_redistribute;
		break;
	case AF_INET6:
		if (IN6_IS_ADDR_LOOPBACK(&kr->nexthop.v6) &&
		    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
			goto dont_redistribute;
		break;
	default:
		log_debug("%s: unexpected address-family", __func__);
		break;
	}

	/* prefix should be redistributed */
	kr->flags |= F_REDISTRIBUTED;
	main_imsg_compose_rde(IMSG_NETWORK_ADD, 0, kr, sizeof(*kr));
	return (1);

dont_redistribute:
	kr_redist_remove(kr);
	return (0);
}

static void
kr_redistribute(struct kroute_prefix *kp)
{
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	/* only the highest prio route can be redistributed */
	TAILQ_FOREACH_REVERSE(kprio, &kp->priorities, plist, entry) {
		if (kprio == TAILQ_FIRST(&kp->priorities)) {
			TAILQ_FOREACH(kn, &kprio->nexthops, entry)
				/* pick just one entry in case of multipath */
				if (kr_redist_eval(&kn->r))
					break;
		} else {
			TAILQ_FOREACH(kn, &kprio->nexthops, entry)
				kr_redist_remove(&kn->r);
		}
	}
}

static __inline int
kroute_compare(struct kroute_prefix *a, struct kroute_prefix *b)
{
	int		 addrcmp;

	if (a->af < b->af)
		return (-1);
	if (a->af > b->af)
		return (1);

	addrcmp = eigrp_addrcmp(a->af, &a->prefix, &b->prefix);
	if (addrcmp != 0)
		return (addrcmp);

	if (a->prefixlen < b->prefixlen)
		return (-1);
	if (a->prefixlen > b->prefixlen)
		return (1);

	return (0);
}

/* tree management */
static struct kroute_prefix *
kroute_find_prefix(int af, union eigrpd_addr *prefix, uint8_t prefixlen)
{
	struct kroute_prefix	 s;

	s.af = af;
	s.prefix = *prefix;
	s.prefixlen = prefixlen;

	return (RB_FIND(kroute_tree, &krt, &s));
}

static struct kroute_priority *
kroute_find_prio(struct kroute_prefix *kp, uint8_t prio)
{
	struct kroute_priority	*kprio;

	/* RTP_ANY here picks the lowest priority node */
	if (prio == RTP_ANY)
		return (TAILQ_FIRST(&kp->priorities));

	TAILQ_FOREACH(kprio, &kp->priorities, entry)
		if (kprio->priority == prio)
			return (kprio);

	return (NULL);
}

static struct kroute_node *
kroute_find_gw(struct kroute_priority *kprio, union eigrpd_addr *nh)
{
	struct kroute_node	*kn;

	TAILQ_FOREACH(kn, &kprio->nexthops, entry)
		if (eigrp_addrcmp(kprio->kp->af, &kn->r.nexthop, nh) == 0)
			return (kn);

	return (NULL);
}

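/*
 * kroute_insert() below creates any missing prefix and priority levels on
 * demand and keeps the priority list sorted (lowest priority first), so
 * callers simply pass the struct kroute they want stored.
 */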
static struct kroute_node *
kroute_insert(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio, *tmp;
	struct kroute_node	*kn;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL) {
		kp = calloc(1, sizeof((*kp)));
		if (kp == NULL)
			fatal("kroute_insert");
		kp->af = kr->af;
		kp->prefix = kr->prefix;
		kp->prefixlen = kr->prefixlen;
		TAILQ_INIT(&kp->priorities);
		RB_INSERT(kroute_tree, &krt, kp);
	}

	kprio = kroute_find_prio(kp, kr->priority);
	if (kprio == NULL) {
		kprio = calloc(1, sizeof(*kprio));
		if (kprio == NULL)
			fatal("kroute_insert");
		kprio->kp = kp;
		kprio->priority = kr->priority;
		TAILQ_INIT(&kprio->nexthops);

		/* lower priorities first */
		TAILQ_FOREACH(tmp, &kp->priorities, entry)
			if (tmp->priority > kprio->priority)
				break;
		if (tmp)
			TAILQ_INSERT_BEFORE(tmp, kprio, entry);
		else
			TAILQ_INSERT_TAIL(&kp->priorities, kprio, entry);
	}

	kn = kroute_find_gw(kprio, &kr->nexthop);
	if (kn == NULL) {
		kn = calloc(1, sizeof(*kn));
		if (kn == NULL)
			fatal("kroute_insert");
		kn->kprio = kprio;
		kn->r = *kr;
		TAILQ_INSERT_TAIL(&kprio->nexthops, kn, entry);
	}

	if (!(kr->flags & F_KERNEL)) {
		/* don't validate or redistribute eigrp route */
		kr->flags &= ~F_DOWN;
		return (kn);
	}

	if (kif_validate(kr->ifindex))
		kr->flags &= ~F_DOWN;
	else
		kr->flags |= F_DOWN;

	kr_redistribute(kp);
	return (kn);
}

static int
kroute_remove(struct kroute *kr)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	kp = kroute_find_prefix(kr->af, &kr->prefix, kr->prefixlen);
	if (kp == NULL)
		goto notfound;
	kprio = kroute_find_prio(kp, kr->priority);
	if (kprio == NULL)
		goto notfound;
	kn = kroute_find_gw(kprio, &kr->nexthop);
	if (kn == NULL)
		goto notfound;

	kr_redist_remove(&kn->r);

	TAILQ_REMOVE(&kprio->nexthops, kn, entry);
	free(kn);

	if (TAILQ_EMPTY(&kprio->nexthops)) {
		TAILQ_REMOVE(&kp->priorities, kprio, entry);
		free(kprio);
	}

	if (TAILQ_EMPTY(&kp->priorities)) {
		if (RB_REMOVE(kroute_tree, &krt, kp) == NULL) {
			log_warnx("%s failed for %s/%u", __func__,
			    log_addr(kr->af, &kr->prefix), kp->prefixlen);
			return (-1);
		}
		free(kp);
	} else
		kr_redistribute(kp);

	return (0);

notfound:
	log_warnx("%s failed to find %s/%u", __func__,
	    log_addr(kr->af, &kr->prefix), kr->prefixlen);
	return (-1);
}

static void
kroute_clear(void)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	while ((kp = RB_MIN(kroute_tree, &krt)) != NULL) {
		while ((kprio = TAILQ_FIRST(&kp->priorities)) != NULL) {
			while ((kn = TAILQ_FIRST(&kprio->nexthops)) != NULL) {
				TAILQ_REMOVE(&kprio->nexthops, kn, entry);
				free(kn);
			}
			TAILQ_REMOVE(&kp->priorities, kprio, entry);
			free(kprio);
		}
		RB_REMOVE(kroute_tree, &krt, kp);
		free(kp);
	}
}

static __inline int
kif_compare(struct kif_node *a, struct kif_node *b)
{
	return (b->k.ifindex - a->k.ifindex);
}

/* tree management */
static struct kif_node *
kif_find(unsigned short ifindex)
{
	struct kif_node	 s;

	memset(&s, 0, sizeof(s));
	s.k.ifindex = ifindex;

	return (RB_FIND(kif_tree, &kit, &s));
}

struct kif *
kif_findname(char *ifname)
{
	struct kif_node	*kif;

	RB_FOREACH(kif, kif_tree, &kit)
		if (!strcmp(ifname, kif->k.ifname))
			return (&kif->k);

	return (NULL);
}

static struct kif_node *
kif_insert(unsigned short ifindex)
{
	struct kif_node	*kif;

	if ((kif = calloc(1, sizeof(struct kif_node))) == NULL)
		return (NULL);

	kif->k.ifindex = ifindex;
	TAILQ_INIT(&kif->addrs);

	if (RB_INSERT(kif_tree, &kit, kif) != NULL)
		fatalx("kif_insert: RB_INSERT");

	return (kif);
}

static int
kif_remove(struct kif_node *kif)
{
	struct kif_addr	*ka;

	if (RB_REMOVE(kif_tree, &kit, kif) == NULL) {
		log_warnx("%s failed for interface %s", __func__,
		    kif->k.ifname);
		return (-1);
	}

	while ((ka = TAILQ_FIRST(&kif->addrs)) != NULL) {
		TAILQ_REMOVE(&kif->addrs, ka, entry);
		free(ka);
	}
	free(kif);
	return (0);
}

void
kif_clear(void)
{
	struct kif_node	*kif;

	while ((kif = RB_MIN(kif_tree, &kit)) != NULL)
		kif_remove(kif);
}

static struct kif *
kif_update(unsigned short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
	struct kif_node	*kif;

	if ((kif = kif_find(ifindex)) == NULL) {
		if ((kif = kif_insert(ifindex)) == NULL)
			return (NULL);
		kif->k.nh_reachable = (flags & IFF_UP) &&
		    LINK_STATE_IS_UP(ifd->ifi_link_state);
	}

	kif->k.flags = flags;
	kif->k.link_state = ifd->ifi_link_state;
	kif->k.if_type = ifd->ifi_type;
	kif->k.baudrate = ifd->ifi_baudrate;
	kif->k.mtu = ifd->ifi_mtu;
	kif->k.rdomain = ifd->ifi_rdomain;

	if (sdl && sdl->sdl_family == AF_LINK) {
		if (sdl->sdl_nlen >= sizeof(kif->k.ifname))
			memcpy(kif->k.ifname, sdl->sdl_data,
			    sizeof(kif->k.ifname) - 1);
		else if (sdl->sdl_nlen > 0)
			memcpy(kif->k.ifname, sdl->sdl_data,
			    sdl->sdl_nlen);
		/* string already terminated via calloc() */
	}

	return (&kif->k);
}

static int
kif_validate(unsigned short ifindex)
{
	struct kif_node	*kif;

	if ((kif = kif_find(ifindex)) == NULL)
		return (0);

	return (kif->k.nh_reachable);
}

/* misc */
static void
protect_lo(void)
{
	struct kroute	 kr4, kr6;

	/* special protection for 127/8 */
	memset(&kr4, 0, sizeof(kr4));
	kr4.af = AF_INET;
	kr4.prefix.v4.s_addr = htonl(INADDR_LOOPBACK & IN_CLASSA_NET);
	kr4.prefixlen = 8;
	kr4.flags = F_KERNEL|F_CONNECTED;
	kroute_insert(&kr4);

	/* special protection for ::1 */
	memset(&kr6, 0, sizeof(kr6));
	kr6.af = AF_INET6;
	kr6.prefix.v6 = in6addr_loopback;
	kr6.prefixlen = 128;
	kr6.flags = F_KERNEL|F_CONNECTED;
	kroute_insert(&kr6);
}

/* misc */
static uint8_t
prefixlen_classful(in_addr_t ina)
{
	/* it hurt to write this. */

	if (ina >= 0xf0000000U)		/* class E */
		return (32);
	else if (ina >= 0xe0000000U)	/* class D */
		return (4);
	else if (ina >= 0xc0000000U)	/* class C */
		return (24);
	else if (ina >= 0x80000000U)	/* class B */
		return (16);
	else				/* class A */
		return (8);
}

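/*
 * Routing socket messages carry a sequence of variable-length sockaddrs
 * after the fixed header; each one is padded to a multiple of sizeof(long).
 * ROUNDUP() below computes that padded length, both when walking received
 * messages (get_rtaddrs) and when building IPv6 messages to send.
 */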
#define ROUNDUP(a)	\
    ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

static void
get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
{
	int	i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (addrs & (1 << i)) {
			rti_info[i] = sa;
			sa = (struct sockaddr *)((char *)(sa) +
			    ROUNDUP(sa->sa_len));
		} else
			rti_info[i] = NULL;
	}
}

static void
if_change(unsigned short ifindex, int flags, struct if_data *ifd,
    struct sockaddr_dl *sdl)
{
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;
	struct kif		*kif;
	uint8_t			 reachable;

	if ((kif = kif_update(ifindex, flags, ifd, sdl)) == NULL) {
		log_warn("%s: kif_update(%u)", __func__, ifindex);
		return;
	}

	reachable = (kif->flags & IFF_UP) &&
	    LINK_STATE_IS_UP(kif->link_state);

	if (reachable == kif->nh_reachable)
		return;		/* nothing changed wrt nexthop validity */

	kif->nh_reachable = reachable;

	/* notify eigrpe about link state */
	main_imsg_compose_eigrpe(IMSG_IFINFO, 0, kif, sizeof(struct kif));

	/* notify rde about link going down */
	if (!kif->nh_reachable)
		main_imsg_compose_rde(IMSG_IFDOWN, 0, kif, sizeof(struct kif));

	/* update redistribute list */
	RB_FOREACH(kp, kroute_tree, &krt) {
		TAILQ_FOREACH(kprio, &kp->priorities, entry) {
			TAILQ_FOREACH(kn, &kprio->nexthops, entry) {
				if (kn->r.ifindex != ifindex)
					continue;

				if (reachable)
					kn->r.flags &= ~F_DOWN;
				else
					kn->r.flags |= F_DOWN;
			}
		}
		kr_redistribute(kp);
	}
}

static void
if_newaddr(unsigned short ifindex, struct sockaddr *ifa, struct sockaddr *mask,
    struct sockaddr *brd)
{
	struct kif_node		*kif;
	struct sockaddr_in	*ifa4, *mask4, *brd4;
	struct sockaddr_in6	*ifa6, *mask6, *brd6;
	struct kif_addr		*ka;

	if (ifa == NULL)
		return;
	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("%s: corresponding if %d not found", __func__,
		    ifindex);
		return;
	}

	switch (ifa->sa_family) {
	case AF_INET:
		ifa4 = (struct sockaddr_in *) ifa;
		mask4 = (struct sockaddr_in *) mask;
		brd4 = (struct sockaddr_in *) brd;

		/* filter out unwanted addresses */
		if (bad_addr_v4(ifa4->sin_addr))
			return;

		if ((ka = calloc(1, sizeof(struct kif_addr))) == NULL)
			fatal("if_newaddr");
		ka->a.addr.v4 = ifa4->sin_addr;
		if (mask4)
			ka->a.prefixlen =
			    mask2prefixlen(mask4->sin_addr.s_addr);
		if (brd4)
			ka->a.dstbrd.v4 = brd4->sin_addr;
		break;
	case AF_INET6:
		ifa6 = (struct sockaddr_in6 *) ifa;
		mask6 = (struct sockaddr_in6 *) mask;
		brd6 = (struct sockaddr_in6 *) brd;

		/* We only care about link-local and global-scope. */
		if (bad_addr_v6(&ifa6->sin6_addr))
			return;

		clearscope(&ifa6->sin6_addr);

		if ((ka = calloc(1, sizeof(struct kif_addr))) == NULL)
			fatal("if_newaddr");
		ka->a.addr.v6 = ifa6->sin6_addr;
		if (mask6)
			ka->a.prefixlen = mask2prefixlen6(mask6);
		if (brd6)
			ka->a.dstbrd.v6 = brd6->sin6_addr;
		break;
	default:
		return;
	}

	ka->a.ifindex = ifindex;
	ka->a.af = ifa->sa_family;
	TAILQ_INSERT_TAIL(&kif->addrs, ka, entry);

	/* notify eigrpe about new address */
	main_imsg_compose_eigrpe(IMSG_NEWADDR, 0, &ka->a, sizeof(ka->a));
}

static void
if_deladdr(unsigned short ifindex, struct sockaddr *ifa, struct sockaddr *mask,
    struct sockaddr *brd)
{
	struct kif_node		*kif;
	struct sockaddr_in	*ifa4, *mask4, *brd4;
	struct sockaddr_in6	*ifa6, *mask6, *brd6;
	struct kaddr		 k;
	struct kif_addr		*ka, *nka;

	if (ifa == NULL)
		return;
	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("%s: corresponding if %d not found", __func__,
		    ifindex);
		return;
	}

	memset(&k, 0, sizeof(k));
	k.af = ifa->sa_family;
	switch (ifa->sa_family) {
	case AF_INET:
		ifa4 = (struct sockaddr_in *) ifa;
		mask4 = (struct sockaddr_in *) mask;
		brd4 = (struct sockaddr_in *) brd;

		/* filter out unwanted addresses */
		if (bad_addr_v4(ifa4->sin_addr))
			return;

		k.addr.v4 = ifa4->sin_addr;
		if (mask4)
			k.prefixlen = mask2prefixlen(mask4->sin_addr.s_addr);
		if (brd4)
			k.dstbrd.v4 = brd4->sin_addr;
		break;
	case AF_INET6:
		ifa6 = (struct sockaddr_in6 *) ifa;
		mask6 = (struct sockaddr_in6 *) mask;
		brd6 = (struct sockaddr_in6 *) brd;

		/* We only care about link-local and global-scope. */
		if (bad_addr_v6(&ifa6->sin6_addr))
			return;

		clearscope(&ifa6->sin6_addr);

		k.addr.v6 = ifa6->sin6_addr;
		if (mask6)
			k.prefixlen = mask2prefixlen6(mask6);
		if (brd6)
			k.dstbrd.v6 = brd6->sin6_addr;
		break;
	default:
		return;
	}

	for (ka = TAILQ_FIRST(&kif->addrs); ka != NULL; ka = nka) {
		nka = TAILQ_NEXT(ka, entry);

		if (ka->a.af != k.af ||
		    ka->a.prefixlen != k.prefixlen ||
		    eigrp_addrcmp(ka->a.af, &ka->a.addr, &k.addr) ||
		    eigrp_addrcmp(ka->a.af, &ka->a.dstbrd, &k.dstbrd))
			continue;

		/* notify eigrpe about removed address */
		main_imsg_compose_eigrpe(IMSG_DELADDR, 0, &ka->a,
		    sizeof(ka->a));
		TAILQ_REMOVE(&kif->addrs, ka, entry);
		free(ka);
		return;
	}
}

static void
if_announce(void *msg)
{
	struct if_announcemsghdr	*ifan;
	struct kif_node			*kif;

	ifan = msg;

	switch (ifan->ifan_what) {
	case IFAN_ARRIVAL:
		kif = kif_insert(ifan->ifan_index);
		if (kif)
			strlcpy(kif->k.ifname, ifan->ifan_name,
			    sizeof(kif->k.ifname));
		break;
	case IFAN_DEPARTURE:
		kif = kif_find(ifan->ifan_index);
		if (kif)
			kif_remove(kif);
		break;
	}
}

/* rtsock */
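/*
 * send_rtmsg_v4() and send_rtmsg_v6() below assemble a routing socket
 * message as an iovec: the rt_msghdr first, then the destination, optional
 * gateway and netmask sockaddrs, adjusting rtm_addrs and rtm_msglen for
 * every part that gets appended before writing it out with writev().
 */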
static int
send_rtmsg_v4(int fd, int action, struct kroute *kr)
{
	struct iovec		iov[5];
	struct rt_msghdr	hdr;
	struct sockaddr_in	prefix;
	struct sockaddr_in	nexthop;
	struct sockaddr_in	mask;
	int			iovcnt = 0;

	if (kr_state.fib_sync == 0)
		return (0);

	/* initialize header */
	memset(&hdr, 0, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_priority = kr->priority;
	hdr.rtm_tableid = kr_state.rdomain;	/* rtableid */
	if (action == RTM_CHANGE)
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	else
		hdr.rtm_flags = RTF_MPATH;
	if (kr->flags & F_BLACKHOLE)
		hdr.rtm_flags |= RTF_BLACKHOLE;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	memset(&prefix, 0, sizeof(prefix));
	prefix.sin_len = sizeof(prefix);
	prefix.sin_family = AF_INET;
	prefix.sin_addr = kr->prefix.v4;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += sizeof(prefix);
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = sizeof(prefix);

	if (kr->nexthop.v4.s_addr != 0) {
		memset(&nexthop, 0, sizeof(nexthop));
		nexthop.sin_len = sizeof(nexthop);
		nexthop.sin_family = AF_INET;
		nexthop.sin_addr = kr->nexthop.v4;
		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += sizeof(nexthop);
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = sizeof(nexthop);
	}

	memset(&mask, 0, sizeof(mask));
	mask.sin_len = sizeof(mask);
	mask.sin_family = AF_INET;
	mask.sin_addr.s_addr = prefixlen2mask(kr->prefixlen);
	/* adjust header */
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += sizeof(mask);
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = sizeof(mask);

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    inet_ntoa(kr->prefix.v4),
				    kr->prefixlen);
				return (0);
			}
		}
		log_warn("%s: action %u, prefix %s/%u", __func__, hdr.rtm_type,
		    inet_ntoa(kr->prefix.v4), kr->prefixlen);
		return (0);
	}

	return (0);
}

static int
send_rtmsg_v6(int fd, int action, struct kroute *kr)
{
	struct iovec		iov[5];
	struct rt_msghdr	hdr;
	struct pad {
		struct sockaddr_in6	addr;
		char			pad[sizeof(long)]; /* thank you IPv6 */
	} prefix, nexthop, mask;
	int			iovcnt = 0;

	if (kr_state.fib_sync == 0)
		return (0);

	/* initialize header */
	memset(&hdr, 0, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_priority = kr->priority;
	hdr.rtm_tableid = kr_state.rdomain;	/* rtableid */
	if (action == RTM_CHANGE)
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	else
		hdr.rtm_flags = RTF_MPATH;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	memset(&prefix, 0, sizeof(prefix));
	prefix.addr.sin6_len = sizeof(struct sockaddr_in6);
	prefix.addr.sin6_family = AF_INET6;
	prefix.addr.sin6_addr = kr->prefix.v6;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

	if (!IN6_IS_ADDR_UNSPECIFIED(&kr->nexthop.v6)) {
		memset(&nexthop, 0, sizeof(nexthop));
		nexthop.addr.sin6_len = sizeof(struct sockaddr_in6);
		nexthop.addr.sin6_family = AF_INET6;
		nexthop.addr.sin6_addr = kr->nexthop.v6;
		nexthop.addr.sin6_scope_id = kr->ifindex;
		embedscope(&nexthop.addr);

		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
	}

	memset(&mask, 0, sizeof(mask));
	mask.addr.sin6_len = sizeof(struct sockaddr_in6);
	mask.addr.sin6_family = AF_INET6;
	mask.addr.sin6_addr = *prefixlen2mask6(kr->prefixlen);
	/* adjust header */
	if (kr->prefixlen == 128)
		hdr.rtm_flags |= RTF_HOST;
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    log_in6addr(&kr->prefix.v6),
				    kr->prefixlen);
				return (0);
			}
		}
		log_warn("%s: action %u, prefix %s/%u", __func__, hdr.rtm_type,
		    log_in6addr(&kr->prefix.v6), kr->prefixlen);
		return (0);
	}

	return (0);
}

static int
send_rtmsg(int fd, int action, struct kroute *kr)
{
	switch (kr->af) {
	case AF_INET:
		return (send_rtmsg_v4(fd, action, kr));
	case AF_INET6:
		return (send_rtmsg_v6(fd, action, kr));
	default:
		break;
	}

	return (-1);
}

static int
fetchtable(void)
{
	size_t		 len;
	int		 mib[7];
	char		*buf;
	int		 rv;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = 0;
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;
	mib[6] = kr_state.rdomain;	/* rtableid */

	if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("%s", __func__);
		return (-1);
	}
	if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	rv = rtmsg_process(buf, len);
	free(buf);

	return (rv);
}

static int
fetchifs(void)
{
	size_t		 len;
	int		 mib[6];
	char		*buf;
	int		 rv;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = 0;	/* wildcard */
	mib[4] = NET_RT_IFLIST;
	mib[5] = 0;

	if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("%s", __func__);
		return (-1);
	}
	if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	rv = rtmsg_process(buf, len);
	free(buf);

	return (rv);
}

static int
dispatch_rtmsg(void)
{
	char	buf[RT_BUF_SIZE];
	ssize_t	n;

	if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
		if (errno == EAGAIN || errno == EINTR)
			return (0);
		log_warn("%s: read error", __func__);
		return (-1);
	}

	if (n == 0) {
		log_warnx("routing socket closed");
		return (-1);
	}

	return (rtmsg_process(buf, n));
}

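/*
 * rtmsg_process() parses a buffer of routing messages, either a sysctl
 * dump (fetchtable/fetchifs) or data read from the routing socket, and
 * dispatches route and interface events to the handlers above.
 */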
static int
rtmsg_process(char *buf, size_t len)
{
	struct rt_msghdr	*rtm;
	struct if_msghdr	 ifm;
	struct ifa_msghdr	*ifam;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	size_t			 offset;
	char			*next;

	for (offset = 0; offset < len; offset += rtm->rtm_msglen) {
		next = buf + offset;
		rtm = (struct rt_msghdr *)next;
		if (len < offset + sizeof(unsigned short) ||
		    len < offset + rtm->rtm_msglen)
			fatalx("rtmsg_process: partial rtm in buffer");
		if (rtm->rtm_version != RTM_VERSION)
			continue;

		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

		switch (rtm->rtm_type) {
		case RTM_ADD:
		case RTM_GET:
		case RTM_CHANGE:
		case RTM_DELETE:
			if (rtm->rtm_errno)	/* failed attempts... */
				continue;

			if (rtm->rtm_tableid != kr_state.rdomain)
				continue;

			if (rtm->rtm_type == RTM_GET &&
			    rtm->rtm_pid != kr_state.pid)
				continue;

			/* Skip ARP/ND cache and broadcast routes. */
			if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
				continue;

			if (rtmsg_process_route(rtm, rti_info) == -1)
				return (-1);
		}

		switch (rtm->rtm_type) {
		case RTM_IFINFO:
			memcpy(&ifm, next, sizeof(ifm));
			if_change(ifm.ifm_index, ifm.ifm_flags, &ifm.ifm_data,
			    (struct sockaddr_dl *)rti_info[RTAX_IFP]);
			break;
		case RTM_NEWADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;

			if_newaddr(ifam->ifam_index,
			    (struct sockaddr *)rti_info[RTAX_IFA],
			    (struct sockaddr *)rti_info[RTAX_NETMASK],
			    (struct sockaddr *)rti_info[RTAX_BRD]);
			break;
		case RTM_DELADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;

			if_deladdr(ifam->ifam_index,
			    (struct sockaddr *)rti_info[RTAX_IFA],
			    (struct sockaddr *)rti_info[RTAX_NETMASK],
			    (struct sockaddr *)rti_info[RTAX_BRD]);
			break;
		case RTM_IFANNOUNCE:
			if_announce(next);
			break;
		default:
			/* ignore for now */
			break;
		}
	}

	return (offset);
}

static int
rtmsg_process_route(struct rt_msghdr *rtm, struct sockaddr *rti_info[RTAX_MAX])
{
	struct sockaddr		*sa;
	struct sockaddr_in	*sa_in;
	struct sockaddr_in6	*sa_in6;
	struct kroute		 kr;
	struct kroute_prefix	*kp;
	struct kroute_priority	*kprio;
	struct kroute_node	*kn;

	if ((sa = rti_info[RTAX_DST]) == NULL)
		return (-1);

	memset(&kr, 0, sizeof(kr));
	kr.af = sa->sa_family;
	switch (kr.af) {
	case AF_INET:
		kr.prefix.v4 = ((struct sockaddr_in *)sa)->sin_addr;
		sa_in = (struct sockaddr_in *) rti_info[RTAX_NETMASK];
		if (sa_in != NULL && sa_in->sin_len != 0)
			kr.prefixlen = mask2prefixlen(sa_in->sin_addr.s_addr);
		else if (rtm->rtm_flags & RTF_HOST)
			kr.prefixlen = 32;
		else if (kr.prefix.v4.s_addr == INADDR_ANY)
			kr.prefixlen = 0;
		else
			kr.prefixlen = prefixlen_classful(kr.prefix.v4.s_addr);
		break;
	case AF_INET6:
		kr.prefix.v6 = ((struct sockaddr_in6 *)sa)->sin6_addr;
		sa_in6 = (struct sockaddr_in6 *)rti_info[RTAX_NETMASK];
		if (sa_in6 != NULL && sa_in6->sin6_len != 0)
			kr.prefixlen = mask2prefixlen6(sa_in6);
		else if (rtm->rtm_flags & RTF_HOST)
			kr.prefixlen = 128;
		else if (IN6_IS_ADDR_UNSPECIFIED(&kr.prefix.v6))
			kr.prefixlen = 0;
		else
			fatalx("in6 net addr without netmask");
		break;
	default:
		return (0);
	}
	kr.ifindex = rtm->rtm_index;
	if ((sa = rti_info[RTAX_GATEWAY]) != NULL) {
		switch (sa->sa_family) {
		case AF_INET:
			kr.nexthop.v4 = ((struct sockaddr_in *)sa)->sin_addr;
			break;
		case AF_INET6:
			sa_in6 = (struct sockaddr_in6 *)sa;
			recoverscope(sa_in6);
			kr.nexthop.v6 = sa_in6->sin6_addr;
			if (sa_in6->sin6_scope_id)
				kr.ifindex = sa_in6->sin6_scope_id;
			break;
		case AF_LINK:
			kr.flags |= F_CONNECTED;
			break;
		}
	}
	kr.flags |= F_KERNEL;
	if (rtm->rtm_flags & RTF_STATIC)
		kr.flags |= F_STATIC;
	if (rtm->rtm_flags & RTF_BLACKHOLE)
		kr.flags |= F_BLACKHOLE;
	if (rtm->rtm_flags & RTF_REJECT)
		kr.flags |= F_REJECT;
	if (rtm->rtm_flags & RTF_DYNAMIC)
		kr.flags |= F_DYNAMIC;
	if (rtm->rtm_flags & RTF_CONNECTED)
		kr.flags |= F_CONNECTED;
	kr.priority = rtm->rtm_priority;

	if (rtm->rtm_type == RTM_CHANGE) {
		/*
		 * The kernel doesn't allow RTM_CHANGE for multipath routes.
		 * If we got this message we know that the route has only one
		 * nexthop and we should remove it before installing the same
		 * route with the new nexthop.
		 */
		kp = kroute_find_prefix(kr.af, &kr.prefix, kr.prefixlen);
		if (kp) {
			kprio = kroute_find_prio(kp, kr.priority);
			if (kprio) {
				kn = TAILQ_FIRST(&kprio->nexthops);
				if (kn)
					kroute_remove(&kn->r);
			}
		}
	}

	kn = NULL;
	kp = kroute_find_prefix(kr.af, &kr.prefix, kr.prefixlen);
	if (kp) {
		kprio = kroute_find_prio(kp, kr.priority);
		if (kprio)
			kn = kroute_find_gw(kprio, &kr.nexthop);
	}

	if (rtm->rtm_type == RTM_DELETE) {
		if (kn == NULL || !(kn->r.flags & F_KERNEL))
			return (0);
		return (kroute_remove(&kr));
	}

	if (!eigrp_addrisset(kr.af, &kr.nexthop) && !(kr.flags & F_CONNECTED)) {
		log_warnx("%s: no nexthop for %s/%u", __func__,
		    log_addr(kr.af, &kr.prefix), kr.prefixlen);
		return (-1);
	}

	if (kn != NULL) {
		/* update route */
		kn->r = kr;

		if (kif_validate(kn->r.ifindex))
			kn->r.flags &= ~F_DOWN;
		else
			kn->r.flags |= F_DOWN;

		kr_redistribute(kp);
	} else {
		if ((rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_GET) &&
		    (kr.priority == eigrpd_conf->fib_priority_internal ||
		    kr.priority == eigrpd_conf->fib_priority_external ||
		    kr.priority == eigrpd_conf->fib_priority_summary)) {
			log_warnx("alien EIGRP route %s/%d", log_addr(kr.af,
			    &kr.prefix), kr.prefixlen);
			return (send_rtmsg(kr_state.fd, RTM_DELETE, &kr));
		}

		kroute_insert(&kr);
	}

	return (0);
}