GCC Code Coverage Report

File:       usr.sbin/ripd/kroute.c
Directory:  ./
Date:       2017-11-13

            Exec    Total   Coverage
Lines:         0      572      0.0 %
Branches:      0      702      0.0 %

Source:
/*	$OpenBSD: kroute.c,v 1.32 2017/07/24 11:00:01 friehm Exp $ */

/*
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/route.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "rip.h"
#include "ripd.h"
#include "log.h"

struct {
	u_int32_t		rtseq;
	pid_t			pid;
	int			fib_sync;
	int			fd;
	struct event		ev;
	u_int			rdomain;
} kr_state;

struct kroute_node {
	RB_ENTRY(kroute_node)	 entry;
	struct kroute		 r;
};

struct kif_node {
	RB_ENTRY(kif_node)	 entry;
	struct kif		 k;
};

void	kr_redistribute(int, struct kroute *);
int	kroute_compare(struct kroute_node *, struct kroute_node *);
int	kif_compare(struct kif_node *, struct kif_node *);
int	kr_change_fib(struct kroute_node *, struct kroute *, int);

struct kroute_node	*kroute_find(in_addr_t, in_addr_t, u_int8_t);
int			 kroute_insert(struct kroute_node *);
int			 kroute_remove(struct kroute_node *);
void			 kroute_clear(void);

struct kif_node		*kif_find(int);
int			 kif_insert(struct kif_node *);
int			 kif_remove(struct kif_node *);
void			 kif_clear(void);
int			 kif_validate(int);

struct kroute_node	*kroute_match(in_addr_t);

int		protect_lo(void);
u_int8_t	prefixlen_classful(in_addr_t);
void		get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
void		if_change(u_short, int, struct if_data *);
void		if_announce(void *);

int		send_rtmsg(int, int, struct kroute *);
int		dispatch_rtmsg(void);
int		fetchtable(void);
int		fetchifs(int);

RB_HEAD(kroute_tree, kroute_node)	krt;
RB_PROTOTYPE(kroute_tree, kroute_node, entry, kroute_compare)
RB_GENERATE(kroute_tree, kroute_node, entry, kroute_compare)

RB_HEAD(kif_tree, kif_node)		kit;
RB_PROTOTYPE(kif_tree, kif_node, entry, kif_compare)
RB_GENERATE(kif_tree, kif_node, entry, kif_compare)

int
kif_init(void)
{
	RB_INIT(&kit);

	if (fetchifs(0) == -1)
		return (-1);

	return (0);
}
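/*
 * kr_init(): open the routing socket (non-blocking, close-on-exec),
 * turn off reception of our own messages via SO_USELOOPBACK, grow
 * SO_RCVBUF towards MAX_RTSOCK_BUF as far as the kernel allows, load
 * the current kernel table, shield 127/8 and register the socket with
 * the event loop.  The requested fib_sync mode is only applied once the
 * initial fetch has succeeded.
 */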
int
kr_init(int fs, u_int rdomain)
{
	int		opt = 0, rcvbuf, default_rcvbuf;
	socklen_t	optlen;

	if ((kr_state.fd = socket(AF_ROUTE,
	    SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)) == -1) {
		log_warn("kr_init: socket");
		return (-1);
	}

	/* not interested in my own messages */
	if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
	    &opt, sizeof(opt)) == -1)
		log_warn("kr_init: setsockopt");	/* not fatal */

	/* grow receive buffer, don't wanna miss messages */
	optlen = sizeof(default_rcvbuf);
	if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
	    &default_rcvbuf, &optlen) == -1)
		log_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF");
	else
		for (rcvbuf = MAX_RTSOCK_BUF;
		    rcvbuf > default_rcvbuf &&
		    setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
		    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
		    rcvbuf /= 2)
			;	/* nothing */

	kr_state.pid = getpid();
	kr_state.rtseq = 1;

	RB_INIT(&krt);

	if (fetchtable() == -1)
		return (-1);

	if (protect_lo() == -1)
		return (-1);

	kr_state.fib_sync = fs; /* now set correct sync mode */
	kr_state.rdomain = rdomain;

	event_set(&kr_state.ev, kr_state.fd, EV_READ | EV_PERSIST,
	    kr_dispatch_msg, NULL);
	event_add(&kr_state.ev, NULL);

	return (0);
}
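/*
 * kr_change_fib(): push an RTM_ADD/RTM_CHANGE for a RIP route to the
 * kernel and mirror it in the local tree.  Nexthops inside 127/8 are
 * ignored silently.  On RTM_ADD a fresh kroute_node is allocated; on
 * RTM_CHANGE only the nexthop of the existing node is updated.
 */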
int
kr_change_fib(struct kroute_node *kr, struct kroute *kroute, int action)
{
	/* nexthop within 127/8 -> ignore silently */
	if ((kroute->nexthop.s_addr & htonl(IN_CLASSA_NET)) ==
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET))
		return (0);

	if (send_rtmsg(kr_state.fd, action, kroute) == -1)
		return (-1);

	if (action == RTM_ADD) {
		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL)
			fatal("kr_change_fib");

		kr->r.prefix.s_addr = kroute->prefix.s_addr;
		kr->r.netmask.s_addr = kroute->netmask.s_addr;
		kr->r.nexthop.s_addr = kroute->nexthop.s_addr;
		kr->r.flags = kroute->flags |= F_RIPD_INSERTED;
		kr->r.priority = RTP_RIP;

		if (kroute_insert(kr) == -1) {
			log_debug("kr_update_fib: cannot insert %s",
			    inet_ntoa(kr->r.nexthop));
			free(kr);
		}
	} else
		kr->r.nexthop.s_addr = kroute->nexthop.s_addr;

	return (0);
}

int
kr_change(struct kroute *kroute)
{
	struct kroute_node	*kr;
	int			 action = RTM_ADD;

	kr = kroute_find(kroute->prefix.s_addr, kroute->netmask.s_addr,
	    RTP_RIP);
	if (kr != NULL)
		action = RTM_CHANGE;

	return (kr_change_fib(kr, kroute, action));
}

int
kr_delete(struct kroute *kroute)
{
	struct kroute_node	*kr;

	kr = kroute_find(kroute->prefix.s_addr, kroute->netmask.s_addr,
	    RTP_RIP);
	if (kr == NULL)
		return (0);

	if (kr->r.priority != RTP_RIP)
		log_warn("kr_delete_fib: %s/%d has wrong priority %d",
		    inet_ntoa(kr->r.prefix), mask2prefixlen(kr->r.netmask.s_addr),
		    kr->r.priority);

	if (send_rtmsg(kr_state.fd, RTM_DELETE, kroute) == -1)
		return (-1);

	if (kroute_remove(kr) == -1)
		return (-1);

	return (0);
}

void
kr_shutdown(void)
{
	kr_fib_decouple();

	kroute_clear();
	kif_clear();
}
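/*
 * kr_fib_couple()/kr_fib_decouple(): switch FIB synchronisation on or
 * off.  Coupling re-adds every RTP_RIP route from the local tree to the
 * kernel; decoupling deletes them again, leaving the tree itself
 * untouched.
 */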
void
kr_fib_couple(void)
{
	struct kroute_node	*kr;

	if (kr_state.fib_sync == 1)	/* already coupled */
		return;

	kr_state.fib_sync = 1;

	RB_FOREACH(kr, kroute_tree, &krt)
		if (kr->r.priority == RTP_RIP)
			send_rtmsg(kr_state.fd, RTM_ADD, &kr->r);

	log_info("kernel routing table coupled");
}

void
kr_fib_decouple(void)
{
	struct kroute_node	*kr;

	if (kr_state.fib_sync == 0)	/* already decoupled */
		return;

	RB_FOREACH(kr, kroute_tree, &krt)
		if (kr->r.priority == RTP_RIP)
			send_rtmsg(kr_state.fd, RTM_DELETE, &kr->r);

	kr_state.fib_sync = 0;

	log_info("kernel routing table decoupled");
}

/* ARGSUSED */
void
kr_dispatch_msg(int fd, short event, void *bula)
{
	dispatch_rtmsg();
}
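/*
 * kr_show_route(): answer IMSG_CTL_KROUTE and IMSG_CTL_KROUTE_ADDR
 * requests by streaming matching kroutes to the ripe process,
 * terminated by an IMSG_CTL_END message.
 */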
void
kr_show_route(struct imsg *imsg)
{
	struct kroute_node	*kr;
	int			 flags;
	struct in_addr		 addr;

	switch (imsg->hdr.type) {
	case IMSG_CTL_KROUTE:
		if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags)) {
			log_warnx("kr_show_route: wrong imsg len");
			return;
		}
		memcpy(&flags, imsg->data, sizeof(flags));
		RB_FOREACH(kr, kroute_tree, &krt)
			if (!flags || kr->r.flags & flags) {
				main_imsg_compose_ripe(IMSG_CTL_KROUTE,
				    imsg->hdr.pid, &kr->r, sizeof(kr->r));
			}
		break;
	case IMSG_CTL_KROUTE_ADDR:
		if (imsg->hdr.len != IMSG_HEADER_SIZE +
		    sizeof(struct in_addr)) {
			log_warnx("kr_show_route: wrong imsg len");
			return;
		}
		memcpy(&addr, imsg->data, sizeof(addr));
		kr = NULL;
		kr = kroute_match(addr.s_addr);
		if (kr != NULL)
			main_imsg_compose_ripe(IMSG_CTL_KROUTE, imsg->hdr.pid,
			    &kr->r, sizeof(kr->r));
		break;
	default:
		log_debug("kr_show_route: error handling imsg");
		break;
	}

	main_imsg_compose_ripe(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
}

void
kr_ifinfo(char *ifname, pid_t pid)
{
	struct kif_node	*kif;

	RB_FOREACH(kif, kif_tree, &kit)
		if (ifname == NULL || !strcmp(ifname, kif->k.ifname)) {
			main_imsg_compose_ripe(IMSG_CTL_IFINFO,
			    pid, &kif->k, sizeof(kif->k));
		}

	main_imsg_compose_ripe(IMSG_CTL_END, pid, NULL, 0);
}
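/*
 * kr_redistribute(): decide whether a kernel route is announced to the
 * RDE.  Routes that are down, in 127/8, multicast or class E space, or
 * that point at a loopback nexthop (unless reject/blackhole) are never
 * redistributed; everything else is filtered through rip_redistribute().
 */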
void
kr_redistribute(int type, struct kroute *kr)
{
	u_int32_t	a;


	if (type == IMSG_NETWORK_DEL) {
dont_redistribute:
		/* was the route redistributed? */
		if (kr->flags & F_REDISTRIBUTED) {
			/* remove redistributed flag */
			kr->flags &= ~F_REDISTRIBUTED;
			main_imsg_compose_rde(type, 0, kr,
			    sizeof(struct kroute));
		}
		return;
	}

	/* interface is not up and running so don't announce */
	if (kr->flags & F_DOWN)
		return;

	/*
	 * We consider the loopback net, multicast and experimental addresses
	 * as not redistributable.
	 */
	a = ntohl(kr->prefix.s_addr);
	if (IN_MULTICAST(a) || IN_BADCLASS(a) ||
	    (a >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
		return;
	/*
	 * Consider networks with nexthop loopback as not redistributable
	 * unless it is a reject or blackhole route.
	 */
	if (kr->nexthop.s_addr == htonl(INADDR_LOOPBACK) &&
	    !(kr->flags & (F_BLACKHOLE|F_REJECT)))
		return;

	/* Should we redistribute this route? */
	if (!rip_redistribute(kr))
		goto dont_redistribute;

	/* Does not matter if we resend the kr, the RDE will cope. */
	kr->flags |= F_REDISTRIBUTED;
	main_imsg_compose_rde(type, 0, kr, sizeof(struct kroute));
}

/* rb-tree compare */
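/*
 * Sort order: prefix, then netmask, then priority.  A priority of
 * RTP_ANY compares equal to any priority, so lookups with RTP_ANY stop
 * at the first node found for that prefix/netmask pair.
 */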
int
kroute_compare(struct kroute_node *a, struct kroute_node *b)
{
	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
		return (-1);
	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
		return (1);
	if (ntohl(a->r.netmask.s_addr) < ntohl(b->r.netmask.s_addr))
		return (-1);
	if (ntohl(a->r.netmask.s_addr) > ntohl(b->r.netmask.s_addr))
		return (1);

	/* if the priority is RTP_ANY finish on the first address hit */
	if (a->r.priority == RTP_ANY || b->r.priority == RTP_ANY)
		return (0);
	if (a->r.priority < b->r.priority)
		return (-1);
	if (a->r.priority > b->r.priority)
		return (1);

	return (0);
}

int
kif_compare(struct kif_node *a, struct kif_node *b)
{
	return (b->k.ifindex - a->k.ifindex);
}

/* tree management */
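/*
 * kroute_find(): look up a prefix/netmask in the tree.  With a prio of
 * RTP_ANY the hit is rewound via RB_PREV to the first entry for that
 * prefix/netmask, i.e. the one with the lowest priority value.
 */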
struct kroute_node *
kroute_find(in_addr_t prefix, in_addr_t netmask, u_int8_t prio)
{
	struct kroute_node	s, *kn, *tmp;

	s.r.prefix.s_addr = prefix;
	s.r.netmask.s_addr = netmask;
	s.r.priority = prio;

	kn = RB_FIND(kroute_tree, &krt, &s);
	if (kn && prio == RTP_ANY) {
		tmp = RB_PREV(kroute_tree, &krt, kn);
		while (tmp) {
			if (kroute_compare(&s, tmp) == 0)
				kn = tmp;
			else
				break;
			tmp = RB_PREV(kroute_tree, &krt, kn);
		}
	}

	return (kn);
}
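/*
 * kroute_insert()/kroute_remove(): add or delete a node in the local
 * tree.  Kernel routes are marked up or down according to kif_validate()
 * and announced to (or withdrawn from) the RDE via kr_redistribute().
 */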
int
kroute_insert(struct kroute_node *kr)
{
	if (RB_INSERT(kroute_tree, &krt, kr) != NULL) {
		log_warnx("kroute_insert failed for %s/%u",
		    inet_ntoa(kr->r.prefix),
		    mask2prefixlen(kr->r.netmask.s_addr));
		free(kr);
		return (-1);
	}

	if (!(kr->r.flags & F_KERNEL)) {
		/* don't validate or redistribute rip route */
		kr->r.flags &= ~F_DOWN;
		return (0);
	}

	if (kif_validate(kr->r.ifindex))
		kr->r.flags &= ~F_DOWN;
	else
		kr->r.flags |= F_DOWN;

	kr_redistribute(IMSG_NETWORK_ADD, &kr->r);

	return (0);
}

int
kroute_remove(struct kroute_node *kr)
{
	if (RB_REMOVE(kroute_tree, &krt, kr) == NULL) {
		log_warnx("kroute_remove failed for %s/%u",
		    inet_ntoa(kr->r.prefix),
		    mask2prefixlen(kr->r.netmask.s_addr));
		return (-1);
	}

	kr_redistribute(IMSG_NETWORK_DEL, &kr->r);
	rtlabel_unref(kr->r.rtlabel);

	free(kr);
	return (0);
}

void
kroute_clear(void)
{
	struct kroute_node	*kr;

	while ((kr = RB_MIN(kroute_tree, &krt)) != NULL)
		kroute_remove(kr);
}

struct kif_node *
kif_find(int ifindex)
{
	struct kif_node	s;

	bzero(&s, sizeof(s));
	s.k.ifindex = ifindex;

	return (RB_FIND(kif_tree, &kit, &s));
}

struct kif *
kif_findname(char *ifname)
{
	struct kif_node	*kif;

	RB_FOREACH(kif, kif_tree, &kit)
		if (!strcmp(ifname, kif->k.ifname))
			return (&kif->k);

	return (NULL);
}

int
kif_insert(struct kif_node *kif)
{
	if (RB_INSERT(kif_tree, &kit, kif) != NULL) {
		log_warnx("RB_INSERT(kif_tree, &kit, kif)");
		free(kif);
		return (-1);
	}

	return (0);
}

int
kif_remove(struct kif_node *kif)
{
	if (RB_REMOVE(kif_tree, &kit, kif) == NULL) {
		log_warnx("RB_REMOVE(kif_tree, &kit, kif)");
		return (-1);
	}

	free(kif);
	return (0);
}

void
kif_clear(void)
{
	struct kif_node	*kif;

	while ((kif = RB_MIN(kif_tree, &kit)) != NULL)
		kif_remove(kif);
}
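/*
 * kif_validate(): report whether the nexthop on the given interface is
 * reachable (IFF_UP and link up).  An unknown ifindex is logged but
 * treated as reachable.
 */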
int
kif_validate(int ifindex)
{
	struct kif_node		*kif;

	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("interface with index %u not found", ifindex);
		return (1);
	}

	return (kif->k.nh_reachable);
}
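/*
 * kroute_match(): longest-prefix match for a destination address.  The
 * key is probed with every mask length from /32 down to /1, so the
 * default route is never hit inside the loop and is only tried as 0/0
 * afterwards.  For example, 192.0.2.1 is looked up as 192.0.2.1/32,
 * then 192.0.2.0/31, and so on, before falling back to the default.
 */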
struct kroute_node *
kroute_match(in_addr_t key)
{
	u_int8_t		 i;
	struct kroute_node	*kr;

	/* we will never match the default route */
	for (i = 32; i > 0; i--)
		if ((kr = kroute_find(key & prefixlen2mask(i),
		    prefixlen2mask(i), RTP_ANY)) != NULL)
			return (kr);

	/* if we don't have a match yet, try to find a default route */
	if ((kr = kroute_find(0, 0, RTP_ANY)) != NULL)
			return (kr);

	return (NULL);
}

/* misc */
int
protect_lo(void)
{
	struct kroute_node	*kr;

	/* special protection for 127/8 */
	if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
		log_warn("protect_lo");
		return (-1);
	}
	kr->r.prefix.s_addr = htonl(INADDR_LOOPBACK);
	kr->r.netmask.s_addr = htonl(IN_CLASSA_NET);
	kr->r.flags = F_KERNEL|F_CONNECTED;

	if (RB_INSERT(kroute_tree, &krt, kr) != NULL)
		free(kr);	/* kernel route already there, no problem */

	return (0);
}
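/*
 * prefixlen_classful(): fall back to the classful mask length (A=/8,
 * B=/16, C=/24, D=/4 multicast, E=/32) for routes that carry no netmask.
 */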
u_int8_t
prefixlen_classful(in_addr_t ina)
{
	/* it hurt to write this. */

	if (ina >= 0xf0000000U)		/* class E */
		return (32);
	else if (ina >= 0xe0000000U)	/* class D */
		return (4);
	else if (ina >= 0xc0000000U)	/* class C */
		return (24);
	else if (ina >= 0x80000000U)	/* class B */
		return (16);
	else				/* class A */
		return (8);
}

u_int8_t
mask2prefixlen(in_addr_t ina)
{
	if (ina == 0)
		return (0);
	else
		return (33 - ffs(ntohl(ina)));
}

in_addr_t
prefixlen2mask(u_int8_t prefixlen)
{
	if (prefixlen == 0)
		return (0);

	return (htonl(0xffffffff << (32 - prefixlen)));
}
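/*
 * The two conversions above are inverses for prefix lengths 1-32, e.g.
 * prefixlen2mask(24) == htonl(0xffffff00) and
 * mask2prefixlen(htonl(0xffffff00)) == 33 - ffs(0xffffff00) == 33 - 9
 * == 24.  A prefix length of 0 maps to a mask of 0 and back.
 */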
#define ROUNDUP(a) \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
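/*
 * get_rtaddrs(): walk the sockaddrs that follow a routing message.  The
 * addrs bitmask says which RTAX_* slots are present; each sockaddr is
 * padded to a multiple of sizeof(long), hence ROUNDUP above.  Absent
 * slots are set to NULL.
 */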
void
get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
{
	int	i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (addrs & (1 << i)) {
			rti_info[i] = sa;
			sa = (struct sockaddr *)((char *)(sa) +
			    ROUNDUP(sa->sa_len));
		} else
			rti_info[i] = NULL;
	}
}
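/*
 * if_change(): update a cached interface on RTM_IFINFO.  If nexthop
 * reachability flips, the new state is pushed to the ripe process and
 * every route on that interface is re-announced or withdrawn.
 */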
void
if_change(u_short ifindex, int flags, struct if_data *ifd)
{
	struct kif_node		*kif;
	struct kroute_node	*kr;
	int			 type;
	u_int8_t		 reachable;

	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("interface with index %u not found", ifindex);
		return;
	}

	kif->k.flags = flags;
	kif->k.link_state = ifd->ifi_link_state;
	kif->k.if_type = ifd->ifi_type;
	kif->k.baudrate = ifd->ifi_baudrate;

	if ((reachable = (flags & IFF_UP) &&
	    LINK_STATE_IS_UP(ifd->ifi_link_state)) == kif->k.nh_reachable)
		return;		/* nothing changed wrt nexthop validity */

	kif->k.nh_reachable = reachable;
	type = reachable ? IMSG_NETWORK_ADD : IMSG_NETWORK_DEL;

	/* notify ripe about interface link state */
	main_imsg_compose_ripe(IMSG_IFINFO, 0, &kif->k, sizeof(kif->k));

	/* update redistribute list */
	RB_FOREACH(kr, kroute_tree, &krt)
		if (kr->r.ifindex == ifindex) {
			if (reachable)
				kr->r.flags &= ~F_DOWN;
			else
				kr->r.flags |= F_DOWN;

			kr_redistribute(type, &kr->r);
		}
}

void
if_announce(void *msg)
{
	struct if_announcemsghdr	*ifan;
	struct kif_node			*kif;

	ifan = msg;

	switch (ifan->ifan_what) {
	case IFAN_ARRIVAL:
		if ((kif = calloc(1, sizeof(struct kif_node))) == NULL) {
			log_warn("if_announce");
			return;
		}

		kif->k.ifindex = ifan->ifan_index;
		strlcpy(kif->k.ifname, ifan->ifan_name, sizeof(kif->k.ifname));
		kif_insert(kif);
		break;
	case IFAN_DEPARTURE:
		kif = kif_find(ifan->ifan_index);
		kif_remove(kif);
		break;
	}
}

/* rtsock */
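/*
 * send_rtmsg(): build an rt_msghdr plus DST, optional GATEWAY and
 * NETMASK sockaddrs in an iovec and write it to the routing socket,
 * but only while the FIB is coupled.  On ESRCH an RTM_CHANGE is retried
 * as RTM_ADD, and an already vanished RTM_DELETE target is only logged.
 */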
int
send_rtmsg(int fd, int action, struct kroute *kroute)
{
	struct iovec		iov[4];
	struct rt_msghdr	hdr;
	struct sockaddr_in	prefix;
	struct sockaddr_in	nexthop;
	struct sockaddr_in	mask;
	int			iovcnt = 0;

	if (kr_state.fib_sync == 0)
		return (0);

	/* initialize header */
	bzero(&hdr, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_priority = RTP_RIP;
	hdr.rtm_tableid = kr_state.rdomain;
	if (action == RTM_CHANGE)
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	bzero(&prefix, sizeof(prefix));
	prefix.sin_len = sizeof(prefix);
	prefix.sin_family = AF_INET;
	prefix.sin_addr.s_addr = kroute->prefix.s_addr;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += sizeof(prefix);
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = sizeof(prefix);

	if (kroute->nexthop.s_addr != 0) {
		bzero(&nexthop, sizeof(nexthop));
		nexthop.sin_len = sizeof(nexthop);
		nexthop.sin_family = AF_INET;
		nexthop.sin_addr.s_addr = kroute->nexthop.s_addr;
		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += sizeof(nexthop);
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = sizeof(nexthop);
	}

	bzero(&mask, sizeof(mask));
	mask.sin_len = sizeof(mask);
	mask.sin_family = AF_INET;
	mask.sin_addr.s_addr = kroute->netmask.s_addr;
	/* adjust header */
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += sizeof(mask);
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = sizeof(mask);


retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    inet_ntoa(kroute->prefix),
				    mask2prefixlen(kroute->netmask.s_addr));
				return (0);
			}
		}
		log_warn("send_rtmsg: action %u, prefix %s/%u",
		    hdr.rtm_type, inet_ntoa(kroute->prefix),
		    mask2prefixlen(kroute->netmask.s_addr));
		return (0);
	}

	return (0);
}
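/*
 * fetchtable(): dump the IPv4 routing table of the configured rdomain
 * via sysctl(NET_RT_DUMP) and seed the local tree.  Stale RTP_RIP
 * leftovers from a previous run are deleted from the kernel instead of
 * being inserted.
 */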
int
fetchtable(void)
{
	size_t			 len;
	int			 mib[7];
	char			*buf, *next, *lim;
	struct rt_msghdr	*rtm;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	struct sockaddr_in	*sa_in;
	struct sockaddr_rtlabel	*label;
	struct kroute_node	*kr;
	struct iface		*iface = NULL;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_INET;
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;
	mib[6] = kr_state.rdomain;	/* rtableid */

	if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("fetchtable");
		return (-1);
	}
	if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	lim = buf + len;
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)next;
		if (rtm->rtm_version != RTM_VERSION)
			continue;
		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

		if ((sa = rti_info[RTAX_DST]) == NULL)
			continue;

		/* Skip ARP/ND cache and broadcast routes. */
		if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
			continue;

#ifdef RTF_MPATH
		if (rtm->rtm_flags & RTF_MPATH)		/* multipath */
			continue;
#endif

		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
			log_warn("fetchtable");
			free(buf);
			return (-1);
		}

		kr->r.flags = F_KERNEL;
		kr->r.priority = rtm->rtm_priority;

		switch (sa->sa_family) {
		case AF_INET:
			kr->r.prefix.s_addr =
			    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
			sa_in = (struct sockaddr_in *)rti_info[RTAX_NETMASK];
			if (rtm->rtm_flags & RTF_STATIC)
				kr->r.flags |= F_STATIC;
			if (rtm->rtm_flags & RTF_BLACKHOLE)
				kr->r.flags |= F_BLACKHOLE;
			if (rtm->rtm_flags & RTF_REJECT)
				kr->r.flags |= F_REJECT;
			if (rtm->rtm_flags & RTF_DYNAMIC)
				kr->r.flags |= F_DYNAMIC;
			if (sa_in != NULL) {
				if (sa_in->sin_len == 0)
					break;
				kr->r.netmask.s_addr =
				    sa_in->sin_addr.s_addr;
			} else if (rtm->rtm_flags & RTF_HOST)
				kr->r.netmask.s_addr = prefixlen2mask(32);
			else
				kr->r.netmask.s_addr =
				    prefixlen2mask(prefixlen_classful
					(kr->r.prefix.s_addr));
			break;
		default:
			free(kr);
			continue;
		}

		kr->r.ifindex = rtm->rtm_index;

		iface = if_find_index(rtm->rtm_index);
		if (iface != NULL)
			kr->r.metric = iface->cost;
		else
			kr->r.metric = DEFAULT_COST;

		if ((sa = rti_info[RTAX_GATEWAY]) != NULL)
			switch (sa->sa_family) {
			case AF_INET:
				if (rtm->rtm_flags & RTF_CONNECTED) {
					kr->r.flags |= F_CONNECTED;
					break;
				}

				kr->r.nexthop.s_addr =
				    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
				break;
			case AF_LINK:
				/*
				 * Traditional BSD connected routes have
				 * a gateway of type AF_LINK.
				 */
				kr->r.flags |= F_CONNECTED;
				break;
			}

		if (rtm->rtm_priority == RTP_RIP) {
			send_rtmsg(kr_state.fd, RTM_DELETE, &kr->r);
			free(kr);
		} else {
			if ((label = (struct sockaddr_rtlabel *)
			    rti_info[RTAX_LABEL]) != NULL)
				kr->r.rtlabel =
				    rtlabel_name2id(label->sr_label);
			kroute_insert(kr);
		}

	}
	free(buf);
	return (0);
}
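/*
 * fetchifs(): load the interface list (or a single interface) via
 * sysctl(NET_RT_IFLIST) and populate the kif tree with flags, link
 * state, type, baudrate, MTU and name.
 */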
int
fetchifs(int ifindex)
{
	size_t			 len;
	int			 mib[6];
	char			*buf, *next, *lim;
	struct if_msghdr	 ifm;
	struct kif_node		*kif;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	struct sockaddr_dl	*sdl;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_INET;
	mib[4] = NET_RT_IFLIST;
	mib[5] = ifindex;

	if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("fetchif");
		return (-1);
	}
	if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	lim = buf + len;
	for (next = buf; next < lim; next += ifm.ifm_msglen) {
		memcpy(&ifm, next, sizeof(ifm));
		if (ifm.ifm_version != RTM_VERSION)
			continue;
		if (ifm.ifm_type != RTM_IFINFO)
			continue;

		sa = (struct sockaddr *)(next + sizeof(ifm));
		get_rtaddrs(ifm.ifm_addrs, sa, rti_info);

		if ((kif = calloc(1, sizeof(struct kif_node))) == NULL) {
			log_warn("fetchifs");
			free(buf);
			return (-1);
		}

		kif->k.ifindex = ifm.ifm_index;
		kif->k.flags = ifm.ifm_flags;
		kif->k.link_state = ifm.ifm_data.ifi_link_state;
		kif->k.if_type = ifm.ifm_data.ifi_type;
		kif->k.baudrate = ifm.ifm_data.ifi_baudrate;
		kif->k.mtu = ifm.ifm_data.ifi_mtu;
		kif->k.nh_reachable = (kif->k.flags & IFF_UP) &&
		    LINK_STATE_IS_UP(ifm.ifm_data.ifi_link_state);
		if ((sa = rti_info[RTAX_IFP]) != NULL)
			if (sa->sa_family == AF_LINK) {
				sdl = (struct sockaddr_dl *)sa;
				if (sdl->sdl_nlen >= sizeof(kif->k.ifname))
					memcpy(kif->k.ifname, sdl->sdl_data,
					    sizeof(kif->k.ifname) - 1);
				else if (sdl->sdl_nlen > 0)
					memcpy(kif->k.ifname, sdl->sdl_data,
					    sdl->sdl_nlen);
				/* string already terminated via calloc() */
			}

		kif_insert(kif);
	}
	free(buf);
	return (0);
}
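/*
 * dispatch_rtmsg(): drain the routing socket and apply RTM_ADD, RTM_CHANGE,
 * RTM_DELETE, RTM_IFINFO and RTM_IFANNOUNCE messages to the local tables.
 * Messages from our own pid, from other rdomains and failed kernel
 * attempts (rtm_errno set) are ignored.
 */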
int
dispatch_rtmsg(void)
{
	char			 buf[RT_BUF_SIZE];
	ssize_t			 n;
	char			*next, *lim;
	struct rt_msghdr	*rtm;
	struct if_msghdr	 ifm;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	struct sockaddr_in	*sa_in;
	struct sockaddr_rtlabel	*label;
	struct kroute_node	*kr;
	struct in_addr		 prefix, nexthop, netmask;
	struct iface		*iface = NULL;
	int			 flags;
	u_short			 ifindex = 0;
	u_int8_t		 metric, prio;

	if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
		if (errno == EAGAIN || errno == EINTR)
			return (0);
		log_warn("dispatch_rtmsg: read error");
		return (-1);
	}

	if (n == 0) {
		log_warnx("routing socket closed");
		return (-1);
	}

	lim = buf + n;
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)next;
		if (lim < next + sizeof(u_short) ||
		    lim < next + rtm->rtm_msglen)
			fatalx("dispatch_rtmsg: partial rtm in buffer");
		if (rtm->rtm_version != RTM_VERSION)
			continue;

		prefix.s_addr = 0;
		netmask.s_addr = 0;
		flags = F_KERNEL;
		nexthop.s_addr = 0;
		prio = 0;

		if (rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_CHANGE ||
		    rtm->rtm_type == RTM_DELETE) {
			sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
			get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

			if (rtm->rtm_tableid != kr_state.rdomain)
				continue;

			if (rtm->rtm_pid == kr_state.pid)	/* caused by us */
				continue;

			if (rtm->rtm_errno)			/* failed attempts... */
				continue;

			/* Skip ARP/ND cache and broadcast routes. */
			if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
				continue;

			prio = rtm->rtm_priority;

			switch (sa->sa_family) {
			case AF_INET:
				prefix.s_addr =
				    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
				sa_in = (struct sockaddr_in *)
				    rti_info[RTAX_NETMASK];
				if (sa_in != NULL) {
					if (sa_in->sin_len != 0)
						netmask.s_addr =
						    sa_in->sin_addr.s_addr;
				} else if (rtm->rtm_flags & RTF_HOST)
					netmask.s_addr = prefixlen2mask(32);
				else
					netmask.s_addr =
					    prefixlen2mask(prefixlen_classful(
						prefix.s_addr));
				if (rtm->rtm_flags & RTF_STATIC)
					flags |= F_STATIC;
				if (rtm->rtm_flags & RTF_BLACKHOLE)
					flags |= F_BLACKHOLE;
				if (rtm->rtm_flags & RTF_REJECT)
					flags |= F_REJECT;
				if (rtm->rtm_flags & RTF_DYNAMIC)
					flags |= F_DYNAMIC;
				break;
			default:
				continue;
			}

			ifindex = rtm->rtm_index;
			if ((sa = rti_info[RTAX_GATEWAY]) != NULL) {
				switch (sa->sa_family) {
				case AF_INET:
					nexthop.s_addr = ((struct
					    sockaddr_in *)sa)->sin_addr.s_addr;
					break;
				case AF_LINK:
					flags |= F_CONNECTED;
					break;
				}
			}
		}

		switch (rtm->rtm_type) {
		case RTM_ADD:
		case RTM_CHANGE:
			if (nexthop.s_addr == 0 && !(flags & F_CONNECTED)) {
				log_warnx("dispatch_rtmsg no nexthop for %s/%u",
				    inet_ntoa(prefix),
				    mask2prefixlen(netmask.s_addr));
				continue;
			}

			if ((kr = kroute_find(prefix.s_addr, netmask.s_addr,
			    prio)) != NULL) {
				if (kr->r.flags & F_REDISTRIBUTED)
					flags |= F_REDISTRIBUTED;
				kr->r.nexthop.s_addr = nexthop.s_addr;
				kr->r.flags = flags;
				kr->r.ifindex = ifindex;
				kr->r.priority = prio;

				rtlabel_unref(kr->r.rtlabel);
				kr->r.rtlabel = 0;
				if ((label = (struct sockaddr_rtlabel *)
				    rti_info[RTAX_LABEL]) != NULL)
					kr->r.rtlabel =
					    rtlabel_name2id(label->sr_label);

				if (kif_validate(kr->r.ifindex))
					kr->r.flags &= ~F_DOWN;
				else
					kr->r.flags |= F_DOWN;

				/* just readd, the RDE will care */
				kr_redistribute(IMSG_NETWORK_ADD, &kr->r);
			} else {
				if ((kr = calloc(1,
				    sizeof(struct kroute_node))) == NULL) {
					log_warn("dispatch_rtmsg");
					return (-1);
				}

				iface = if_find_index(rtm->rtm_index);
				if (iface != NULL)
					metric = iface->cost;
				else
					metric = DEFAULT_COST;

				kr->r.prefix.s_addr = prefix.s_addr;
				kr->r.netmask.s_addr = netmask.s_addr;
				kr->r.nexthop.s_addr = nexthop.s_addr;
				kr->r.metric = metric;
				kr->r.flags = flags;
				kr->r.ifindex = ifindex;

				if ((label = (struct sockaddr_rtlabel *)
				    rti_info[RTAX_LABEL]) != NULL)
					kr->r.rtlabel =
					    rtlabel_name2id(label->sr_label);

				kroute_insert(kr);
			}
			break;
		case RTM_DELETE:
			if ((kr = kroute_find(prefix.s_addr, netmask.s_addr,
			    prio)) == NULL)
				continue;
			if (!(kr->r.flags & F_KERNEL))
				continue;
			if (kroute_remove(kr) == -1)
				return (-1);
			break;
		case RTM_IFINFO:
			memcpy(&ifm, next, sizeof(ifm));
			if_change(ifm.ifm_index, ifm.ifm_flags,
			    &ifm.ifm_data);
			break;
		case RTM_IFANNOUNCE:
			if_announce(next);
			break;
		default:
			/* ignore for now */
			break;
		}
	}
	return (0);
}