GCC Code Coverage Report

Directory:  ./
File:       usr.sbin/bgpd/kroute.c
Date:       2017-11-13

            Exec    Total    Coverage
Lines:        15     1677       0.9 %
Branches:      2     1903       0.1 %

Line  Branch  Exec  Source
1
/*	$OpenBSD: kroute.c,v 1.216 2017/07/24 11:00:01 friehm Exp $ */
2
3
/*
4
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
5
 *
6
 * Permission to use, copy, modify, and distribute this software for any
7
 * purpose with or without fee is hereby granted, provided that the above
8
 * copyright notice and this permission notice appear in all copies.
9
 *
10
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
 */
18
19
#include <sys/types.h>
20
#include <sys/socket.h>
21
#include <sys/sysctl.h>
22
#include <sys/tree.h>
23
#include <sys/uio.h>
24
#include <netinet/in.h>
25
#include <arpa/inet.h>
26
#include <net/if.h>
27
#include <net/if_dl.h>
28
#include <net/route.h>
29
#include <netmpls/mpls.h>
30
#include <err.h>
31
#include <errno.h>
32
#include <fcntl.h>
33
#include <stdio.h>
34
#include <stdlib.h>
35
#include <string.h>
36
#include <unistd.h>
37
38
#include "bgpd.h"
39
#include "log.h"
40
41
struct ktable		**krt;
42
u_int			  krt_size;
43
44
struct {
45
	u_int32_t		rtseq;
46
	pid_t			pid;
47
	int			fd;
48
} kr_state;
49
50
struct kroute_node {
51
	RB_ENTRY(kroute_node)	 entry;
52
	struct kroute		 r;
53
	struct kroute_node	*next;
54
};
55
56
struct kroute6_node {
57
	RB_ENTRY(kroute6_node)	 entry;
58
	struct kroute6		 r;
59
	struct kroute6_node	*next;
60
};
61
62
struct knexthop_node {
63
	RB_ENTRY(knexthop_node)	 entry;
64
	struct bgpd_addr	 nexthop;
65
	void			*kroute;
66
};
67
68
struct kif_kr {
69
	LIST_ENTRY(kif_kr)	 entry;
70
	struct kroute_node	*kr;
71
};
72
73
struct kif_kr6 {
74
	LIST_ENTRY(kif_kr6)	 entry;
75
	struct kroute6_node	*kr;
76
};
77
78
LIST_HEAD(kif_kr_head, kif_kr);
79
LIST_HEAD(kif_kr6_head, kif_kr6);
80
81
struct kif_node {
82
	RB_ENTRY(kif_node)	 entry;
83
	struct kif		 k;
84
	struct kif_kr_head	 kroute_l;
85
	struct kif_kr6_head	 kroute6_l;
86
};
87
88
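The LIST_HEAD/LIST_ENTRY declarations above are the BSD <sys/queue.h> intrusive-list macros (not included directly above, so presumably pulled in via one of the other headers). A minimal, self-contained sketch of that pattern only follows; the struct item and item_head names are invented for illustration and are not part of kroute.c:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	LIST_ENTRY(item)	 entry;		/* list linkage, like kif_kr */
	int			 value;
};

LIST_HEAD(item_head, item);

int
main(void)
{
	struct item_head	 head;
	struct item		*it;
	int			 i;

	LIST_INIT(&head);
	for (i = 0; i < 3; i++) {
		if ((it = calloc(1, sizeof(*it))) == NULL)
			return (1);
		it->value = i;
		LIST_INSERT_HEAD(&head, it, entry);
	}

	LIST_FOREACH(it, &head, entry)		/* walk: prints 2 1 0 */
		printf("%d\n", it->value);

	while ((it = LIST_FIRST(&head)) != NULL) {	/* teardown */
		LIST_REMOVE(it, entry);
		free(it);
	}
	return (0);
}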
int	ktable_new(u_int, u_int, char *, char *, int, u_int8_t);
89
void	ktable_free(u_int, u_int8_t);
90
void	ktable_destroy(struct ktable *, u_int8_t);
91
struct ktable	*ktable_get(u_int);
92
93
int	kr4_change(struct ktable *, struct kroute_full *, u_int8_t);
94
int	kr6_change(struct ktable *, struct kroute_full *, u_int8_t);
95
int	krVPN4_change(struct ktable *, struct kroute_full *, u_int8_t);
96
int	kr4_delete(struct ktable *, struct kroute_full *, u_int8_t);
97
int	kr6_delete(struct ktable *, struct kroute_full *, u_int8_t);
98
int	krVPN4_delete(struct ktable *, struct kroute_full *, u_int8_t);
99
void	kr_net_delete(struct network *);
100
struct network *kr_net_match(struct ktable *, struct kroute *);
101
struct network *kr_net_match6(struct ktable *, struct kroute6 *);
102
struct network *kr_net_find(struct ktable *, struct network *);
103
int	kr_redistribute(int, struct ktable *, struct kroute *);
104
int	kr_redistribute6(int, struct ktable *, struct kroute6 *);
105
struct kroute_full *kr_tofull(struct kroute *);
106
struct kroute_full *kr6_tofull(struct kroute6 *);
107
int	kroute_compare(struct kroute_node *, struct kroute_node *);
108
int	kroute6_compare(struct kroute6_node *, struct kroute6_node *);
109
int	knexthop_compare(struct knexthop_node *, struct knexthop_node *);
110
int	kif_compare(struct kif_node *, struct kif_node *);
111
void	kr_fib_update_prio(u_int, u_int8_t);
112
113
struct kroute_node	*kroute_find(struct ktable *, in_addr_t, u_int8_t,
114
			    u_int8_t);
115
struct kroute_node	*kroute_matchgw(struct kroute_node *,
116
			    struct sockaddr_in *);
117
int			 kroute_insert(struct ktable *, struct kroute_node *);
118
int			 kroute_remove(struct ktable *, struct kroute_node *);
119
void			 kroute_clear(struct ktable *);
120
121
struct kroute6_node	*kroute6_find(struct ktable *, const struct in6_addr *,
122
			    u_int8_t, u_int8_t);
123
struct kroute6_node	*kroute6_matchgw(struct kroute6_node *,
124
			    struct sockaddr_in6 *);
125
int			 kroute6_insert(struct ktable *, struct kroute6_node *);
126
int			 kroute6_remove(struct ktable *, struct kroute6_node *);
127
void			 kroute6_clear(struct ktable *);
128
129
struct knexthop_node	*knexthop_find(struct ktable *, struct bgpd_addr *);
130
int			 knexthop_insert(struct ktable *,
131
			    struct knexthop_node *);
132
int			 knexthop_remove(struct ktable *,
133
			    struct knexthop_node *);
134
void			 knexthop_clear(struct ktable *);
135
136
struct kif_node		*kif_find(int);
137
int			 kif_insert(struct kif_node *);
138
int			 kif_remove(struct kif_node *);
139
void			 kif_clear(void);
140
141
int			 kif_kr_insert(struct kroute_node *);
142
int			 kif_kr_remove(struct kroute_node *);
143
144
int			 kif_kr6_insert(struct kroute6_node *);
145
int			 kif_kr6_remove(struct kroute6_node *);
146
147
int			 kif_validate(struct kif *);
148
int			 kroute_validate(struct kroute *);
149
int			 kroute6_validate(struct kroute6 *);
150
void			 knexthop_validate(struct ktable *,
151
			    struct knexthop_node *);
152
void			 knexthop_track(struct ktable *, void *);
153
void			 knexthop_send_update(struct knexthop_node *);
154
struct kroute_node	*kroute_match(struct ktable *, in_addr_t, int);
155
struct kroute6_node	*kroute6_match(struct ktable *, struct in6_addr *, int);
156
void			 kroute_detach_nexthop(struct ktable *,
157
			    struct knexthop_node *);
158
159
int		protect_lo(struct ktable *);
160
u_int8_t	prefixlen_classful(in_addr_t);
161
u_int8_t	mask2prefixlen(in_addr_t);
162
u_int8_t	mask2prefixlen6(struct sockaddr_in6 *);
163
void		get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
164
void		if_change(u_short, int, struct if_data *);
165
void		if_announce(void *);
166
167
int		send_rtmsg(int, int, struct ktable *, struct kroute *,
168
		    u_int8_t);
169
int		send_rt6msg(int, int, struct ktable *, struct kroute6 *,
170
		    u_int8_t);
171
int		dispatch_rtmsg(void);
172
int		fetchtable(struct ktable *, u_int8_t);
173
int		fetchifs(int);
174
int		dispatch_rtmsg_addr(struct rt_msghdr *,
175
		    struct sockaddr *[RTAX_MAX], struct ktable *);
176
177
RB_PROTOTYPE(kroute_tree, kroute_node, entry, kroute_compare)
178
RB_GENERATE(kroute_tree, kroute_node, entry, kroute_compare)
179
180
RB_PROTOTYPE(kroute6_tree, kroute6_node, entry, kroute6_compare)
181
RB_GENERATE(kroute6_tree, kroute6_node, entry, kroute6_compare)
182
183
RB_PROTOTYPE(knexthop_tree, knexthop_node, entry, knexthop_compare)
184
RB_GENERATE(knexthop_tree, knexthop_node, entry, knexthop_compare)
185
186
RB_HEAD(kif_tree, kif_node)		kit;
187
RB_PROTOTYPE(kif_tree, kif_node, entry, kif_compare)
188
RB_GENERATE(kif_tree, kif_node, entry, kif_compare)
189
190
#define KT2KNT(x)	(&(ktable_get((x)->nhtableid)->knt))
191
192
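The RB_PROTOTYPE/RB_GENERATE pairs above expand into complete red-black-tree implementations from <sys/tree.h>, keyed by the *_compare functions defined further down in this file. A minimal, self-contained sketch of the same macro pattern follows; the node, example_tree and node_cmp names are invented for illustration and do not appear in kroute.c:

#include <sys/tree.h>
#include <stdlib.h>

struct node {
	RB_ENTRY(node)	 entry;		/* tree linkage, like kroute_node */
	int		 key;
};

static int
node_cmp(struct node *a, struct node *b)
{
	return (a->key < b->key ? -1 : a->key > b->key);
}

RB_HEAD(example_tree, node)	head = RB_INITIALIZER(&head);
RB_PROTOTYPE(example_tree, node, entry, node_cmp)
RB_GENERATE(example_tree, node, entry, node_cmp)

int
main(void)
{
	struct node	*n, s;

	if ((n = calloc(1, sizeof(*n))) == NULL)
		return (1);
	n->key = 42;
	if (RB_INSERT(example_tree, &head, n) != NULL)	/* NULL == inserted */
		return (1);

	s.key = 42;				/* lookup key, like kroute_find() */
	return (RB_FIND(example_tree, &head, &s) == NULL);
}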
/*
193
 * exported functions
194
 */
195
196
int
197
kr_init(void)
198
{
199
	int		opt = 0, rcvbuf, default_rcvbuf;
200
	unsigned int	tid = RTABLE_ANY;
201
	socklen_t	optlen;
202
203
	if ((kr_state.fd = socket(AF_ROUTE,
204
	    SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)) == -1) {
205
		log_warn("kr_init: socket");
206
		return (-1);
207
	}
208
209
	/* not interested in my own messages */
210
	if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
211
	    &opt, sizeof(opt)) == -1)
212
		log_warn("kr_init: setsockopt");	/* not fatal */
213
214
	/* grow receive buffer, don't wanna miss messages */
215
	optlen = sizeof(default_rcvbuf);
216
	if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
217
	    &default_rcvbuf, &optlen) == -1)
218
		log_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF");
219
	else
220
		for (rcvbuf = MAX_RTSOCK_BUF;
221
		    rcvbuf > default_rcvbuf &&
222
		    setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
223
		    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
224
		    rcvbuf /= 2)
225
			;	/* nothing */
226
227
	if (setsockopt(kr_state.fd, AF_ROUTE, ROUTE_TABLEFILTER, &tid,
228
	    sizeof(tid)) == -1) {
229
		log_warn("kr_init: setsockopt AF_ROUTE ROUTE_TABLEFILTER");
230
		return (-1);
231
	}
232
233
	kr_state.pid = getpid();
234
	kr_state.rtseq = 1;
235
236
	RB_INIT(&kit);
237
238
	if (fetchifs(0) == -1)
239
		return (-1);
240
241
	return (kr_state.fd);
242
}
243
244
int
245
ktable_new(u_int rtableid, u_int rdomid, char *name, char *ifname, int fs,
246
    u_int8_t fib_prio)
247
{
248
	struct ktable	**xkrt;
249
	struct ktable	 *kt;
250
	size_t		  oldsize;
251
252
	/* resize index table if needed */
253
	if (rtableid >= krt_size) {
254
		oldsize = sizeof(struct ktable *) * krt_size;
255
		if ((xkrt = reallocarray(krt, rtableid + 1,
256
		    sizeof(struct ktable *))) == NULL) {
257
			log_warn("ktable_new");
258
			return (-1);
259
		}
260
		krt = xkrt;
261
		krt_size = rtableid + 1;
262
		bzero((char *)krt + oldsize,
263
		    krt_size * sizeof(struct ktable *) - oldsize);
264
	}
265
266
	if (krt[rtableid])
267
		fatalx("ktable_new: table already exists.");
268
269
	/* allocate new element */
270
	kt = krt[rtableid] = calloc(1, sizeof(struct ktable));
271
	if (kt == NULL) {
272
		log_warn("ktable_new");
273
		return (-1);
274
	}
275
276
	/* initialize structure ... */
277
	strlcpy(kt->descr, name, sizeof(kt->descr));
278
	RB_INIT(&kt->krt);
279
	RB_INIT(&kt->krt6);
280
	RB_INIT(&kt->knt);
281
	TAILQ_INIT(&kt->krn);
282
	kt->fib_conf = kt->fib_sync = fs;
283
	kt->rtableid = rtableid;
284
	kt->nhtableid = rdomid;
285
	/* bump refcount of rdomain table for the nexthop lookups */
286
	ktable_get(kt->nhtableid)->nhrefcnt++;
287
	if (ifname) {
288
		strlcpy(kt->ifmpe, ifname, IFNAMSIZ);
289
		kt->ifindex = if_nametoindex(ifname);
290
	}
291
292
	/* ... and load it */
293
	if (fetchtable(kt, fib_prio) == -1)
294
		return (-1);
295
	if (protect_lo(kt) == -1)
296
		return (-1);
297
298
	/* everything is up and running */
299
	kt->state = RECONF_REINIT;
300
	log_debug("new ktable %s for rtableid %d", name, rtableid);
301
	return (0);
302
}
303
304
void
305
ktable_free(u_int rtableid, u_int8_t fib_prio)
306
{
307
	struct ktable	*kt, *nkt;
308
309
	if ((kt = ktable_get(rtableid)) == NULL)
310
		return;
311
312
	/* decouple from kernel, no new routes will be entered from here */
313
	kr_fib_decouple(kt->rtableid, fib_prio);
314
315
	/* first unhook from the nexthop table */
316
	nkt = ktable_get(kt->nhtableid);
317
	nkt->nhrefcnt--;
318
319
	/*
320
	 * Evil little details:
321
	 *   If kt->nhrefcnt > 0 then kt == nkt and nothing needs to be done.
322
	 *   If kt != nkt then kt->nhrefcnt must be 0 and kt must be killed.
323
	 *   If nkt is no longer referenced it must be killed (possible double
324
	 *   free so check that kt != nkt).
325
	 */
326
	if (kt != nkt && nkt->nhrefcnt <= 0)
327
		ktable_destroy(nkt, fib_prio);
328
	if (kt->nhrefcnt <= 0)
329
		ktable_destroy(kt, fib_prio);
330
}
331
332
void
333
ktable_destroy(struct ktable *kt, u_int8_t fib_prio)
334
{
335
	/* decouple just to be sure, does not hurt */
336
	kr_fib_decouple(kt->rtableid, fib_prio);
337
338
	log_debug("freeing ktable %s rtableid %u", kt->descr, kt->rtableid);
339
	knexthop_clear(kt);
340
	kroute_clear(kt);
341
	kroute6_clear(kt);
342
343
	krt[kt->rtableid] = NULL;
344
	free(kt);
345
}
346
347
struct ktable *
348
ktable_get(u_int rtableid)
349
{
350
	if (rtableid >= krt_size)
351
		return (NULL);
352
	return (krt[rtableid]);
353
}
354
355
int
356
ktable_update(u_int rtableid, char *name, char *ifname, int flags, u_int8_t
357
    fib_prio)
358
{
359
	struct ktable	*kt, *rkt;
360
	u_int		 rdomid;
361
362
	if (!ktable_exists(rtableid, &rdomid))
363
		fatalx("King Bula lost a table");	/* may not happen */
364
365
	if (rdomid != rtableid || flags & F_RIB_NOFIB) {
366
		rkt = ktable_get(rdomid);
367
		if (rkt == NULL) {
368
			char buf[32];
369
			snprintf(buf, sizeof(buf), "rdomain_%d", rdomid);
370
			if (ktable_new(rdomid, rdomid, buf, NULL, 0, fib_prio))
371
				return (-1);
372
		} else {
373
			/* there is no need for full fib synchronisation if
374
			 * the table is only used for nexthop lookups.
375
			 */
376
			if (rkt->state == RECONF_DELETE) {
377
				rkt->fib_conf = 0;
378
				rkt->state = RECONF_KEEP;
379
			}
380
		}
381
	}
382
383
	if (flags & (F_RIB_NOEVALUATE | F_RIB_NOFIB))
384
		/* only rdomain table must exist */
385
		return (0);
386
387
	kt = ktable_get(rtableid);
388
	if (kt == NULL) {
389
		if (ktable_new(rtableid, rdomid, name, ifname,
390
		    !(flags & F_RIB_NOFIBSYNC), fib_prio))
391
			return (-1);
392
	} else {
393
		/* fib sync has higher preference than no sync */
394
		if (kt->state == RECONF_DELETE) {
395
			kt->fib_conf = !(flags & F_RIB_NOFIBSYNC);
396
			kt->state = RECONF_KEEP;
397
		} else if (!kt->fib_conf)
398
			kt->fib_conf = !(flags & F_RIB_NOFIBSYNC);
399
400
		strlcpy(kt->descr, name, sizeof(kt->descr));
401
	}
402
	return (0);
403
}
404
405
void
406
ktable_preload(void)
407
{
408
	struct ktable	*kt;
409
	u_int		 i;
410
411
	for (i = 0; i < krt_size; i++) {
412
		if ((kt = ktable_get(i)) == NULL)
413
			continue;
414
		kt->state = RECONF_DELETE;
415
	}
416
}
417
418
void
419
ktable_postload(u_int8_t fib_prio)
420
{
421
	struct ktable	*kt;
422
	u_int		 i;
423
424
	for (i = krt_size; i > 0; i--) {
425
		if ((kt = ktable_get(i - 1)) == NULL)
426
			continue;
427
		if (kt->state == RECONF_DELETE)
428
			ktable_free(i - 1, fib_prio);
429
		else if (kt->state == RECONF_REINIT)
430
			kt->fib_sync = kt->fib_conf;
431
	}
432
}
433
434
int
435
ktable_exists(u_int rtableid, u_int *rdomid)
436
{
437
48
	size_t			 len;
438
24
	struct rt_tableinfo	 info;
439
24
	int			 mib[6];
440
441
24
	mib[0] = CTL_NET;
442
24
	mib[1] = PF_ROUTE;
443
24
	mib[2] = 0;
444
24
	mib[3] = 0;
445
24
	mib[4] = NET_RT_TABLE;
446
24
	mib[5] = rtableid;
447
448
24
	len = sizeof(info);
449
24
	if (sysctl(mib, 6, &info, &len, NULL, 0) == -1) {
450
		if (errno == ENOENT)
451
			/* table nonexistent */
452
			return (0);
453
		log_warn("sysctl");
454
		/* must return 0 so that the table is considered non-existent */
455
		return (0);
456
	}
457
24
	if (rdomid)
458
24
		*rdomid = info.rti_domainid;
459
24
	return (1);
460
24
}
461
462
int
463
kr_change(u_int rtableid, struct kroute_full *kl, u_int8_t fib_prio)
464
{
465
	struct ktable		*kt;
466
467
	if ((kt = ktable_get(rtableid)) == NULL)
468
		/* too noisy during reloads, just ignore */
469
		return (0);
470
	switch (kl->prefix.aid) {
471
	case AID_INET:
472
		return (kr4_change(kt, kl, fib_prio));
473
	case AID_INET6:
474
		return (kr6_change(kt, kl, fib_prio));
475
	case AID_VPN_IPv4:
476
		return (krVPN4_change(kt, kl, fib_prio));
477
	}
478
	log_warnx("kr_change: not handled AID");
479
	return (-1);
480
}
481
482
int
483
kr4_change(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
484
{
485
	struct kroute_node	*kr;
486
	int			 action = RTM_ADD;
487
	u_int16_t		 labelid;
488
489
	if ((kr = kroute_find(kt, kl->prefix.v4.s_addr, kl->prefixlen,
490
	    fib_prio)) != NULL)
491
		action = RTM_CHANGE;
492
493
	/* for blackhole and reject routes nexthop needs to be 127.0.0.1 */
494
	if (kl->flags & (F_BLACKHOLE|F_REJECT))
495
		kl->nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
496
	/* nexthop within 127/8 -> ignore silently */
497
	else if ((kl->nexthop.v4.s_addr & htonl(IN_CLASSA_NET)) ==
498
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET))
499
		return (0);
500
501
	labelid = rtlabel_name2id(kl->label);
502
503
	if (action == RTM_ADD) {
504
		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
505
			log_warn("kr_change");
506
			return (-1);
507
		}
508
		kr->r.prefix.s_addr = kl->prefix.v4.s_addr;
509
		kr->r.prefixlen = kl->prefixlen;
510
		kr->r.nexthop.s_addr = kl->nexthop.v4.s_addr;
511
		kr->r.flags = kl->flags | F_BGPD_INSERTED;
512
		kr->r.priority = fib_prio;
513
		kr->r.labelid = labelid;
514
515
		if (kroute_insert(kt, kr) == -1) {
516
			free(kr);
517
			return (-1);
518
		}
519
	} else {
520
		kr->r.nexthop.s_addr = kl->nexthop.v4.s_addr;
521
		rtlabel_unref(kr->r.labelid);
522
		kr->r.labelid = labelid;
523
		if (kl->flags & F_BLACKHOLE)
524
			kr->r.flags |= F_BLACKHOLE;
525
		else
526
			kr->r.flags &= ~F_BLACKHOLE;
527
		if (kl->flags & F_REJECT)
528
			kr->r.flags |= F_REJECT;
529
		else
530
			kr->r.flags &= ~F_REJECT;
531
	}
532
533
	if (send_rtmsg(kr_state.fd, action, kt, &kr->r, fib_prio) == -1)
534
		return (-1);
535
536
	return (0);
537
}
538
539
int
540
kr6_change(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
541
{
542
	struct kroute6_node	*kr6;
543
	struct in6_addr		 lo6 = IN6ADDR_LOOPBACK_INIT;
544
	int			 action = RTM_ADD;
545
	u_int16_t		 labelid;
546
547
	if ((kr6 = kroute6_find(kt, &kl->prefix.v6, kl->prefixlen, fib_prio)) !=
548
	    NULL)
549
		action = RTM_CHANGE;
550
551
	/* for blackhole and reject routes nexthop needs to be ::1 */
552
	if (kl->flags & (F_BLACKHOLE|F_REJECT))
553
		bcopy(&lo6, &kl->nexthop.v6, sizeof(kl->nexthop.v6));
554
	/* nexthop to loopback -> ignore silently */
555
	else if (IN6_IS_ADDR_LOOPBACK(&kl->nexthop.v6))
556
		return (0);
557
558
	labelid = rtlabel_name2id(kl->label);
559
560
	if (action == RTM_ADD) {
561
		if ((kr6 = calloc(1, sizeof(struct kroute6_node))) == NULL) {
562
			log_warn("kr_change");
563
			return (-1);
564
		}
565
		memcpy(&kr6->r.prefix, &kl->prefix.v6, sizeof(struct in6_addr));
566
		kr6->r.prefixlen = kl->prefixlen;
567
		memcpy(&kr6->r.nexthop, &kl->nexthop.v6,
568
		    sizeof(struct in6_addr));
569
		kr6->r.flags = kl->flags | F_BGPD_INSERTED;
570
		kr6->r.priority = fib_prio;
571
		kr6->r.labelid = labelid;
572
573
		if (kroute6_insert(kt, kr6) == -1) {
574
			free(kr6);
575
			return (-1);
576
		}
577
	} else {
578
		memcpy(&kr6->r.nexthop, &kl->nexthop.v6,
579
		    sizeof(struct in6_addr));
580
		rtlabel_unref(kr6->r.labelid);
581
		kr6->r.labelid = labelid;
582
		if (kl->flags & F_BLACKHOLE)
583
			kr6->r.flags |= F_BLACKHOLE;
584
		else
585
			kr6->r.flags &= ~F_BLACKHOLE;
586
		if (kl->flags & F_REJECT)
587
			kr6->r.flags |= F_REJECT;
588
		else
589
			kr6->r.flags &= ~F_REJECT;
590
	}
591
592
	if (send_rt6msg(kr_state.fd, action, kt, &kr6->r, fib_prio) == -1)
593
		return (-1);
594
595
	return (0);
596
}
597
598
int
599
krVPN4_change(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
600
{
601
	struct kroute_node	*kr;
602
	int			 action = RTM_ADD;
603
	u_int32_t		 mplslabel = 0;
604
	u_int16_t		 labelid;
605
606
	if ((kr = kroute_find(kt, kl->prefix.vpn4.addr.s_addr, kl->prefixlen,
607
	    fib_prio)) != NULL)
608
		action = RTM_CHANGE;
609
610
	/* nexthop within 127/8 -> ignore silently */
611
	if ((kl->nexthop.v4.s_addr & htonl(IN_CLASSA_NET)) ==
612
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET))
613
		return (0);
614
615
	/* only a single MPLS label is supported for now */
616
	if (kl->prefix.vpn4.labellen != 3) {
617
		log_warnx("krVPN4_change: %s/%u has not a single label",
618
		    log_addr(&kl->prefix), kl->prefixlen);
619
		return (0);
620
	}
621
	mplslabel = (kl->prefix.vpn4.labelstack[0] << 24) |
622
	    (kl->prefix.vpn4.labelstack[1] << 16) |
623
	    (kl->prefix.vpn4.labelstack[2] << 8);
624
	mplslabel = htonl(mplslabel);
625
626
	labelid = rtlabel_name2id(kl->label);
627
628
	/* for blackhole and reject routes nexthop needs to be 127.0.0.1 */
629
	if (kl->flags & (F_BLACKHOLE|F_REJECT))
630
		kl->nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
631
632
	if (action == RTM_ADD) {
633
		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
634
			log_warn("krVPN4_change");
635
			return (-1);
636
		}
637
		kr->r.prefix.s_addr = kl->prefix.vpn4.addr.s_addr;
638
		kr->r.prefixlen = kl->prefixlen;
639
		kr->r.nexthop.s_addr = kl->nexthop.v4.s_addr;
640
		kr->r.flags = kl->flags | F_BGPD_INSERTED | F_MPLS;
641
		kr->r.priority = fib_prio;
642
		kr->r.labelid = labelid;
643
		kr->r.mplslabel = mplslabel;
644
645
		if (kroute_insert(kt, kr) == -1) {
646
			free(kr);
647
			return (-1);
648
		}
649
	} else {
650
		kr->r.mplslabel = mplslabel;
651
		kr->r.nexthop.s_addr = kl->nexthop.v4.s_addr;
652
		rtlabel_unref(kr->r.labelid);
653
		kr->r.labelid = labelid;
654
		if (kl->flags & F_BLACKHOLE)
655
			kr->r.flags |= F_BLACKHOLE;
656
		else
657
			kr->r.flags &= ~F_BLACKHOLE;
658
		if (kl->flags & F_REJECT)
659
			kr->r.flags |= F_REJECT;
660
		else
661
			kr->r.flags &= ~F_REJECT;
662
	}
663
664
	if (send_rtmsg(kr_state.fd, action, kt, &kr->r, fib_prio) == -1)
665
		return (-1);
666
667
	return (0);
668
}
669
670
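A worked example of the label packing done in krVPN4_change() above; the concrete values are the editor's own, not taken from the source. For MPLS label 100 with the bottom-of-stack bit set, the three BGP label-stack octets are 0x00 0x06 0x41; shifting them into the upper 24 bits gives 0x00064100, i.e. an MPLS shim word with label 100, EXP/TC 0, S 1 and TTL 0, which htonl() then converts to wire order:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int
main(void)
{
	/* BGP label-stack entry for label 100: (100 << 4) | 1 = 0x641,
	 * i.e. the three octets 0x00 0x06 0x41 (20-bit label, EXP 0, S 1). */
	uint8_t		 labelstack[3] = { 0x00, 0x06, 0x41 };
	uint32_t	 mplslabel, wire;
	uint8_t		*p = (uint8_t *)&wire;

	/* same shift as krVPN4_change(): label lands in the top 20 bits */
	mplslabel = (labelstack[0] << 24) | (labelstack[1] << 16) |
	    (labelstack[2] << 8);
	wire = htonl(mplslabel);

	printf("host order 0x%08x, wire bytes %02x %02x %02x %02x\n",
	    (unsigned int)mplslabel, p[0], p[1], p[2], p[3]);
	/* prints: host order 0x00064100, wire bytes 00 06 41 00 */
	return (0);
}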
int
671
kr_delete(u_int rtableid, struct kroute_full *kl, u_int8_t fib_prio)
672
{
673
	struct ktable		*kt;
674
675
	if ((kt = ktable_get(rtableid)) == NULL)
676
		/* too noisy during reloads, just ignore */
677
		return (0);
678
679
	switch (kl->prefix.aid) {
680
	case AID_INET:
681
		return (kr4_delete(kt, kl, fib_prio));
682
	case AID_INET6:
683
		return (kr6_delete(kt, kl, fib_prio));
684
	case AID_VPN_IPv4:
685
		return (krVPN4_delete(kt, kl, fib_prio));
686
	}
687
	log_warnx("%s: not handled AID", __func__);
688
	return (-1);
689
}
690
691
int
692
kr4_delete(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
693
{
694
	struct kroute_node	*kr;
695
696
	if ((kr = kroute_find(kt, kl->prefix.v4.s_addr, kl->prefixlen,
697
	    fib_prio)) == NULL)
698
		return (0);
699
700
	if (!(kr->r.flags & F_BGPD_INSERTED))
701
		return (0);
702
703
	if (send_rtmsg(kr_state.fd, RTM_DELETE, kt, &kr->r, fib_prio) == -1)
704
		return (-1);
705
706
	rtlabel_unref(kr->r.labelid);
707
708
	if (kroute_remove(kt, kr) == -1)
709
		return (-1);
710
711
	return (0);
712
}
713
714
int
715
kr6_delete(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
716
{
717
	struct kroute6_node	*kr6;
718
719
	if ((kr6 = kroute6_find(kt, &kl->prefix.v6, kl->prefixlen, fib_prio)) ==
720
	    NULL)
721
		return (0);
722
723
	if (!(kr6->r.flags & F_BGPD_INSERTED))
724
		return (0);
725
726
	if (send_rt6msg(kr_state.fd, RTM_DELETE, kt, &kr6->r, fib_prio) == -1)
727
		return (-1);
728
729
	rtlabel_unref(kr6->r.labelid);
730
731
	if (kroute6_remove(kt, kr6) == -1)
732
		return (-1);
733
734
	return (0);
735
}
736
737
int
738
krVPN4_delete(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
739
{
740
	struct kroute_node	*kr;
741
742
	if ((kr = kroute_find(kt, kl->prefix.vpn4.addr.s_addr, kl->prefixlen,
743
	    fib_prio)) == NULL)
744
		return (0);
745
746
	if (!(kr->r.flags & F_BGPD_INSERTED))
747
		return (0);
748
749
	if (send_rtmsg(kr_state.fd, RTM_DELETE, kt, &kr->r, fib_prio) == -1)
750
		return (-1);
751
752
	rtlabel_unref(kr->r.labelid);
753
754
	if (kroute_remove(kt, kr) == -1)
755
		return (-1);
756
757
	return (0);
758
}
759
760
void
761
kr_shutdown(u_int8_t fib_prio)
762
{
763
	u_int	i;
764
765
	for (i = krt_size; i > 0; i--)
766
		ktable_free(i - 1, fib_prio);
767
	kif_clear();
768
}
769
770
void
771
kr_fib_couple(u_int rtableid, u_int8_t fib_prio)
772
{
773
	struct ktable		*kt;
774
	struct kroute_node	*kr;
775
	struct kroute6_node	*kr6;
776
777
	if ((kt = ktable_get(rtableid)) == NULL)  /* table does not exist */
778
		return;
779
780
	if (kt->fib_sync)	/* already coupled */
781
		return;
782
783
	kt->fib_sync = 1;
784
785
	RB_FOREACH(kr, kroute_tree, &kt->krt)
786
		if ((kr->r.flags & F_BGPD_INSERTED))
787
			send_rtmsg(kr_state.fd, RTM_ADD, kt, &kr->r, fib_prio);
788
	RB_FOREACH(kr6, kroute6_tree, &kt->krt6)
789
		if ((kr6->r.flags & F_BGPD_INSERTED))
790
			send_rt6msg(kr_state.fd, RTM_ADD, kt, &kr6->r,
791
			    fib_prio);
792
793
	log_info("kernel routing table %u (%s) coupled", kt->rtableid,
794
	    kt->descr);
795
}
796
797
void
798
kr_fib_couple_all(u_int8_t fib_prio)
799
{
800
	u_int	 i;
801
802
	for (i = krt_size; i > 0; i--)
803
		kr_fib_couple(i - 1, fib_prio);
804
}
805
806
void
807
kr_fib_decouple(u_int rtableid, u_int8_t fib_prio)
808
{
809
	struct ktable		*kt;
810
	struct kroute_node	*kr;
811
	struct kroute6_node	*kr6;
812
813
	if ((kt = ktable_get(rtableid)) == NULL)  /* table does not exist */
814
		return;
815
816
	if (!kt->fib_sync)	/* already decoupled */
817
		return;
818
819
	RB_FOREACH(kr, kroute_tree, &kt->krt)
820
		if ((kr->r.flags & F_BGPD_INSERTED))
821
			send_rtmsg(kr_state.fd, RTM_DELETE, kt, &kr->r,
822
			    fib_prio);
823
	RB_FOREACH(kr6, kroute6_tree, &kt->krt6)
824
		if ((kr6->r.flags & F_BGPD_INSERTED))
825
			send_rt6msg(kr_state.fd, RTM_DELETE, kt, &kr6->r,
826
			    fib_prio);
827
828
	kt->fib_sync = 0;
829
830
	log_info("kernel routing table %u (%s) decoupled", kt->rtableid,
831
	    kt->descr);
832
}
833
834
void
835
kr_fib_decouple_all(u_int8_t fib_prio)
836
{
837
	u_int	 i;
838
839
	for (i = krt_size; i > 0; i--)
840
		kr_fib_decouple(i - 1, fib_prio);
841
}
842
843
void
844
kr_fib_update_prio(u_int rtableid, u_int8_t fib_prio)
845
{
846
	struct ktable		*kt;
847
	struct kroute_node	*kr;
848
	struct kroute6_node	*kr6;
849
850
	if ((kt = ktable_get(rtableid)) == NULL)  /* table does not exist */
851
		return;
852
853
	RB_FOREACH(kr, kroute_tree, &kt->krt)
854
		if ((kr->r.flags & F_BGPD_INSERTED))
855
			kr->r.priority = fib_prio;
856
857
	RB_FOREACH(kr6, kroute6_tree, &kt->krt6)
858
		if ((kr6->r.flags & F_BGPD_INSERTED))
859
			kr6->r.priority = fib_prio;
860
}
861
862
void
863
kr_fib_update_prio_all(u_int8_t fib_prio)
864
{
865
	u_int	 i;
866
867
	for (i = krt_size; i > 0; i--)
868
		kr_fib_update_prio(i - 1, fib_prio);
869
}
870
871
int
872
kr_dispatch_msg(void)
873
{
874
	return (dispatch_rtmsg());
875
}
876
877
int
878
kr_nexthop_add(u_int rtableid, struct bgpd_addr *addr, struct bgpd_config *conf)
879
{
880
	struct ktable		*kt;
881
	struct knexthop_node	*h;
882
883
	if (rtableid == 0)
884
		rtableid = conf->default_tableid;
885
886
	if ((kt = ktable_get(rtableid)) == NULL) {
887
		log_warnx("kr_nexthop_add: non-existent rtableid %d", rtableid);
888
		return (0);
889
	}
890
	if ((h = knexthop_find(kt, addr)) != NULL) {
891
		/* should not happen... this is actually an error path */
892
		knexthop_send_update(h);
893
	} else {
894
		if ((h = calloc(1, sizeof(struct knexthop_node))) == NULL) {
895
			log_warn("kr_nexthop_add");
896
			return (-1);
897
		}
898
		memcpy(&h->nexthop, addr, sizeof(h->nexthop));
899
900
		if (knexthop_insert(kt, h) == -1)
901
			return (-1);
902
	}
903
904
	return (0);
905
}
906
907
void
908
kr_nexthop_delete(u_int rtableid, struct bgpd_addr *addr,
909
    struct bgpd_config *conf)
910
{
911
	struct ktable		*kt;
912
	struct knexthop_node	*kn;
913
914
	if (rtableid == 0)
915
		rtableid = conf->default_tableid;
916
917
	if ((kt = ktable_get(rtableid)) == NULL) {
918
		log_warnx("kr_nexthop_delete: non-existent rtableid %d",
919
		    rtableid);
920
		return;
921
	}
922
	if ((kn = knexthop_find(kt, addr)) == NULL)
923
		return;
924
925
	knexthop_remove(kt, kn);
926
}
927
928
void
929
kr_show_route(struct imsg *imsg)
930
{
931
	struct ktable		*kt;
932
	struct kroute_node	*kr, *kn;
933
	struct kroute6_node	*kr6, *kn6;
934
	struct bgpd_addr	*addr;
935
	int			 flags;
936
	sa_family_t		 af;
937
	struct ctl_show_nexthop	 snh;
938
	struct knexthop_node	*h;
939
	struct kif_node		*kif;
940
	u_int			 i;
941
	u_short			 ifindex = 0;
942
943
	switch (imsg->hdr.type) {
944
	case IMSG_CTL_KROUTE:
945
		if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags) +
946
		    sizeof(af)) {
947
			log_warnx("kr_show_route: wrong imsg len");
948
			break;
949
		}
950
		kt = ktable_get(imsg->hdr.peerid);
951
		if (kt == NULL) {
952
			log_warnx("kr_show_route: table %u does not exist",
953
			    imsg->hdr.peerid);
954
			break;
955
		}
956
		memcpy(&flags, imsg->data, sizeof(flags));
957
		memcpy(&af, (char *)imsg->data + sizeof(flags), sizeof(af));
958
		if (!af || af == AF_INET)
959
			RB_FOREACH(kr, kroute_tree, &kt->krt) {
960
				if (flags && (kr->r.flags & flags) == 0)
961
					continue;
962
				kn = kr;
963
				do {
964
					send_imsg_session(IMSG_CTL_KROUTE,
965
					    imsg->hdr.pid, kr_tofull(&kn->r),
966
					    sizeof(struct kroute_full));
967
				} while ((kn = kn->next) != NULL);
968
			}
969
		if (!af || af == AF_INET6)
970
			RB_FOREACH(kr6, kroute6_tree, &kt->krt6) {
971
				if (flags && (kr6->r.flags & flags) == 0)
972
					continue;
973
				kn6 = kr6;
974
				do {
975
					send_imsg_session(IMSG_CTL_KROUTE,
976
					    imsg->hdr.pid, kr6_tofull(&kn6->r),
977
					    sizeof(struct kroute_full));
978
				} while ((kn6 = kn6->next) != NULL);
979
			}
980
		break;
981
	case IMSG_CTL_KROUTE_ADDR:
982
		if (imsg->hdr.len != IMSG_HEADER_SIZE +
983
		    sizeof(struct bgpd_addr)) {
984
			log_warnx("kr_show_route: wrong imsg len");
985
			break;
986
		}
987
		kt = ktable_get(imsg->hdr.peerid);
988
		if (kt == NULL) {
989
			log_warnx("kr_show_route: table %u does not exist",
990
			    imsg->hdr.peerid);
991
			break;
992
		}
993
		addr = imsg->data;
994
		kr = NULL;
995
		switch (addr->aid) {
996
		case AID_INET:
997
			kr = kroute_match(kt, addr->v4.s_addr, 1);
998
			if (kr != NULL)
999
				send_imsg_session(IMSG_CTL_KROUTE,
1000
				    imsg->hdr.pid, kr_tofull(&kr->r),
1001
				    sizeof(struct kroute_full));
1002
			break;
1003
		case AID_INET6:
1004
			kr6 = kroute6_match(kt, &addr->v6, 1);
1005
			if (kr6 != NULL)
1006
				send_imsg_session(IMSG_CTL_KROUTE,
1007
				    imsg->hdr.pid, kr6_tofull(&kr6->r),
1008
				    sizeof(struct kroute_full));
1009
			break;
1010
		}
1011
		break;
1012
	case IMSG_CTL_SHOW_NEXTHOP:
1013
		kt = ktable_get(imsg->hdr.peerid);
1014
		if (kt == NULL) {
1015
			log_warnx("kr_show_route: table %u does not exist",
1016
			    imsg->hdr.peerid);
1017
			break;
1018
		}
1019
		RB_FOREACH(h, knexthop_tree, KT2KNT(kt)) {
1020
			bzero(&snh, sizeof(snh));
1021
			memcpy(&snh.addr, &h->nexthop, sizeof(snh.addr));
1022
			if (h->kroute != NULL) {
1023
				switch (h->nexthop.aid) {
1024
				case AID_INET:
1025
					kr = h->kroute;
1026
					snh.valid = kroute_validate(&kr->r);
1027
					snh.krvalid = 1;
1028
					memcpy(&snh.kr.kr4, &kr->r,
1029
					    sizeof(snh.kr.kr4));
1030
					ifindex = kr->r.ifindex;
1031
					break;
1032
				case AID_INET6:
1033
					kr6 = h->kroute;
1034
					snh.valid = kroute6_validate(&kr6->r);
1035
					snh.krvalid = 1;
1036
					memcpy(&snh.kr.kr6, &kr6->r,
1037
					    sizeof(snh.kr.kr6));
1038
					ifindex = kr6->r.ifindex;
1039
					break;
1040
				}
1041
				if ((kif = kif_find(ifindex)) != NULL)
1042
					memcpy(&snh.kif, &kif->k,
1043
					    sizeof(snh.kif));
1044
			}
1045
			send_imsg_session(IMSG_CTL_SHOW_NEXTHOP, imsg->hdr.pid,
1046
			    &snh, sizeof(snh));
1047
		}
1048
		break;
1049
	case IMSG_CTL_SHOW_INTERFACE:
1050
		RB_FOREACH(kif, kif_tree, &kit)
1051
			send_imsg_session(IMSG_CTL_SHOW_INTERFACE,
1052
			    imsg->hdr.pid, &kif->k, sizeof(kif->k));
1053
		break;
1054
	case IMSG_CTL_SHOW_FIB_TABLES:
1055
		for (i = 0; i < krt_size; i++) {
1056
			struct ktable	ktab;
1057
1058
			if ((kt = ktable_get(i)) == NULL)
1059
				continue;
1060
1061
			ktab = *kt;
1062
			/* do not leak internal information */
1063
			RB_INIT(&ktab.krt);
1064
			RB_INIT(&ktab.krt6);
1065
			RB_INIT(&ktab.knt);
1066
			TAILQ_INIT(&ktab.krn);
1067
1068
			send_imsg_session(IMSG_CTL_SHOW_FIB_TABLES,
1069
			    imsg->hdr.pid, &ktab, sizeof(ktab));
1070
		}
1071
		break;
1072
	default:	/* nada */
1073
		break;
1074
	}
1075
1076
	send_imsg_session(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
1077
}
1078
1079
void
1080
kr_ifinfo(char *ifname)
1081
{
1082
	struct kif_node	*kif;
1083
1084
	RB_FOREACH(kif, kif_tree, &kit)
1085
		if (!strcmp(ifname, kif->k.ifname)) {
1086
			send_imsg_session(IMSG_IFINFO, 0,
1087
			    &kif->k, sizeof(kif->k));
1088
			return;
1089
		}
1090
}
1091
1092
void
1093
kr_net_delete(struct network *n)
1094
{
1095
	filterset_free(&n->net.attrset);
1096
	free(n);
1097
}
1098
1099
struct network *
1100
kr_net_match(struct ktable *kt, struct kroute *kr)
1101
{
1102
	struct network		*xn;
1103
1104
	TAILQ_FOREACH(xn, &kt->krn, entry) {
1105
		if (xn->net.prefix.aid != AID_INET)
1106
			continue;
1107
		switch (xn->net.type) {
1108
		case NETWORK_DEFAULT:
1109
			if (xn->net.prefixlen == kr->prefixlen &&
1110
			    xn->net.prefix.v4.s_addr == kr->prefix.s_addr)
1111
				/* static match already redistributed */
1112
				return (NULL);
1113
			break;
1114
		case NETWORK_STATIC:
1115
			if (kr->flags & F_STATIC)
1116
				return (xn);
1117
			break;
1118
		case NETWORK_CONNECTED:
1119
			if (kr->flags & F_CONNECTED)
1120
				return (xn);
1121
			break;
1122
		case NETWORK_RTLABEL:
1123
			if (kr->labelid == xn->net.rtlabel)
1124
				return (xn);
1125
			break;
1126
		case NETWORK_MRTCLONE:
1127
			/* can not happen */
1128
			break;
1129
		}
1130
	}
1131
	return (NULL);
1132
}
1133
1134
struct network *
1135
kr_net_match6(struct ktable *kt, struct kroute6 *kr6)
1136
{
1137
	struct network		*xn;
1138
1139
	TAILQ_FOREACH(xn, &kt->krn, entry) {
1140
		if (xn->net.prefix.aid != AID_INET6)
1141
			continue;
1142
		switch (xn->net.type) {
1143
		case NETWORK_DEFAULT:
1144
			if (xn->net.prefixlen == kr6->prefixlen &&
1145
			    memcmp(&xn->net.prefix.v6, &kr6->prefix,
1146
			    sizeof(struct in6_addr)) == 0)
1147
				/* static match already redistributed */
1148
				return (NULL);
1149
			break;
1150
		case NETWORK_STATIC:
1151
			if (kr6->flags & F_STATIC)
1152
				return (xn);
1153
			break;
1154
		case NETWORK_CONNECTED:
1155
			if (kr6->flags & F_CONNECTED)
1156
				return (xn);
1157
			break;
1158
		case NETWORK_RTLABEL:
1159
			if (kr6->labelid == xn->net.rtlabel)
1160
				return (xn);
1161
			break;
1162
		case NETWORK_MRTCLONE:
1163
			/* can not happen */
1164
			break;
1165
		}
1166
	}
1167
	return (NULL);
1168
}
1169
1170
struct network *
1171
kr_net_find(struct ktable *kt, struct network *n)
1172
{
1173
	struct network		*xn;
1174
1175
	TAILQ_FOREACH(xn, &kt->krn, entry) {
1176
		if (n->net.type != xn->net.type ||
1177
		    n->net.prefixlen != xn->net.prefixlen ||
1178
		    n->net.rtableid != xn->net.rtableid)
1179
			continue;
1180
		if (memcmp(&n->net.prefix, &xn->net.prefix,
1181
		    sizeof(n->net.prefix)) == 0)
1182
			return (xn);
1183
	}
1184
	return (NULL);
1185
}
1186
1187
int
1188
kr_net_reload(u_int rtableid, struct network_head *nh)
1189
{
1190
	struct network		*n, *xn;
1191
	struct ktable		*kt;
1192
1193
	if ((kt = ktable_get(rtableid)) == NULL) {
1194
		log_warnx("kr_net_reload: non-existent rtableid %d", rtableid);
1195
		return (-1);
1196
	}
1197
1198
	TAILQ_FOREACH(n, &kt->krn, entry)
1199
		n->net.old = 1;
1200
1201
	while ((n = TAILQ_FIRST(nh)) != NULL) {
1202
		TAILQ_REMOVE(nh, n, entry);
1203
		n->net.old = 0;
1204
		n->net.rtableid = rtableid;
1205
		xn = kr_net_find(kt, n);
1206
		if (xn) {
1207
			xn->net.old = 0;
1208
			filterset_free(&xn->net.attrset);
1209
			filterset_move(&n->net.attrset, &xn->net.attrset);
1210
			kr_net_delete(n);
1211
		} else
1212
			TAILQ_INSERT_TAIL(&kt->krn, n, entry);
1213
	}
1214
1215
	for (n = TAILQ_FIRST(&kt->krn); n != NULL; n = xn) {
1216
		xn = TAILQ_NEXT(n, entry);
1217
		if (n->net.old) {
1218
			if (n->net.type == NETWORK_DEFAULT)
1219
				if (send_network(IMSG_NETWORK_REMOVE, &n->net,
1220
				    NULL))
1221
					return (-1);
1222
			TAILQ_REMOVE(&kt->krn, n, entry);
1223
			kr_net_delete(n);
1224
		}
1225
	}
1226
1227
	return (0);
1228
}
1229
1230
int
1231
kr_redistribute(int type, struct ktable *kt, struct kroute *kr)
1232
{
1233
	struct network		*match;
1234
	struct network_config	 net;
1235
	u_int32_t		 a;
1236
1237
	/* shortcut for removals */
1238
	if (type == IMSG_NETWORK_REMOVE) {
1239
		if (!(kr->flags & F_REDISTRIBUTED))
1240
			return (0);	/* no match, don't redistribute */
1241
		kr->flags &= ~F_REDISTRIBUTED;
1242
		match = NULL;
1243
		goto sendit;
1244
	}
1245
1246
	if (!(kr->flags & F_KERNEL))
1247
		return (0);
1248
1249
	/* Dynamic routes are not redistributable. */
1250
	if (kr->flags & F_DYNAMIC)
1251
		return (0);
1252
1253
	/*
1254
	 * We consider the loopback net, multicast and experimental addresses
1255
	 * as not redistributable.
1256
	 */
1257
	a = ntohl(kr->prefix.s_addr);
1258
	if (IN_MULTICAST(a) || IN_BADCLASS(a) ||
1259
	    (a >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
1260
		return (0);
1261
1262
	/* Consider networks with nexthop loopback as not redistributable. */
1263
	if (kr->nexthop.s_addr == htonl(INADDR_LOOPBACK))
1264
		return (0);
1265
1266
	/*
1267
	 * never allow 0.0.0.0/0; the default route can only be redistributed
1268
	 * with announce default.
1269
	 */
1270
	if (kr->prefix.s_addr == INADDR_ANY && kr->prefixlen == 0)
1271
		return (0);
1272
1273
	match = kr_net_match(kt, kr);
1274
	if (match == NULL) {
1275
		if (!(kr->flags & F_REDISTRIBUTED))
1276
			return (0);	/* no match, don't redistribute */
1277
		/* route no longer matches but is redistributed, so remove */
1278
		kr->flags &= ~F_REDISTRIBUTED;
1279
		type = IMSG_NETWORK_REMOVE;
1280
	} else
1281
		kr->flags |= F_REDISTRIBUTED;
1282
1283
sendit:
1284
	bzero(&net, sizeof(net));
1285
	net.prefix.aid = AID_INET;
1286
	net.prefix.v4.s_addr = kr->prefix.s_addr;
1287
	net.prefixlen = kr->prefixlen;
1288
	net.rtlabel = kr->labelid;
1289
	net.rtableid = kt->rtableid;
1290
1291
	return (send_network(type, &net, match ? &match->net.attrset : NULL));
1292
}
1293
1294
int
1295
kr_redistribute6(int type, struct ktable *kt, struct kroute6 *kr6)
1296
{
1297
	struct network		*match;
1298
	struct network_config	 net;
1299
1300
	/* shortcut for removals */
1301
	if (type == IMSG_NETWORK_REMOVE) {
1302
		if (!(kr6->flags & F_REDISTRIBUTED))
1303
			return (0);	/* no match, don't redistribute */
1304
		kr6->flags &= ~F_REDISTRIBUTED;
1305
		match = NULL;
1306
		goto sendit;
1307
	}
1308
1309
	if (!(kr6->flags & F_KERNEL))
1310
		return (0);
1311
1312
	/* Dynamic routes are not redistributable. */
1313
	if (kr6->flags & F_DYNAMIC)
1314
		return (0);
1315
1316
	/*
1317
	 * We consider unspecified, loopback, multicast, link- and site-local,
1318
	 * IPv4 mapped and IPv4 compatible addresses as not redistributable.
1319
	 */
1320
	if (IN6_IS_ADDR_UNSPECIFIED(&kr6->prefix) ||
1321
	    IN6_IS_ADDR_LOOPBACK(&kr6->prefix) ||
1322
	    IN6_IS_ADDR_MULTICAST(&kr6->prefix) ||
1323
	    IN6_IS_ADDR_LINKLOCAL(&kr6->prefix) ||
1324
	    IN6_IS_ADDR_SITELOCAL(&kr6->prefix) ||
1325
	    IN6_IS_ADDR_V4MAPPED(&kr6->prefix) ||
1326
	    IN6_IS_ADDR_V4COMPAT(&kr6->prefix))
1327
		return (0);
1328
1329
	/*
1330
	 * Consider networks with nexthop loopback as not redistributable.
1331
	 */
1332
	if (IN6_IS_ADDR_LOOPBACK(&kr6->nexthop))
1333
		return (0);
1334
1335
	/*
1336
	 * never allow ::/0; the default route can only be redistributed
1337
	 * with announce default.
1338
	 */
1339
	if (kr6->prefixlen == 0 &&
1340
	    memcmp(&kr6->prefix, &in6addr_any, sizeof(struct in6_addr)) == 0)
1341
		return (0);
1342
1343
	match = kr_net_match6(kt, kr6);
1344
	if (match == NULL) {
1345
		if (!(kr6->flags & F_REDISTRIBUTED))
1346
			return (0);	/* no match, don't redistribute */
1347
		/* route no longer matches but is redistributed, so remove */
1348
		kr6->flags &= ~F_REDISTRIBUTED;
1349
		type = IMSG_NETWORK_REMOVE;
1350
	} else
1351
		kr6->flags |= F_REDISTRIBUTED;
1352
sendit:
1353
	bzero(&net, sizeof(net));
1354
	net.prefix.aid = AID_INET6;
1355
	memcpy(&net.prefix.v6, &kr6->prefix, sizeof(struct in6_addr));
1356
	net.prefixlen = kr6->prefixlen;
1357
	net.rtlabel = kr6->labelid;
1358
	net.rtableid = kt->rtableid;
1359
1360
	return (send_network(type, &net, match ? &match->net.attrset : NULL));
1361
}
1362
1363
int
1364
kr_reload(void)
1365
{
1366
	struct ktable		*kt;
1367
	struct kroute_node	*kr;
1368
	struct kroute6_node	*kr6;
1369
	struct knexthop_node	*nh;
1370
	struct network		*n;
1371
	u_int			 rid;
1372
	int			 hasdyn = 0;
1373
1374
	for (rid = 0; rid < krt_size; rid++) {
1375
		if ((kt = ktable_get(rid)) == NULL)
1376
			continue;
1377
1378
		RB_FOREACH(nh, knexthop_tree, KT2KNT(kt))
1379
			knexthop_validate(kt, nh);
1380
1381
		TAILQ_FOREACH(n, &kt->krn, entry)
1382
			if (n->net.type == NETWORK_DEFAULT) {
1383
				if (send_network(IMSG_NETWORK_ADD, &n->net,
1384
				    &n->net.attrset))
1385
					return (-1);
1386
			} else
1387
				hasdyn = 1;
1388
1389
		if (hasdyn) {
1390
			/* only evaluate the full tree if we need to */
1391
			RB_FOREACH(kr, kroute_tree, &kt->krt)
1392
				kr_redistribute(IMSG_NETWORK_ADD, kt, &kr->r);
1393
			RB_FOREACH(kr6, kroute6_tree, &kt->krt6)
1394
				kr_redistribute6(IMSG_NETWORK_ADD, kt, &kr6->r);
1395
		}
1396
	}
1397
1398
	return (0);
1399
}
1400
1401
struct kroute_full *
1402
kr_tofull(struct kroute *kr)
1403
{
1404
	static struct kroute_full	kf;
1405
1406
	bzero(&kf, sizeof(kf));
1407
1408
	kf.prefix.aid = AID_INET;
1409
	kf.prefix.v4.s_addr = kr->prefix.s_addr;
1410
	kf.nexthop.aid = AID_INET;
1411
	kf.nexthop.v4.s_addr = kr->nexthop.s_addr;
1412
	strlcpy(kf.label, rtlabel_id2name(kr->labelid), sizeof(kf.label));
1413
	kf.labelid = kr->labelid;
1414
	kf.flags = kr->flags;
1415
	kf.ifindex = kr->ifindex;
1416
	kf.prefixlen = kr->prefixlen;
1417
	kf.priority = kr->priority;
1418
1419
	return (&kf);
1420
}
1421
1422
struct kroute_full *
1423
kr6_tofull(struct kroute6 *kr6)
1424
{
1425
	static struct kroute_full	kf;
1426
1427
	bzero(&kf, sizeof(kf));
1428
1429
	kf.prefix.aid = AID_INET6;
1430
	memcpy(&kf.prefix.v6, &kr6->prefix, sizeof(struct in6_addr));
1431
	kf.nexthop.aid = AID_INET6;
1432
	memcpy(&kf.nexthop.v6, &kr6->nexthop, sizeof(struct in6_addr));
1433
	strlcpy(kf.label, rtlabel_id2name(kr6->labelid), sizeof(kf.label));
1434
	kf.labelid = kr6->labelid;
1435
	kf.flags = kr6->flags;
1436
	kf.ifindex = kr6->ifindex;
1437
	kf.prefixlen = kr6->prefixlen;
1438
	kf.priority = kr6->priority;
1439
1440
	return (&kf);
1441
}
1442
1443
/*
1444
 * RB-tree compare functions
1445
 */
1446
1447
int
1448
kroute_compare(struct kroute_node *a, struct kroute_node *b)
1449
{
1450
	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
1451
		return (-1);
1452
	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
1453
		return (1);
1454
	if (a->r.prefixlen < b->r.prefixlen)
1455
		return (-1);
1456
	if (a->r.prefixlen > b->r.prefixlen)
1457
		return (1);
1458
1459
	/* if the priority is RTP_ANY finish on the first address hit */
1460
	if (a->r.priority == RTP_ANY || b->r.priority == RTP_ANY)
1461
		return (0);
1462
	if (a->r.priority < b->r.priority)
1463
		return (-1);
1464
	if (a->r.priority > b->r.priority)
1465
		return (1);
1466
	return (0);
1467
}
1468
1469
int
1470
kroute6_compare(struct kroute6_node *a, struct kroute6_node *b)
1471
{
1472
	int i;
1473
1474
	for (i = 0; i < 16; i++) {
1475
		if (a->r.prefix.s6_addr[i] < b->r.prefix.s6_addr[i])
1476
			return (-1);
1477
		if (a->r.prefix.s6_addr[i] > b->r.prefix.s6_addr[i])
1478
			return (1);
1479
	}
1480
1481
	if (a->r.prefixlen < b->r.prefixlen)
1482
		return (-1);
1483
	if (a->r.prefixlen > b->r.prefixlen)
1484
		return (1);
1485
1486
	/* if the priority is RTP_ANY finish on the first address hit */
1487
	if (a->r.priority == RTP_ANY || b->r.priority == RTP_ANY)
1488
		return (0);
1489
	if (a->r.priority < b->r.priority)
1490
		return (-1);
1491
	if (a->r.priority > b->r.priority)
1492
		return (1);
1493
	return (0);
1494
}
1495
1496
int
1497
knexthop_compare(struct knexthop_node *a, struct knexthop_node *b)
1498
{
1499
	int	i;
1500
1501
	if (a->nexthop.aid != b->nexthop.aid)
1502
		return (b->nexthop.aid - a->nexthop.aid);
1503
1504
	switch (a->nexthop.aid) {
1505
	case AID_INET:
1506
		if (ntohl(a->nexthop.v4.s_addr) < ntohl(b->nexthop.v4.s_addr))
1507
			return (-1);
1508
		if (ntohl(a->nexthop.v4.s_addr) > ntohl(b->nexthop.v4.s_addr))
1509
			return (1);
1510
		break;
1511
	case AID_INET6:
1512
		for (i = 0; i < 16; i++) {
1513
			if (a->nexthop.v6.s6_addr[i] < b->nexthop.v6.s6_addr[i])
1514
				return (-1);
1515
			if (a->nexthop.v6.s6_addr[i] > b->nexthop.v6.s6_addr[i])
1516
				return (1);
1517
		}
1518
		break;
1519
	default:
1520
		fatalx("knexthop_compare: unknown AF");
1521
	}
1522
1523
	return (0);
1524
}
1525
1526
int
1527
kif_compare(struct kif_node *a, struct kif_node *b)
1528
{
1529
	return (b->k.ifindex - a->k.ifindex);
1530
}
1531
1532
1533
/*
1534
 * tree management functions
1535
 */
1536
1537
struct kroute_node *
1538
kroute_find(struct ktable *kt, in_addr_t prefix, u_int8_t prefixlen,
1539
    u_int8_t prio)
1540
{
1541
	struct kroute_node	s;
1542
	struct kroute_node	*kn, *tmp;
1543
1544
	s.r.prefix.s_addr = prefix;
1545
	s.r.prefixlen = prefixlen;
1546
	s.r.priority = prio;
1547
1548
	kn = RB_FIND(kroute_tree, &kt->krt, &s);
1549
	if (kn && prio == RTP_ANY) {
1550
		tmp = RB_PREV(kroute_tree, &kt->krt, kn);
1551
		while (tmp) {
1552
			if (kroute_compare(&s, tmp) == 0)
1553
				kn = tmp;
1554
			else
1555
				break;
1556
			tmp = RB_PREV(kroute_tree, &kt->krt, kn);
1557
		}
1558
	}
1559
	return (kn);
1560
}
1561
1562
struct kroute_node *
1563
kroute_matchgw(struct kroute_node *kr, struct sockaddr_in *sa_in)
1564
{
1565
	in_addr_t	nexthop;
1566
1567
	if (sa_in == NULL) {
1568
		log_warnx("kroute_matchgw: no nexthop defined");
1569
		return (NULL);
1570
	}
1571
	nexthop = sa_in->sin_addr.s_addr;
1572
1573
	while (kr) {
1574
		if (kr->r.nexthop.s_addr == nexthop)
1575
			return (kr);
1576
		kr = kr->next;
1577
	}
1578
1579
	return (NULL);
1580
}
1581
1582
int
1583
kroute_insert(struct ktable *kt, struct kroute_node *kr)
1584
{
1585
	struct kroute_node	*krm;
1586
	struct knexthop_node	*h;
1587
	in_addr_t		 mask, ina;
1588
1589
	if ((krm = RB_INSERT(kroute_tree, &kt->krt, kr)) != NULL) {
1590
		/* multipath route, add at end of list */
1591
		while (krm->next != NULL)
1592
			krm = krm->next;
1593
		krm->next = kr;
1594
		kr->next = NULL; /* to be sure */
1595
	}
1596
1597
	/* XXX this is wrong for nexthop validated via BGP */
1598
	if (kr->r.flags & F_KERNEL) {
1599
		mask = prefixlen2mask(kr->r.prefixlen);
1600
		ina = ntohl(kr->r.prefix.s_addr);
1601
		RB_FOREACH(h, knexthop_tree, KT2KNT(kt))
1602
			if (h->nexthop.aid == AID_INET &&
1603
			    (ntohl(h->nexthop.v4.s_addr) & mask) == ina)
1604
				knexthop_validate(kt, h);
1605
1606
		if (kr->r.flags & F_CONNECTED)
1607
			if (kif_kr_insert(kr) == -1)
1608
				return (-1);
1609
1610
		if (krm == NULL)
1611
			/* redistribute multipath routes only once */
1612
			kr_redistribute(IMSG_NETWORK_ADD, kt, &kr->r);
1613
	}
1614
	return (0);
1615
}
1616
1617
1618
int
1619
kroute_remove(struct ktable *kt, struct kroute_node *kr)
1620
{
1621
	struct kroute_node	*krm;
1622
	struct knexthop_node	*s;
1623
1624
	if ((krm = RB_FIND(kroute_tree, &kt->krt, kr)) == NULL) {
1625
		log_warnx("kroute_remove failed to find %s/%u",
1626
		    inet_ntoa(kr->r.prefix), kr->r.prefixlen);
1627
		return (-1);
1628
	}
1629
1630
	if (krm == kr) {
1631
		/* head element */
1632
		if (RB_REMOVE(kroute_tree, &kt->krt, kr) == NULL) {
1633
			log_warnx("kroute_remove failed for %s/%u",
1634
			    inet_ntoa(kr->r.prefix), kr->r.prefixlen);
1635
			return (-1);
1636
		}
1637
		if (kr->next != NULL) {
1638
			if (RB_INSERT(kroute_tree, &kt->krt, kr->next) !=
1639
			    NULL) {
1640
				log_warnx("kroute_remove failed to add %s/%u",
1641
				    inet_ntoa(kr->r.prefix), kr->r.prefixlen);
1642
				return (-1);
1643
			}
1644
		}
1645
	} else {
1646
		/* somewhere in the list */
1647
		while (krm->next != kr && krm->next != NULL)
1648
			krm = krm->next;
1649
		if (krm->next == NULL) {
1650
			log_warnx("kroute_remove multipath list corrupted "
1651
			    "for %s/%u", inet_ntoa(kr->r.prefix),
1652
			    kr->r.prefixlen);
1653
			return (-1);
1654
		}
1655
		krm->next = kr->next;
1656
	}
1657
1658
	/* check whether a nexthop depends on this kroute */
1659
	if (kr->r.flags & F_NEXTHOP)
1660
		RB_FOREACH(s, knexthop_tree, KT2KNT(kt))
1661
			if (s->kroute == kr)
1662
				knexthop_validate(kt, s);
1663
1664
	if (kr->r.flags & F_KERNEL && kr == krm && kr->next == NULL)
1665
		/* again remove only once */
1666
		kr_redistribute(IMSG_NETWORK_REMOVE, kt, &kr->r);
1667
1668
	if (kr->r.flags & F_CONNECTED)
1669
		if (kif_kr_remove(kr) == -1) {
1670
			free(kr);
1671
			return (-1);
1672
		}
1673
1674
	free(kr);
1675
	return (0);
1676
}
1677
1678
void
1679
kroute_clear(struct ktable *kt)
1680
{
1681
	struct kroute_node	*kr;
1682
1683
	while ((kr = RB_MIN(kroute_tree, &kt->krt)) != NULL)
1684
		kroute_remove(kt, kr);
1685
}
1686
1687
struct kroute6_node *
1688
kroute6_find(struct ktable *kt, const struct in6_addr *prefix,
1689
    u_int8_t prefixlen, u_int8_t prio)
1690
{
1691
	struct kroute6_node	s;
1692
	struct kroute6_node	*kn6, *tmp;
1693
1694
	memcpy(&s.r.prefix, prefix, sizeof(struct in6_addr));
1695
	s.r.prefixlen = prefixlen;
1696
	s.r.priority = prio;
1697
1698
	kn6 = RB_FIND(kroute6_tree, &kt->krt6, &s);
1699
	if (kn6 && prio == RTP_ANY) {
1700
		tmp = RB_PREV(kroute6_tree, &kt->krt6, kn6);
1701
		while (tmp) {
1702
			if (kroute6_compare(&s, tmp) == 0)
1703
				kn6 = tmp;
1704
			else
1705
				break;
1706
			tmp = RB_PREV(kroute6_tree, &kt->krt6, kn6);
1707
		}
1708
	}
1709
	return (kn6);
1710
}
1711
1712
struct kroute6_node *
1713
kroute6_matchgw(struct kroute6_node *kr, struct sockaddr_in6 *sa_in6)
1714
{
1715
	struct in6_addr	nexthop;
1716
1717
	if (sa_in6 == NULL) {
1718
		log_warnx("kroute6_matchgw: no nexthop defined");
1719
		return (NULL);
1720
	}
1721
	memcpy(&nexthop, &sa_in6->sin6_addr, sizeof(nexthop));
1722
1723
	while (kr) {
1724
		if (memcmp(&kr->r.nexthop, &nexthop, sizeof(nexthop)) == 0)
1725
			return (kr);
1726
		kr = kr->next;
1727
	}
1728
1729
	return (NULL);
1730
}
1731
1732
int
1733
kroute6_insert(struct ktable *kt, struct kroute6_node *kr)
1734
{
1735
	struct kroute6_node	*krm;
1736
	struct knexthop_node	*h;
1737
	struct in6_addr		 ina, inb;
1738
1739
	if ((krm = RB_INSERT(kroute6_tree, &kt->krt6, kr)) != NULL) {
1740
		/* multipath route, add at end of list */
1741
		while (krm->next != NULL)
1742
			krm = krm->next;
1743
		krm->next = kr;
1744
		kr->next = NULL; /* to be sure */
1745
	}
1746
1747
	/* XXX this is wrong for nexthop validated via BGP */
1748
	if (kr->r.flags & F_KERNEL) {
1749
		inet6applymask(&ina, &kr->r.prefix, kr->r.prefixlen);
1750
		RB_FOREACH(h, knexthop_tree, KT2KNT(kt))
1751
			if (h->nexthop.aid == AID_INET6) {
1752
				inet6applymask(&inb, &h->nexthop.v6,
1753
				    kr->r.prefixlen);
1754
				if (memcmp(&ina, &inb, sizeof(ina)) == 0)
1755
					knexthop_validate(kt, h);
1756
			}
1757
1758
		if (kr->r.flags & F_CONNECTED)
1759
			if (kif_kr6_insert(kr) == -1)
1760
				return (-1);
1761
1762
		if (krm == NULL)
1763
			/* redistribute multipath routes only once */
1764
			kr_redistribute6(IMSG_NETWORK_ADD, kt, &kr->r);
1765
	}
1766
1767
	return (0);
1768
}
1769
1770
int
1771
kroute6_remove(struct ktable *kt, struct kroute6_node *kr)
1772
{
1773
	struct kroute6_node	*krm;
1774
	struct knexthop_node	*s;
1775
1776
	if ((krm = RB_FIND(kroute6_tree, &kt->krt6, kr)) == NULL) {
1777
		log_warnx("kroute6_remove failed for %s/%u",
1778
		    log_in6addr(&kr->r.prefix), kr->r.prefixlen);
1779
		return (-1);
1780
	}
1781
1782
	if (krm == kr) {
1783
		/* head element */
1784
		if (RB_REMOVE(kroute6_tree, &kt->krt6, kr) == NULL) {
1785
			log_warnx("kroute6_remove failed for %s/%u",
1786
			    log_in6addr(&kr->r.prefix), kr->r.prefixlen);
1787
			return (-1);
1788
		}
1789
		if (kr->next != NULL) {
1790
			if (RB_INSERT(kroute6_tree, &kt->krt6, kr->next) !=
1791
			    NULL) {
1792
				log_warnx("kroute6_remove failed to add %s/%u",
1793
				    log_in6addr(&kr->r.prefix),
1794
				    kr->r.prefixlen);
1795
				return (-1);
1796
			}
1797
		}
1798
	} else {
1799
		/* somewhere in the list */
1800
		while (krm->next != kr && krm->next != NULL)
1801
			krm = krm->next;
1802
		if (krm->next == NULL) {
1803
			log_warnx("kroute6_remove multipath list corrupted "
1804
			    "for %s/%u", log_in6addr(&kr->r.prefix),
1805
			    kr->r.prefixlen);
1806
			return (-1);
1807
		}
1808
		krm->next = kr->next;
1809
	}
1810
1811
	/* check whether a nexthop depends on this kroute */
1812
	if (kr->r.flags & F_NEXTHOP)
1813
		RB_FOREACH(s, knexthop_tree, KT2KNT(kt))
1814
			if (s->kroute == kr)
1815
				knexthop_validate(kt, s);
1816
1817
	if (kr->r.flags & F_KERNEL && kr == krm && kr->next == NULL)
1818
		/* again remove only once */
1819
		kr_redistribute6(IMSG_NETWORK_REMOVE, kt, &kr->r);
1820
1821
	if (kr->r.flags & F_CONNECTED)
1822
		if (kif_kr6_remove(kr) == -1) {
1823
			free(kr);
1824
			return (-1);
1825
		}
1826
1827
	free(kr);
1828
	return (0);
1829
}
1830
1831
void
1832
kroute6_clear(struct ktable *kt)
1833
{
1834
	struct kroute6_node	*kr;
1835
1836
	while ((kr = RB_MIN(kroute6_tree, &kt->krt6)) != NULL)
1837
		kroute6_remove(kt, kr);
1838
}
1839
1840
struct knexthop_node *
1841
knexthop_find(struct ktable *kt, struct bgpd_addr *addr)
1842
{
1843
	struct knexthop_node	s;
1844
1845
	bzero(&s, sizeof(s));
1846
	memcpy(&s.nexthop, addr, sizeof(s.nexthop));
1847
1848
	return (RB_FIND(knexthop_tree, KT2KNT(kt), &s));
1849
}
1850
1851
int
1852
knexthop_insert(struct ktable *kt, struct knexthop_node *kn)
1853
{
1854
	if (RB_INSERT(knexthop_tree, KT2KNT(kt), kn) != NULL) {
1855
		log_warnx("knexthop_insert failed for %s",
1856
		    log_addr(&kn->nexthop));
1857
		free(kn);
1858
		return (-1);
1859
	}
1860
1861
	knexthop_validate(kt, kn);
1862
1863
	return (0);
1864
}
1865
1866
int
1867
knexthop_remove(struct ktable *kt, struct knexthop_node *kn)
1868
{
1869
	kroute_detach_nexthop(kt, kn);
1870
1871
	if (RB_REMOVE(knexthop_tree, KT2KNT(kt), kn) == NULL) {
1872
		log_warnx("knexthop_remove failed for %s",
1873
		    log_addr(&kn->nexthop));
1874
		return (-1);
1875
	}
1876
1877
	free(kn);
1878
	return (0);
1879
}
1880
1881
void
1882
knexthop_clear(struct ktable *kt)
1883
{
1884
	struct knexthop_node	*kn;
1885
1886
	while ((kn = RB_MIN(knexthop_tree, KT2KNT(kt))) != NULL)
1887
		knexthop_remove(kt, kn);
1888
}
1889
1890
struct kif_node *
1891
kif_find(int ifindex)
1892
{
1893
	struct kif_node	s;
1894
1895
	bzero(&s, sizeof(s));
1896
	s.k.ifindex = ifindex;
1897
1898
	return (RB_FIND(kif_tree, &kit, &s));
1899
}
1900
1901
int
1902
kif_insert(struct kif_node *kif)
1903
{
1904
	LIST_INIT(&kif->kroute_l);
1905
	LIST_INIT(&kif->kroute6_l);
1906
1907
	if (RB_INSERT(kif_tree, &kit, kif) != NULL) {
1908
		log_warnx("RB_INSERT(kif_tree, &kit, kif)");
1909
		free(kif);
1910
		return (-1);
1911
	}
1912
1913
	return (0);
1914
}
1915
1916
int
1917
kif_remove(struct kif_node *kif)
1918
{
1919
	struct ktable	*kt;
1920
	struct kif_kr	*kkr;
1921
	struct kif_kr6	*kkr6;
1922
1923
	if (RB_REMOVE(kif_tree, &kit, kif) == NULL) {
1924
		log_warnx("RB_REMOVE(kif_tree, &kit, kif)");
1925
		return (-1);
1926
	}
1927
1928
	if ((kt = ktable_get(/* XXX */ 0)) == NULL)
1929
		goto done;
1930
1931
	while ((kkr = LIST_FIRST(&kif->kroute_l)) != NULL) {
1932
		LIST_REMOVE(kkr, entry);
1933
		kkr->kr->r.flags &= ~F_NEXTHOP;
1934
		kroute_remove(kt, kkr->kr);
1935
		free(kkr);
1936
	}
1937
1938
	while ((kkr6 = LIST_FIRST(&kif->kroute6_l)) != NULL) {
1939
		LIST_REMOVE(kkr6, entry);
1940
		kkr6->kr->r.flags &= ~F_NEXTHOP;
1941
		kroute6_remove(kt, kkr6->kr);
1942
		free(kkr6);
1943
	}
1944
done:
1945
	free(kif);
1946
	return (0);
1947
}
1948
1949
void
1950
kif_clear(void)
1951
{
1952
	struct kif_node	*kif;
1953
1954
	while ((kif = RB_MIN(kif_tree, &kit)) != NULL)
1955
		kif_remove(kif);
1956
}
1957
1958
int
kif_kr_insert(struct kroute_node *kr)
{
	struct kif_node	*kif;
	struct kif_kr	*kkr;

	if ((kif = kif_find(kr->r.ifindex)) == NULL) {
		if (kr->r.ifindex)
			log_warnx("%s: interface with index %u not found",
			    __func__, kr->r.ifindex);
		return (0);
	}

	if (kif->k.nh_reachable)
		kr->r.flags &= ~F_DOWN;
	else
		kr->r.flags |= F_DOWN;

	if ((kkr = calloc(1, sizeof(struct kif_kr))) == NULL) {
		log_warn("kif_kr_insert");
		return (-1);
	}

	kkr->kr = kr;

	LIST_INSERT_HEAD(&kif->kroute_l, kkr, entry);

	return (0);
}

int
kif_kr_remove(struct kroute_node *kr)
{
	struct kif_node	*kif;
	struct kif_kr	*kkr;

	if ((kif = kif_find(kr->r.ifindex)) == NULL) {
		if (kr->r.ifindex)
			log_warnx("%s: interface with index %u not found",
			    __func__, kr->r.ifindex);
		return (0);
	}

	for (kkr = LIST_FIRST(&kif->kroute_l); kkr != NULL && kkr->kr != kr;
	    kkr = LIST_NEXT(kkr, entry))
		;	/* nothing */

	if (kkr == NULL) {
		log_warnx("can't remove connected route from interface "
		    "with index %u: not found", kr->r.ifindex);
		return (-1);
	}

	LIST_REMOVE(kkr, entry);
	free(kkr);

	return (0);
}

int
kif_kr6_insert(struct kroute6_node *kr)
{
	struct kif_node	*kif;
	struct kif_kr6	*kkr6;

	if ((kif = kif_find(kr->r.ifindex)) == NULL) {
		if (kr->r.ifindex)
			log_warnx("%s: interface with index %u not found",
			    __func__, kr->r.ifindex);
		return (0);
	}

	if (kif->k.nh_reachable)
		kr->r.flags &= ~F_DOWN;
	else
		kr->r.flags |= F_DOWN;

	if ((kkr6 = calloc(1, sizeof(struct kif_kr6))) == NULL) {
		log_warn("kif_kr6_insert");
		return (-1);
	}

	kkr6->kr = kr;

	LIST_INSERT_HEAD(&kif->kroute6_l, kkr6, entry);

	return (0);
}

int
kif_kr6_remove(struct kroute6_node *kr)
{
	struct kif_node	*kif;
	struct kif_kr6	*kkr6;

	if ((kif = kif_find(kr->r.ifindex)) == NULL) {
		if (kr->r.ifindex)
			log_warnx("%s: interface with index %u not found",
			    __func__, kr->r.ifindex);
		return (0);
	}

	for (kkr6 = LIST_FIRST(&kif->kroute6_l); kkr6 != NULL && kkr6->kr != kr;
	    kkr6 = LIST_NEXT(kkr6, entry))
		;	/* nothing */

	if (kkr6 == NULL) {
		log_warnx("can't remove connected route from interface "
		    "with index %u: not found", kr->r.ifindex);
		return (-1);
	}

	LIST_REMOVE(kkr6, entry);
	free(kkr6);

	return (0);
}

/*
 * nexthop validation
 */

int
kif_validate(struct kif *kif)
{
	if (!(kif->flags & IFF_UP))
		return (0);

	/*
	 * we treat link_state == LINK_STATE_UNKNOWN as valid,
	 * not all interfaces have a concept of "link state" and/or
	 * do not report up
	 */

	if (kif->link_state == LINK_STATE_DOWN)
		return (0);

	return (1);
}

int
kroute_validate(struct kroute *kr)
{
	struct kif_node		*kif;

	if (kr->flags & (F_REJECT | F_BLACKHOLE))
		return (0);

	if ((kif = kif_find(kr->ifindex)) == NULL) {
		if (kr->ifindex)
			log_warnx("%s: interface with index %d not found, "
			    "referenced from route for %s/%u", __func__,
			    kr->ifindex, inet_ntoa(kr->prefix),
			    kr->prefixlen);
		return (1);
	}

	return (kif->k.nh_reachable);
}

int
kroute6_validate(struct kroute6 *kr)
{
	struct kif_node		*kif;

	if (kr->flags & (F_REJECT | F_BLACKHOLE))
		return (0);

	if ((kif = kif_find(kr->ifindex)) == NULL) {
		if (kr->ifindex)
			log_warnx("%s: interface with index %d not found, "
			    "referenced from route for %s/%u", __func__,
			    kr->ifindex, log_in6addr(&kr->prefix),
			    kr->prefixlen);
		return (1);
	}

	return (kif->k.nh_reachable);
}

void
knexthop_validate(struct ktable *kt, struct knexthop_node *kn)
{
	void			*oldk;
	struct kroute_node	*kr;
	struct kroute6_node	*kr6;

	oldk = kn->kroute;
	kroute_detach_nexthop(kt, kn);

	switch (kn->nexthop.aid) {
	case AID_INET:
		kr = kroute_match(kt, kn->nexthop.v4.s_addr, 0);

		if (kr) {
			kn->kroute = kr;
			kr->r.flags |= F_NEXTHOP;
		}

		/*
		 * Send an update if the nexthop route changed under us.
		 * If the route remains the same then the NH state has not
		 * changed. State changes are tracked by knexthop_track().
		 */
		if (kr != oldk)
			knexthop_send_update(kn);
		break;
	case AID_INET6:
		kr6 = kroute6_match(kt, &kn->nexthop.v6, 0);

		if (kr6) {
			kn->kroute = kr6;
			kr6->r.flags |= F_NEXTHOP;
		}

		if (kr6 != oldk)
			knexthop_send_update(kn);
		break;
	}
}

void
knexthop_track(struct ktable *kt, void *krp)
{
	struct knexthop_node	*kn;

	RB_FOREACH(kn, knexthop_tree, KT2KNT(kt))
		if (kn->kroute == krp)
			knexthop_send_update(kn);
}

void
knexthop_send_update(struct knexthop_node *kn)
{
	struct kroute_nexthop	 n;
	struct kroute_node	*kr;
	struct kroute6_node	*kr6;

	bzero(&n, sizeof(n));
	memcpy(&n.nexthop, &kn->nexthop, sizeof(n.nexthop));

	if (kn->kroute == NULL) {
		n.valid = 0;	/* NH is not valid */
		send_nexthop_update(&n);
		return;
	}

	switch (kn->nexthop.aid) {
	case AID_INET:
		kr = kn->kroute;
		n.valid = kroute_validate(&kr->r);
		n.connected = kr->r.flags & F_CONNECTED;
		if (kr->r.nexthop.s_addr != 0) {
			n.gateway.aid = AID_INET;
			n.gateway.v4.s_addr = kr->r.nexthop.s_addr;
		}
		if (n.connected) {
			n.net.aid = AID_INET;
			n.net.v4.s_addr = kr->r.prefix.s_addr;
			n.netlen = kr->r.prefixlen;
		}
		break;
	case AID_INET6:
		kr6 = kn->kroute;
		n.valid = kroute6_validate(&kr6->r);
		n.connected = kr6->r.flags & F_CONNECTED;
		if (memcmp(&kr6->r.nexthop, &in6addr_any,
		    sizeof(struct in6_addr)) != 0) {
			n.gateway.aid = AID_INET6;
			memcpy(&n.gateway.v6, &kr6->r.nexthop,
			    sizeof(struct in6_addr));
		}
		if (n.connected) {
			n.net.aid = AID_INET6;
			memcpy(&n.net.v6, &kr6->r.prefix,
			    sizeof(struct in6_addr));
			n.netlen = kr6->r.prefixlen;
		}
		break;
	}
	send_nexthop_update(&n);
}

struct kroute_node *
kroute_match(struct ktable *kt, in_addr_t key, int matchall)
{
	int			 i;
	struct kroute_node	*kr;
	in_addr_t		 ina;

	ina = ntohl(key);

	/* we will never match the default route */
	for (i = 32; i > 0; i--)
		if ((kr = kroute_find(kt, htonl(ina & prefixlen2mask(i)), i,
		    RTP_ANY)) != NULL)
			if (matchall || bgpd_filternexthop(&kr->r, NULL) == 0)
				return (kr);

	/* if we don't have a match yet, try to find a default route */
	if ((kr = kroute_find(kt, 0, 0, RTP_ANY)) != NULL)
		if (matchall || bgpd_filternexthop(&kr->r, NULL) == 0)
			return (kr);

	return (NULL);
}

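/*
 * Illustrative sketch, not part of kroute.c: kroute_match() above tries
 * the most specific prefix first (/32) and walks down to /1 before
 * falling back to the default route.  The standalone example below uses
 * a hypothetical inline mask computation instead of bgpd's real
 * prefixlen2mask() and prints the candidate networks that would be
 * looked up for 192.0.2.130; it relies on the <arpa/inet.h> and
 * <stdio.h> headers already included at the top of this file.
 */
static void
example_lpm_candidates(void)
{
	in_addr_t	key = inet_addr("192.0.2.130");	/* network order */
	in_addr_t	mask;
	struct in_addr	net;
	int		i;

	for (i = 32; i > 0; i--) {
		/* hypothetical stand-in for prefixlen2mask(i), host order */
		mask = 0xffffffffU << (32 - i);
		net.s_addr = htonl(ntohl(key) & mask);
		printf("lookup %s/%d\n", inet_ntoa(net), i);	/* /32 first */
	}
	printf("lookup 0.0.0.0/0\n");	/* the default route is tried last */
}
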
struct kroute6_node *
kroute6_match(struct ktable *kt, struct in6_addr *key, int matchall)
{
	int			 i;
	struct kroute6_node	*kr6;
	struct in6_addr		 ina;

	/* we will never match the default route */
	for (i = 128; i > 0; i--) {
		inet6applymask(&ina, key, i);
		if ((kr6 = kroute6_find(kt, &ina, i, RTP_ANY)) != NULL)
			if (matchall || bgpd_filternexthop(NULL, &kr6->r) == 0)
				return (kr6);
	}

	/* if we don't have a match yet, try to find a default route */
	if ((kr6 = kroute6_find(kt, &in6addr_any, 0, RTP_ANY)) != NULL)
		if (matchall || bgpd_filternexthop(NULL, &kr6->r) == 0)
			return (kr6);

	return (NULL);
}

void
kroute_detach_nexthop(struct ktable *kt, struct knexthop_node *kn)
{
	struct knexthop_node	*s;
	struct kroute_node	*k;
	struct kroute6_node	*k6;

	if (kn->kroute == NULL)
		return;

	/*
	 * check whether there's another nexthop depending on this kroute;
	 * if not, remove the flag
	 */
	RB_FOREACH(s, knexthop_tree, KT2KNT(kt))
		if (s->kroute == kn->kroute && s != kn)
			break;

	if (s == NULL) {
		switch (kn->nexthop.aid) {
		case AID_INET:
			k = kn->kroute;
			k->r.flags &= ~F_NEXTHOP;
			break;
		case AID_INET6:
			k6 = kn->kroute;
			k6->r.flags &= ~F_NEXTHOP;
			break;
		}
	}

	kn->kroute = NULL;
}

/*
 * misc helpers
 */

int
protect_lo(struct ktable *kt)
{
	struct kroute_node	*kr;
	struct kroute6_node	*kr6;

	/* special protection for 127/8 */
	if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
		log_warn("protect_lo");
		return (-1);
	}
	kr->r.prefix.s_addr = htonl(INADDR_LOOPBACK & IN_CLASSA_NET);
	kr->r.prefixlen = 8;
	kr->r.flags = F_KERNEL|F_CONNECTED;

	if (RB_INSERT(kroute_tree, &kt->krt, kr) != NULL)
		free(kr);	/* kernel route already there, no problem */

	/* special protection for loopback */
	if ((kr6 = calloc(1, sizeof(struct kroute6_node))) == NULL) {
		log_warn("protect_lo");
		return (-1);
	}
	memcpy(&kr6->r.prefix, &in6addr_loopback, sizeof(kr6->r.prefix));
	kr6->r.prefixlen = 128;
	kr6->r.flags = F_KERNEL|F_CONNECTED;

	if (RB_INSERT(kroute6_tree, &kt->krt6, kr6) != NULL)
		free(kr6);	/* kernel route already there, no problem */

	return (0);
}

u_int8_t
prefixlen_classful(in_addr_t ina)
{
	/* it hurt to write this. */

	if (ina >= 0xf0000000U)		/* class E */
		return (32);
	else if (ina >= 0xe0000000U)	/* class D */
		return (4);
	else if (ina >= 0xc0000000U)	/* class C */
		return (24);
	else if (ina >= 0x80000000U)	/* class B */
		return (16);
	else				/* class A */
		return (8);
}

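/*
 * Worked examples for prefixlen_classful() above (illustrative only,
 * values in host byte order): 0x0a000000 (10.0.0.0) is below 0x80000000,
 * so it falls into class A and a /8 is assumed; 0xc6336400 (198.51.100.0)
 * is >= 0xc0000000, so class C and /24; the class D (multicast) range
 * collapses to the /4 that covers 224.0.0.0.
 */
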
u_int8_t
mask2prefixlen(in_addr_t ina)
{
	if (ina == 0)
		return (0);
	else
		return (33 - ffs(ntohl(ina)));
}

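/*
 * Illustrative sketch, not part of kroute.c: mask2prefixlen() above maps
 * a contiguous IPv4 netmask to its prefix length via ffs(3).  For
 * 255.255.255.0 the host-order value is 0xffffff00, ffs() returns 9 (the
 * position of the lowest set bit), and 33 - 9 = 24.  A hypothetical
 * self-check using the headers already included in this file:
 */
static void
example_mask2prefixlen(void)
{
	/* inet_addr() yields network byte order, just like the callers */
	printf("%d\n", mask2prefixlen(inet_addr("255.255.255.0")));	/* 24 */
	printf("%d\n", mask2prefixlen(inet_addr("255.255.0.0")));	/* 16 */
	printf("%d\n", mask2prefixlen(0));				/* 0 */
}
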
u_int8_t
mask2prefixlen6(struct sockaddr_in6 *sa_in6)
{
	u_int8_t	 l = 0, *ap, *ep;

	/*
	 * sin6_len is the size of the sockaddr, so subtract the offset of
	 * the possibly truncated sin6_addr struct.
	 */
	ap = (u_int8_t *)&sa_in6->sin6_addr;
	ep = (u_int8_t *)sa_in6 + sa_in6->sin6_len;
	for (; ap < ep; ap++) {
		/* this "beauty" is adopted from sbin/route/show.c ... */
		switch (*ap) {
		case 0xff:
			l += 8;
			break;
		case 0xfe:
			l += 7;
			return (l);
		case 0xfc:
			l += 6;
			return (l);
		case 0xf8:
			l += 5;
			return (l);
		case 0xf0:
			l += 4;
			return (l);
		case 0xe0:
			l += 3;
			return (l);
		case 0xc0:
			l += 2;
			return (l);
		case 0x80:
			l += 1;
			return (l);
		case 0x00:
			return (l);
		default:
			fatalx("non contiguous inet6 netmask");
		}
	}

	return (l);
}

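/*
 * Worked example for mask2prefixlen6() above (illustrative only): for a
 * /61 netmask the mask bytes are ff ff ff ff ff ff ff f8 00 ..., so
 * seven 0xff bytes contribute 56 bits, the 0xf8 byte adds 5 and returns
 * immediately, giving 61.  A sockaddr truncated by a small sin6_len just
 * ends the walk early, which is why the trailing return (l) is needed.
 */
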
struct in6_addr *
prefixlen2mask6(u_int8_t prefixlen)
{
	static struct in6_addr	mask;
	int			i;

	bzero(&mask, sizeof(mask));
	for (i = 0; i < prefixlen / 8; i++)
		mask.s6_addr[i] = 0xff;
	i = prefixlen % 8;
	if (i)
		mask.s6_addr[prefixlen / 8] = 0xff00 >> i;

	return (&mask);
}

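/*
 * Worked example for prefixlen2mask6() above (illustrative only): for a
 * /20 the first two bytes become 0xff and the partial byte is
 * 0xff00 >> 4, which truncates to 0xf0, i.e. ff:ff:f0:00:...  Note the
 * result points at static storage, so callers such as send_rt6msg()
 * below copy it out before the next call.
 */
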
#define ROUNDUP(a) \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

void
get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
{
	int	i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (addrs & (1 << i)) {
			rti_info[i] = sa;
			sa = (struct sockaddr *)((char *)(sa) +
			    ROUNDUP(sa->sa_len));
		} else
			rti_info[i] = NULL;
	}
}

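/*
 * Illustrative sketch, not part of kroute.c: route(4) messages pack the
 * addresses selected by rtm_addrs back to back, each padded to a long
 * boundary, which is what ROUNDUP() and get_rtaddrs() above unwind.  On
 * a 64-bit system ROUNDUP(16) == 16, ROUNDUP(28) == 32 and ROUNDUP(0)
 * == 8.  A typical caller then indexes the array by the RTAX_* slots,
 * mirroring how fetchtable() and dispatch_rtmsg() use it below:
 */
static void
example_get_rtaddrs(struct rt_msghdr *rtm)
{
	struct sockaddr	*sa, *rti_info[RTAX_MAX];

	sa = (struct sockaddr *)((char *)rtm + rtm->rtm_hdrlen);
	get_rtaddrs(rtm->rtm_addrs, sa, rti_info);
	if (rti_info[RTAX_DST] != NULL && rti_info[RTAX_GATEWAY] != NULL)
		printf("destination and gateway are both present\n");
}
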
void
if_change(u_short ifindex, int flags, struct if_data *ifd)
{
	struct ktable		*kt;
	struct kif_node		*kif;
	struct kif_kr		*kkr;
	struct kif_kr6		*kkr6;
	u_int8_t		 reachable;

	if ((kif = kif_find(ifindex)) == NULL) {
		log_warnx("%s: interface with index %u not found",
		    __func__, ifindex);
		return;
	}

	kif->k.flags = flags;
	kif->k.link_state = ifd->ifi_link_state;
	kif->k.if_type = ifd->ifi_type;
	kif->k.baudrate = ifd->ifi_baudrate;

	send_imsg_session(IMSG_IFINFO, 0, &kif->k, sizeof(kif->k));

	if ((reachable = kif_validate(&kif->k)) == kif->k.nh_reachable)
		return;		/* nothing changed wrt nexthop validity */

	kif->k.nh_reachable = reachable;

	kt = ktable_get(/* XXX */ 0);

	LIST_FOREACH(kkr, &kif->kroute_l, entry) {
		if (reachable)
			kkr->kr->r.flags &= ~F_DOWN;
		else
			kkr->kr->r.flags |= F_DOWN;

		if (kt == NULL)
			continue;

		knexthop_track(kt, kkr->kr);
	}
	LIST_FOREACH(kkr6, &kif->kroute6_l, entry) {
		if (reachable)
			kkr6->kr->r.flags &= ~F_DOWN;
		else
			kkr6->kr->r.flags |= F_DOWN;

		if (kt == NULL)
			continue;

		knexthop_track(kt, kkr6->kr);
	}
}

void
if_announce(void *msg)
{
	struct if_announcemsghdr	*ifan;
	struct kif_node			*kif;

	ifan = msg;

	switch (ifan->ifan_what) {
	case IFAN_ARRIVAL:
		if ((kif = calloc(1, sizeof(struct kif_node))) == NULL) {
			log_warn("if_announce");
			return;
		}

		kif->k.ifindex = ifan->ifan_index;
		strlcpy(kif->k.ifname, ifan->ifan_name, sizeof(kif->k.ifname));
		kif_insert(kif);
		break;
	case IFAN_DEPARTURE:
		kif = kif_find(ifan->ifan_index);
		kif_remove(kif);
		break;
	}
}

/*
 * rtsock related functions
 */

int
send_rtmsg(int fd, int action, struct ktable *kt, struct kroute *kroute,
    u_int8_t fib_prio)
{
	struct iovec		iov[7];
	struct rt_msghdr	hdr;
	struct sockaddr_in	prefix;
	struct sockaddr_in	nexthop;
	struct sockaddr_in	mask;
	struct {
		struct sockaddr_dl	dl;
		char			pad[sizeof(long)];
	}			ifp;
	struct sockaddr_mpls	mpls;
	struct sockaddr_rtlabel	label;
	int			iovcnt = 0;

	if (!kt->fib_sync)
		return (0);

	/* initialize header */
	bzero(&hdr, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_tableid = kt->rtableid;
	hdr.rtm_priority = fib_prio;
	if (kroute->flags & F_BLACKHOLE)
		hdr.rtm_flags |= RTF_BLACKHOLE;
	if (kroute->flags & F_REJECT)
		hdr.rtm_flags |= RTF_REJECT;
	if (action == RTM_CHANGE)	/* reset these flags on change */
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	bzero(&prefix, sizeof(prefix));
	prefix.sin_len = sizeof(prefix);
	prefix.sin_family = AF_INET;
	prefix.sin_addr.s_addr = kroute->prefix.s_addr;
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += sizeof(prefix);
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = sizeof(prefix);

	if (kroute->nexthop.s_addr != 0) {
		bzero(&nexthop, sizeof(nexthop));
		nexthop.sin_len = sizeof(nexthop);
		nexthop.sin_family = AF_INET;
		nexthop.sin_addr.s_addr = kroute->nexthop.s_addr;
		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += sizeof(nexthop);
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = sizeof(nexthop);
	}

	bzero(&mask, sizeof(mask));
	mask.sin_len = sizeof(mask);
	mask.sin_family = AF_INET;
	mask.sin_addr.s_addr = htonl(prefixlen2mask(kroute->prefixlen));
	/* adjust header */
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += sizeof(mask);
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = sizeof(mask);

	if (kt->ifindex) {
		bzero(&ifp, sizeof(ifp));
		ifp.dl.sdl_len = sizeof(struct sockaddr_dl);
		ifp.dl.sdl_family = AF_LINK;
		ifp.dl.sdl_index = kt->ifindex;
		/* adjust header */
		hdr.rtm_addrs |= RTA_IFP;
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_dl));
		/* adjust iovec */
		iov[iovcnt].iov_base = &ifp;
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_dl));
	}

	if (kroute->flags & F_MPLS) {
		bzero(&mpls, sizeof(mpls));
		mpls.smpls_len = sizeof(mpls);
		mpls.smpls_family = AF_MPLS;
		mpls.smpls_label = kroute->mplslabel;
		/* adjust header */
		hdr.rtm_flags |= RTF_MPLS;
		hdr.rtm_mpls = MPLS_OP_PUSH;
		hdr.rtm_addrs |= RTA_SRC;
		hdr.rtm_msglen += sizeof(mpls);
		/* adjust iovec */
		iov[iovcnt].iov_base = &mpls;
		iov[iovcnt++].iov_len = sizeof(mpls);
	}

	if (kroute->labelid) {
		bzero(&label, sizeof(label));
		label.sr_len = sizeof(label);
		strlcpy(label.sr_label, rtlabel_id2name(kroute->labelid),
		    sizeof(label.sr_label));
		/* adjust header */
		hdr.rtm_addrs |= RTA_LABEL;
		hdr.rtm_msglen += sizeof(label);
		/* adjust iovec */
		iov[iovcnt].iov_base = &label;
		iov[iovcnt++].iov_len = sizeof(label);
	}

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    inet_ntoa(kroute->prefix),
				    kroute->prefixlen);
				return (0);
			}
		}
		log_warn("send_rtmsg: action %u, prefix %s/%u", hdr.rtm_type,
		    inet_ntoa(kroute->prefix), kroute->prefixlen);
		return (0);
	}

	return (0);
}

int
send_rt6msg(int fd, int action, struct ktable *kt, struct kroute6 *kroute,
    u_int8_t fib_prio)
{
	struct iovec		iov[5];
	struct rt_msghdr	hdr;
	struct pad {
		struct sockaddr_in6	addr;
		char			pad[sizeof(long)];
	} prefix, nexthop, mask;
	struct sockaddr_rtlabel	label;
	int			iovcnt = 0;

	if (!kt->fib_sync)
		return (0);

	/* initialize header */
	bzero(&hdr, sizeof(hdr));
	hdr.rtm_version = RTM_VERSION;
	hdr.rtm_type = action;
	hdr.rtm_tableid = kt->rtableid;
	hdr.rtm_priority = fib_prio;
	if (kroute->flags & F_BLACKHOLE)
		hdr.rtm_flags |= RTF_BLACKHOLE;
	if (kroute->flags & F_REJECT)
		hdr.rtm_flags |= RTF_REJECT;
	if (action == RTM_CHANGE)	/* reset these flags on change */
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
	hdr.rtm_msglen = sizeof(hdr);
	/* adjust iovec */
	iov[iovcnt].iov_base = &hdr;
	iov[iovcnt++].iov_len = sizeof(hdr);

	bzero(&prefix, sizeof(prefix));
	prefix.addr.sin6_len = sizeof(struct sockaddr_in6);
	prefix.addr.sin6_family = AF_INET6;
	memcpy(&prefix.addr.sin6_addr, &kroute->prefix,
	    sizeof(struct in6_addr));
	/* XXX scope does not matter or? */
	/* adjust header */
	hdr.rtm_addrs |= RTA_DST;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &prefix;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

	if (memcmp(&kroute->nexthop, &in6addr_any, sizeof(struct in6_addr))) {
		bzero(&nexthop, sizeof(nexthop));
		nexthop.addr.sin6_len = sizeof(struct sockaddr_in6);
		nexthop.addr.sin6_family = AF_INET6;
		memcpy(&nexthop.addr.sin6_addr, &kroute->nexthop,
		    sizeof(struct in6_addr));
		/* adjust header */
		hdr.rtm_flags |= RTF_GATEWAY;
		hdr.rtm_addrs |= RTA_GATEWAY;
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
		/* adjust iovec */
		iov[iovcnt].iov_base = &nexthop;
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
	}

	bzero(&mask, sizeof(mask));
	mask.addr.sin6_len = sizeof(struct sockaddr_in6);
	mask.addr.sin6_family = AF_INET6;
	memcpy(&mask.addr.sin6_addr, prefixlen2mask6(kroute->prefixlen),
	    sizeof(struct in6_addr));
	/* adjust header */
	hdr.rtm_addrs |= RTA_NETMASK;
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
	/* adjust iovec */
	iov[iovcnt].iov_base = &mask;
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));

	if (kroute->labelid) {
		bzero(&label, sizeof(label));
		label.sr_len = sizeof(label);
		strlcpy(label.sr_label, rtlabel_id2name(kroute->labelid),
		    sizeof(label.sr_label));
		/* adjust header */
		hdr.rtm_addrs |= RTA_LABEL;
		hdr.rtm_msglen += sizeof(label);
		/* adjust iovec */
		iov[iovcnt].iov_base = &label;
		iov[iovcnt++].iov_len = sizeof(label);
	}

retry:
	if (writev(fd, iov, iovcnt) == -1) {
		if (errno == ESRCH) {
			if (hdr.rtm_type == RTM_CHANGE) {
				hdr.rtm_type = RTM_ADD;
				goto retry;
			} else if (hdr.rtm_type == RTM_DELETE) {
				log_info("route %s/%u vanished before delete",
				    log_in6addr(&kroute->prefix),
				    kroute->prefixlen);
				return (0);
			}
		}
		log_warn("send_rt6msg: action %u, prefix %s/%u", hdr.rtm_type,
		    log_in6addr(&kroute->prefix), kroute->prefixlen);
		return (0);
	}

	return (0);
}

int
fetchtable(struct ktable *kt, u_int8_t fib_prio)
{
	size_t			 len;
	int			 mib[7];
	char			*buf = NULL, *next, *lim;
	struct rt_msghdr	*rtm;
	struct sockaddr		*sa, *gw, *rti_info[RTAX_MAX];
	struct sockaddr_in	*sa_in;
	struct sockaddr_in6	*sa_in6;
	struct sockaddr_rtlabel	*label;
	struct kroute_node	*kr = NULL;
	struct kroute6_node	*kr6 = NULL;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = 0;
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;
	mib[6] = kt->rtableid;

	if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
		if (kt->rtableid != 0 && errno == EINVAL)
			/* table nonexistent */
			return (0);
		log_warn("sysctl");
		return (-1);
	}
	if (len > 0) {
		if ((buf = malloc(len)) == NULL) {
			log_warn("fetchtable");
			return (-1);
		}
		if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
			log_warn("sysctl2");
			free(buf);
			return (-1);
		}
	}

	lim = buf + len;
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)next;
		if (rtm->rtm_version != RTM_VERSION)
			continue;
		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

		if ((sa = rti_info[RTAX_DST]) == NULL)
			continue;

		/* Skip ARP/ND cache and broadcast routes. */
		if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
			continue;

		switch (sa->sa_family) {
		case AF_INET:
			if ((kr = calloc(1, sizeof(struct kroute_node))) ==
			    NULL) {
				log_warn("fetchtable");
				free(buf);
				return (-1);
			}

			kr->r.flags = F_KERNEL;
			kr->r.priority = rtm->rtm_priority;
			kr->r.ifindex = rtm->rtm_index;
			kr->r.prefix.s_addr =
			    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
			sa_in = (struct sockaddr_in *)rti_info[RTAX_NETMASK];
			if (rtm->rtm_flags & RTF_STATIC)
				kr->r.flags |= F_STATIC;
			if (rtm->rtm_flags & RTF_BLACKHOLE)
				kr->r.flags |= F_BLACKHOLE;
			if (rtm->rtm_flags & RTF_REJECT)
				kr->r.flags |= F_REJECT;
			if (rtm->rtm_flags & RTF_DYNAMIC)
				kr->r.flags |= F_DYNAMIC;
			if (sa_in != NULL) {
				if (sa_in->sin_len == 0)
					break;
				kr->r.prefixlen =
				    mask2prefixlen(sa_in->sin_addr.s_addr);
			} else if (rtm->rtm_flags & RTF_HOST)
				kr->r.prefixlen = 32;
			else
				kr->r.prefixlen =
				    prefixlen_classful(kr->r.prefix.s_addr);
			rtlabel_unref(kr->r.labelid);
			kr->r.labelid = 0;
			if ((label = (struct sockaddr_rtlabel *)
			    rti_info[RTAX_LABEL]) != NULL) {
				kr->r.flags |= F_RTLABEL;
				kr->r.labelid =
				    rtlabel_name2id(label->sr_label);
			}
			break;
		case AF_INET6:
			if ((kr6 = calloc(1, sizeof(struct kroute6_node))) ==
			    NULL) {
				log_warn("fetchtable");
				free(buf);
				return (-1);
			}

			kr6->r.flags = F_KERNEL;
			kr6->r.priority = rtm->rtm_priority;
			kr6->r.ifindex = rtm->rtm_index;
			memcpy(&kr6->r.prefix,
			    &((struct sockaddr_in6 *)sa)->sin6_addr,
			    sizeof(kr6->r.prefix));

			sa_in6 = (struct sockaddr_in6 *)rti_info[RTAX_NETMASK];
			if (rtm->rtm_flags & RTF_STATIC)
				kr6->r.flags |= F_STATIC;
			if (rtm->rtm_flags & RTF_BLACKHOLE)
				kr6->r.flags |= F_BLACKHOLE;
			if (rtm->rtm_flags & RTF_REJECT)
				kr6->r.flags |= F_REJECT;
			if (rtm->rtm_flags & RTF_DYNAMIC)
				kr6->r.flags |= F_DYNAMIC;
			if (sa_in6 != NULL) {
				if (sa_in6->sin6_len == 0)
					break;
				kr6->r.prefixlen = mask2prefixlen6(sa_in6);
			} else if (rtm->rtm_flags & RTF_HOST)
				kr6->r.prefixlen = 128;
			else
				fatalx("INET6 route without netmask");
			rtlabel_unref(kr6->r.labelid);
			kr6->r.labelid = 0;
			if ((label = (struct sockaddr_rtlabel *)
			    rti_info[RTAX_LABEL]) != NULL) {
				kr6->r.flags |= F_RTLABEL;
				kr6->r.labelid =
				    rtlabel_name2id(label->sr_label);
			}
			break;
		default:
			continue;
		}

		if ((gw = rti_info[RTAX_GATEWAY]) != NULL)
			switch (gw->sa_family) {
			case AF_INET:
				if (kr == NULL)
					fatalx("v4 gateway for !v4 dst?!");

				if (rtm->rtm_flags & RTF_CONNECTED) {
					kr->r.flags |= F_CONNECTED;
					break;
				}

				kr->r.nexthop.s_addr =
				    ((struct sockaddr_in *)gw)->sin_addr.s_addr;
				break;
			case AF_INET6:
				if (kr6 == NULL)
					fatalx("v6 gateway for !v6 dst?!");

				if (rtm->rtm_flags & RTF_CONNECTED) {
					kr6->r.flags |= F_CONNECTED;
					break;
				}

				memcpy(&kr6->r.nexthop,
				    &((struct sockaddr_in6 *)gw)->sin6_addr,
				    sizeof(kr6->r.nexthop));
				break;
			case AF_LINK:
				/*
				 * Traditional BSD connected routes have
				 * a gateway of type AF_LINK.
				 */
				if (sa->sa_family == AF_INET)
					kr->r.flags |= F_CONNECTED;
				else if (sa->sa_family == AF_INET6)
					kr6->r.flags |= F_CONNECTED;
				break;
			}

		if (sa->sa_family == AF_INET) {
			if (rtm->rtm_priority == fib_prio)  {
				send_rtmsg(kr_state.fd, RTM_DELETE, kt, &kr->r,
				    fib_prio);
				free(kr);
			} else
				kroute_insert(kt, kr);
		} else if (sa->sa_family == AF_INET6) {
			if (rtm->rtm_priority == fib_prio)  {
				send_rt6msg(kr_state.fd, RTM_DELETE, kt,
				    &kr6->r, fib_prio);
				free(kr6);
			} else
				kroute6_insert(kt, kr6);
		}
	}
	free(buf);
	return (0);
}

int
fetchifs(int ifindex)
{
	size_t			 len;
	int			 mib[6];
	char			*buf, *next, *lim;
	struct if_msghdr	 ifm;
	struct kif_node		*kif;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	struct sockaddr_dl	*sdl;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_INET;	/* AF does not matter but AF_INET is shorter */
	mib[4] = NET_RT_IFLIST;
	mib[5] = ifindex;

	if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	if ((buf = malloc(len)) == NULL) {
		log_warn("fetchif");
		return (-1);
	}
	if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
		log_warn("sysctl");
		free(buf);
		return (-1);
	}

	lim = buf + len;
	for (next = buf; next < lim; next += ifm.ifm_msglen) {
		memcpy(&ifm, next, sizeof(ifm));
		if (ifm.ifm_version != RTM_VERSION)
			continue;
		if (ifm.ifm_type != RTM_IFINFO)
			continue;

		sa = (struct sockaddr *)(next + sizeof(ifm));
		get_rtaddrs(ifm.ifm_addrs, sa, rti_info);

		if ((kif = calloc(1, sizeof(struct kif_node))) == NULL) {
			log_warn("fetchifs");
			free(buf);
			return (-1);
		}

		kif->k.ifindex = ifm.ifm_index;
		kif->k.flags = ifm.ifm_flags;
		kif->k.link_state = ifm.ifm_data.ifi_link_state;
		kif->k.if_type = ifm.ifm_data.ifi_type;
		kif->k.baudrate = ifm.ifm_data.ifi_baudrate;
		kif->k.nh_reachable = kif_validate(&kif->k);

		if ((sa = rti_info[RTAX_IFP]) != NULL)
			if (sa->sa_family == AF_LINK) {
				sdl = (struct sockaddr_dl *)sa;
				if (sdl->sdl_nlen >= sizeof(kif->k.ifname))
					memcpy(kif->k.ifname, sdl->sdl_data,
					    sizeof(kif->k.ifname) - 1);
				else if (sdl->sdl_nlen > 0)
					memcpy(kif->k.ifname, sdl->sdl_data,
					    sdl->sdl_nlen);
				/* string already terminated via calloc() */
			}

		kif_insert(kif);
	}
	free(buf);
	return (0);
}

int
dispatch_rtmsg(void)
{
	char			 buf[RT_BUF_SIZE];
	ssize_t			 n;
	char			*next, *lim;
	struct rt_msghdr	*rtm;
	struct if_msghdr	 ifm;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	struct ktable		*kt;

	if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
		if (errno == EAGAIN || errno == EINTR)
			return (0);
		log_warn("dispatch_rtmsg: read error");
		return (-1);
	}

	if (n == 0) {
		log_warnx("routing socket closed");
		return (-1);
	}

	lim = buf + n;
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)next;
		if (lim < next + sizeof(u_short) ||
		    lim < next + rtm->rtm_msglen)
			fatalx("dispatch_rtmsg: partial rtm in buffer");
		if (rtm->rtm_version != RTM_VERSION)
			continue;

		switch (rtm->rtm_type) {
		case RTM_ADD:
		case RTM_CHANGE:
		case RTM_DELETE:
			sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
			get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

			if (rtm->rtm_pid == kr_state.pid) /* caused by us */
				continue;

			if (rtm->rtm_errno)		 /* failed attempts */
				continue;

			if (rtm->rtm_flags & RTF_LLINFO) /* arp cache */
				continue;

			if ((kt = ktable_get(rtm->rtm_tableid)) == NULL)
				continue;

			if (dispatch_rtmsg_addr(rtm, rti_info, kt) == -1)
				return (-1);
			break;
		case RTM_IFINFO:
			memcpy(&ifm, next, sizeof(ifm));
			if_change(ifm.ifm_index, ifm.ifm_flags,
			    &ifm.ifm_data);
			break;
		case RTM_IFANNOUNCE:
			if_announce(next);
			break;
		default:
			/* ignore for now */
			break;
		}
	}
	return (0);
}

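/*
 * Illustrative sketch, not part of kroute.c: dispatch_rtmsg() above is
 * driven by a routing socket that is opened elsewhere in this file and
 * kept in kr_state.fd.  A minimal standalone consumer would look roughly
 * like this; the buffer size is hypothetical and, unlike the loop above,
 * only the first message of each read is inspected.
 */
static void
example_rtsock_loop(void)
{
	struct rt_msghdr	*rtm;
	char			 buf[2048];	/* hypothetical size */
	ssize_t			 n;
	int			 fd;

	if ((fd = socket(AF_ROUTE, SOCK_RAW, 0)) == -1)
		return;
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		rtm = (struct rt_msghdr *)buf;
		if (rtm->rtm_version == RTM_VERSION)
			printf("rtm_type %d, %zd bytes read\n",
			    rtm->rtm_type, n);
	}
	close(fd);
}
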
int
dispatch_rtmsg_addr(struct rt_msghdr *rtm, struct sockaddr *rti_info[RTAX_MAX],
    struct ktable *kt)
{
	struct sockaddr		*sa;
	struct sockaddr_in	*sa_in;
	struct sockaddr_in6	*sa_in6;
	struct kroute_node	*kr;
	struct kroute6_node	*kr6;
	struct bgpd_addr	 prefix;
	int			 flags, oflags, mpath = 0, changed = 0;
	u_int16_t		 ifindex;
	u_int8_t		 prefixlen;
	u_int8_t		 prio;

	flags = F_KERNEL;
	ifindex = 0;
	prefixlen = 0;
	bzero(&prefix, sizeof(prefix));

	if ((sa = rti_info[RTAX_DST]) == NULL) {
		log_warnx("empty route message");
		return (0);
	}

	if (rtm->rtm_flags & RTF_STATIC)
		flags |= F_STATIC;
	if (rtm->rtm_flags & RTF_BLACKHOLE)
		flags |= F_BLACKHOLE;
	if (rtm->rtm_flags & RTF_REJECT)
		flags |= F_REJECT;
	if (rtm->rtm_flags & RTF_DYNAMIC)
		flags |= F_DYNAMIC;
#ifdef RTF_MPATH
	if (rtm->rtm_flags & RTF_MPATH)
		mpath = 1;
#endif

	prio = rtm->rtm_priority;
	switch (sa->sa_family) {
	case AF_INET:
		prefix.aid = AID_INET;
		prefix.v4.s_addr = ((struct sockaddr_in *)sa)->sin_addr.s_addr;
		sa_in = (struct sockaddr_in *)rti_info[RTAX_NETMASK];
		if (sa_in != NULL) {
			if (sa_in->sin_len != 0)
				prefixlen = mask2prefixlen(
				    sa_in->sin_addr.s_addr);
		} else if (rtm->rtm_flags & RTF_HOST)
			prefixlen = 32;
		else
			prefixlen =
			    prefixlen_classful(prefix.v4.s_addr);
		break;
	case AF_INET6:
		prefix.aid = AID_INET6;
		memcpy(&prefix.v6, &((struct sockaddr_in6 *)sa)->sin6_addr,
		    sizeof(struct in6_addr));
		sa_in6 = (struct sockaddr_in6 *)rti_info[RTAX_NETMASK];
		if (sa_in6 != NULL) {
			if (sa_in6->sin6_len != 0)
				prefixlen = mask2prefixlen6(sa_in6);
		} else if (rtm->rtm_flags & RTF_HOST)
			prefixlen = 128;
		else
			fatalx("in6 net addr without netmask");
		break;
	default:
		return (0);
	}

	if ((sa = rti_info[RTAX_GATEWAY]) != NULL)
		switch (sa->sa_family) {
		case AF_LINK:
			flags |= F_CONNECTED;
			ifindex = rtm->rtm_index;
			sa = NULL;
			mpath = 0;	/* link local stuff can't be mpath */
			break;
		case AF_INET:
		case AF_INET6:
			if (rtm->rtm_flags & RTF_CONNECTED) {
				flags |= F_CONNECTED;
				ifindex = rtm->rtm_index;
				sa = NULL;
				mpath = 0; /* link local stuff can't be mpath */
			}
			break;
		}

	if (rtm->rtm_type == RTM_DELETE) {
		switch (prefix.aid) {
		case AID_INET:
			sa_in = (struct sockaddr_in *)sa;
			if ((kr = kroute_find(kt, prefix.v4.s_addr,
			    prefixlen, prio)) == NULL)
				return (0);
			if (!(kr->r.flags & F_KERNEL))
				return (0);

			if (mpath)
				/* get the correct route */
				if ((kr = kroute_matchgw(kr, sa_in)) == NULL) {
					log_warnx("dispatch_rtmsg_addr[delete] "
					    "mpath route not found");
					return (0);
				}

			if (kroute_remove(kt, kr) == -1)
				return (-1);
			break;
		case AID_INET6:
			sa_in6 = (struct sockaddr_in6 *)sa;
			if ((kr6 = kroute6_find(kt, &prefix.v6, prefixlen,
			    prio)) == NULL)
				return (0);
			if (!(kr6->r.flags & F_KERNEL))
				return (0);

			if (mpath)
				/* get the correct route */
				if ((kr6 = kroute6_matchgw(kr6, sa_in6)) ==
				    NULL) {
					log_warnx("dispatch_rtmsg_addr[delete] "
					    "IPv6 mpath route not found");
					return (0);
				}

			if (kroute6_remove(kt, kr6) == -1)
				return (-1);
			break;
		}
		return (0);
	}

	if (sa == NULL && !(flags & F_CONNECTED)) {
		log_warnx("%s: no nexthop for %s/%u",
		    __func__, log_addr(&prefix), prefixlen);
		return (0);
	}

	switch (prefix.aid) {
	case AID_INET:
		sa_in = (struct sockaddr_in *)sa;
		if ((kr = kroute_find(kt, prefix.v4.s_addr, prefixlen,
		    prio)) != NULL) {
			if (kr->r.flags & F_KERNEL) {
				/* get the correct route */
				if (mpath && rtm->rtm_type == RTM_CHANGE &&
				    (kr = kroute_matchgw(kr, sa_in)) == NULL) {
					log_warnx("dispatch_rtmsg_addr[change] "
					    "mpath route not found");
					goto add4;
				} else if (mpath && rtm->rtm_type == RTM_ADD)
					goto add4;

				if (sa_in != NULL) {
					if (kr->r.nexthop.s_addr !=
					    sa_in->sin_addr.s_addr)
						changed = 1;
					kr->r.nexthop.s_addr =
					    sa_in->sin_addr.s_addr;
				} else {
					if (kr->r.nexthop.s_addr != 0)
						changed = 1;
					kr->r.nexthop.s_addr = 0;
				}

				if (kr->r.flags & F_NEXTHOP)
					flags |= F_NEXTHOP;
				oflags = kr->r.flags;
				if (flags != oflags)
					changed = 1;
				kr->r.flags = flags;
				if ((oflags & F_CONNECTED) &&
				    !(flags & F_CONNECTED)) {
					kif_kr_remove(kr);
					kr_redistribute(IMSG_NETWORK_REMOVE,
					    kt, &kr->r);
				}
				if ((flags & F_CONNECTED) &&
				    !(oflags & F_CONNECTED)) {
					kif_kr_insert(kr);
					kr_redistribute(IMSG_NETWORK_ADD,
					    kt, &kr->r);
				}
				if (kr->r.flags & F_NEXTHOP && changed)
					knexthop_track(kt, kr);
			}
		} else if (rtm->rtm_type == RTM_CHANGE) {
			log_warnx("change req for %s/%u: not in table",
			    log_addr(&prefix), prefixlen);
			return (0);
		} else {
add4:
			if ((kr = calloc(1,
			    sizeof(struct kroute_node))) == NULL) {
				log_warn("dispatch_rtmsg");
				return (-1);
			}
			kr->r.prefix.s_addr = prefix.v4.s_addr;
			kr->r.prefixlen = prefixlen;
			if (sa_in != NULL)
				kr->r.nexthop.s_addr = sa_in->sin_addr.s_addr;
			else
				kr->r.nexthop.s_addr = 0;
			kr->r.flags = flags;
			kr->r.ifindex = ifindex;
			kr->r.priority = prio;

			kroute_insert(kt, kr);
		}
		break;
	case AID_INET6:
		sa_in6 = (struct sockaddr_in6 *)sa;
		if ((kr6 = kroute6_find(kt, &prefix.v6, prefixlen, prio)) !=
		    NULL) {
			if (kr6->r.flags & F_KERNEL) {
				/* get the correct route */
				if (mpath && rtm->rtm_type == RTM_CHANGE &&
				    (kr6 = kroute6_matchgw(kr6, sa_in6)) ==
				    NULL) {
					log_warnx("dispatch_rtmsg[change] "
					    "IPv6 mpath route not found");
					goto add6;
				} else if (mpath && rtm->rtm_type == RTM_ADD)
					goto add6;

				if (sa_in6 != NULL) {
					if (memcmp(&kr6->r.nexthop,
					    &sa_in6->sin6_addr,
					    sizeof(struct in6_addr)))
						changed = 1;
					memcpy(&kr6->r.nexthop,
					    &sa_in6->sin6_addr,
					    sizeof(struct in6_addr));
				} else {
					if (memcmp(&kr6->r.nexthop,
					    &in6addr_any,
					    sizeof(struct in6_addr)))
						changed = 1;
					memcpy(&kr6->r.nexthop,
					    &in6addr_any,
					    sizeof(struct in6_addr));
				}

				if (kr6->r.flags & F_NEXTHOP)
					flags |= F_NEXTHOP;
				oflags = kr6->r.flags;
				if (flags != oflags)
					changed = 1;
				kr6->r.flags = flags;
				if ((oflags & F_CONNECTED) &&
				    !(flags & F_CONNECTED)) {
					kif_kr6_remove(kr6);
					kr_redistribute6(IMSG_NETWORK_REMOVE,
					    kt, &kr6->r);
				}
				if ((flags & F_CONNECTED) &&
				    !(oflags & F_CONNECTED)) {
					kif_kr6_insert(kr6);
					kr_redistribute6(IMSG_NETWORK_ADD,
					    kt, &kr6->r);
				}
				if (kr6->r.flags & F_NEXTHOP && changed)
					knexthop_track(kt, kr6);
			}
		} else if (rtm->rtm_type == RTM_CHANGE) {
			log_warnx("change req for %s/%u: not in table",
			    log_addr(&prefix), prefixlen);
			return (0);
		} else {
add6:
			if ((kr6 = calloc(1,
			    sizeof(struct kroute6_node))) == NULL) {
				log_warn("dispatch_rtmsg_addr");
				return (-1);
			}
			memcpy(&kr6->r.prefix, &prefix.v6,
			    sizeof(struct in6_addr));
			kr6->r.prefixlen = prefixlen;
			if (sa_in6 != NULL)
				memcpy(&kr6->r.nexthop, &sa_in6->sin6_addr,
				    sizeof(struct in6_addr));
			else
				memcpy(&kr6->r.nexthop, &in6addr_any,
				    sizeof(struct in6_addr));
			kr6->r.flags = flags;
			kr6->r.ifindex = ifindex;
			kr6->r.priority = prio;

			kroute6_insert(kt, kr6);
		}
		break;
	}

	return (0);
}