GCC Code Coverage Report

File:       usr.sbin/bgpd/kroute.c
Directory:  ./
Date:       2016-12-06

            Exec    Total   Coverage
Lines:         0     1572      0.0 %
Branches:      0     2018      0.0 %

Line Branch Exec Source
1
/*	$OpenBSD: kroute.c,v 1.209 2016/04/08 12:27:05 phessler Exp $ */
2
3
/*
4
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
5
 *
6
 * Permission to use, copy, modify, and distribute this software for any
7
 * purpose with or without fee is hereby granted, provided that the above
8
 * copyright notice and this permission notice appear in all copies.
9
 *
10
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
 */
18
19
#include <sys/types.h>
20
#include <sys/socket.h>
21
#include <sys/sysctl.h>
22
#include <sys/tree.h>
23
#include <sys/uio.h>
24
#include <netinet/in.h>
25
#include <arpa/inet.h>
26
#include <net/if.h>
27
#include <net/if_dl.h>
28
#include <net/route.h>
29
#include <netmpls/mpls.h>
30
#include <err.h>
31
#include <errno.h>
32
#include <fcntl.h>
33
#include <stdio.h>
34
#include <stdlib.h>
35
#include <string.h>
36
#include <unistd.h>
37
38
#include "bgpd.h"
39
40
struct ktable		**krt;
41
u_int			  krt_size;
42
43
struct {
44
	u_int32_t		rtseq;
45
	pid_t			pid;
46
	int			fd;
47
} kr_state;
48
49
struct kroute_node {
50
	RB_ENTRY(kroute_node)	 entry;
51
	struct kroute		 r;
52
	struct kroute_node	*next;
53
};
54
55
struct kroute6_node {
56
	RB_ENTRY(kroute6_node)	 entry;
57
	struct kroute6		 r;
58
	struct kroute6_node	*next;
59
};
60
61
struct knexthop_node {
62
	RB_ENTRY(knexthop_node)	 entry;
63
	struct bgpd_addr	 nexthop;
64
	void			*kroute;
65
};
66
67
struct kif_kr {
68
	LIST_ENTRY(kif_kr)	 entry;
69
	struct kroute_node	*kr;
70
};
71
72
struct kif_kr6 {
73
	LIST_ENTRY(kif_kr6)	 entry;
74
	struct kroute6_node	*kr;
75
};
76
77
LIST_HEAD(kif_kr_head, kif_kr);
78
LIST_HEAD(kif_kr6_head, kif_kr6);
79
80
struct kif_node {
81
	RB_ENTRY(kif_node)	 entry;
82
	struct kif		 k;
83
	struct kif_kr_head	 kroute_l;
84
	struct kif_kr6_head	 kroute6_l;
85
};
86
87
int	ktable_new(u_int, u_int, char *, char *, int, u_int8_t);
88
void	ktable_free(u_int, u_int8_t);
89
void	ktable_destroy(struct ktable *, u_int8_t);
90
struct ktable	*ktable_get(u_int);
91
92
int	kr4_change(struct ktable *, struct kroute_full *, u_int8_t);
93
int	kr6_change(struct ktable *, struct kroute_full *, u_int8_t);
94
int	krVPN4_change(struct ktable *, struct kroute_full *, u_int8_t);
95
int	kr4_delete(struct ktable *, struct kroute_full *, u_int8_t);
96
int	kr6_delete(struct ktable *, struct kroute_full *, u_int8_t);
97
int	krVPN4_delete(struct ktable *, struct kroute_full *, u_int8_t);
98
void	kr_net_delete(struct network *);
99
struct network *kr_net_match(struct ktable *, struct kroute *);
100
struct network *kr_net_match6(struct ktable *, struct kroute6 *);
101
struct network *kr_net_find(struct ktable *, struct network *);
102
int	kr_redistribute(int, struct ktable *, struct kroute *);
103
int	kr_redistribute6(int, struct ktable *, struct kroute6 *);
104
struct kroute_full *kr_tofull(struct kroute *);
105
struct kroute_full *kr6_tofull(struct kroute6 *);
106
int	kroute_compare(struct kroute_node *, struct kroute_node *);
107
int	kroute6_compare(struct kroute6_node *, struct kroute6_node *);
108
int	knexthop_compare(struct knexthop_node *, struct knexthop_node *);
109
int	kif_compare(struct kif_node *, struct kif_node *);
110
void	kr_fib_update_prio(u_int, u_int8_t);
111
112
struct kroute_node	*kroute_find(struct ktable *, in_addr_t, u_int8_t,
113
			    u_int8_t);
114
struct kroute_node	*kroute_matchgw(struct kroute_node *,
115
			    struct sockaddr_in *);
116
int			 kroute_insert(struct ktable *, struct kroute_node *);
117
int			 kroute_remove(struct ktable *, struct kroute_node *);
118
void			 kroute_clear(struct ktable *);
119
120
struct kroute6_node	*kroute6_find(struct ktable *, const struct in6_addr *,
121
			    u_int8_t, u_int8_t);
122
struct kroute6_node	*kroute6_matchgw(struct kroute6_node *,
123
			    struct sockaddr_in6 *);
124
int			 kroute6_insert(struct ktable *, struct kroute6_node *);
125
int			 kroute6_remove(struct ktable *, struct kroute6_node *);
126
void			 kroute6_clear(struct ktable *);
127
128
struct knexthop_node	*knexthop_find(struct ktable *, struct bgpd_addr *);
129
int			 knexthop_insert(struct ktable *,
130
			    struct knexthop_node *);
131
int			 knexthop_remove(struct ktable *,
132
			    struct knexthop_node *);
133
void			 knexthop_clear(struct ktable *);
134
135
struct kif_node		*kif_find(int);
136
int			 kif_insert(struct kif_node *);
137
int			 kif_remove(struct kif_node *);
138
void			 kif_clear(void);
139
140
int			 kif_kr_insert(struct kroute_node *);
141
int			 kif_kr_remove(struct kroute_node *);
142
143
int			 kif_kr6_insert(struct kroute6_node *);
144
int			 kif_kr6_remove(struct kroute6_node *);
145
146
int			 kif_validate(struct kif *);
147
int			 kroute_validate(struct kroute *);
148
int			 kroute6_validate(struct kroute6 *);
149
void			 knexthop_validate(struct ktable *,
150
			    struct knexthop_node *);
151
void			 knexthop_track(struct ktable *, void *);
152
void			 knexthop_send_update(struct knexthop_node *);
153
struct kroute_node	*kroute_match(struct ktable *, in_addr_t, int);
154
struct kroute6_node	*kroute6_match(struct ktable *, struct in6_addr *, int);
155
void			 kroute_detach_nexthop(struct ktable *,
156
			    struct knexthop_node *);
157
158
int		protect_lo(struct ktable *);
159
u_int8_t	prefixlen_classful(in_addr_t);
160
u_int8_t	mask2prefixlen(in_addr_t);
161
u_int8_t	mask2prefixlen6(struct sockaddr_in6 *);
162
void		get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
163
void		if_change(u_short, int, struct if_data *);
164
void		if_announce(void *);
165
166
int		send_rtmsg(int, int, struct ktable *, struct kroute *,
167
		    u_int8_t);
168
int		send_rt6msg(int, int, struct ktable *, struct kroute6 *,
169
		    u_int8_t);
170
int		dispatch_rtmsg(void);
171
int		fetchtable(struct ktable *, u_int8_t);
172
int		fetchifs(int);
173
int		dispatch_rtmsg_addr(struct rt_msghdr *,
174
		    struct sockaddr *[RTAX_MAX], struct ktable *);
175
176
RB_PROTOTYPE(kroute_tree, kroute_node, entry, kroute_compare)
177
RB_GENERATE(kroute_tree, kroute_node, entry, kroute_compare)
178
179
RB_PROTOTYPE(kroute6_tree, kroute6_node, entry, kroute6_compare)
180
RB_GENERATE(kroute6_tree, kroute6_node, entry, kroute6_compare)
181
182
RB_PROTOTYPE(knexthop_tree, knexthop_node, entry, knexthop_compare)
183
RB_GENERATE(knexthop_tree, knexthop_node, entry, knexthop_compare)
184
185
RB_HEAD(kif_tree, kif_node)		kit;
186
RB_PROTOTYPE(kif_tree, kif_node, entry, kif_compare)
187
RB_GENERATE(kif_tree, kif_node, entry, kif_compare)
188
189
#define KT2KNT(x)	(&(ktable_get((x)->nhtableid)->knt))
190
191
/*
192
 * exported functions
193
 */
194
195
int
196
kr_init(void)
197
{
198
	int		opt = 0, rcvbuf, default_rcvbuf;
199
	unsigned int	tid = RTABLE_ANY;
200
	socklen_t	optlen;
201
202
	if ((kr_state.fd = socket(AF_ROUTE,
203
	    SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)) == -1) {
204
		log_warn("kr_init: socket");
205
		return (-1);
206
	}
207
208
	/* not interested in my own messages */
209
	if (setsockopt(kr_state.fd, SOL_SOCKET, SO_USELOOPBACK,
210
	    &opt, sizeof(opt)) == -1)
211
		log_warn("kr_init: setsockopt");	/* not fatal */
212
213
	/* grow receive buffer, don't wanna miss messages */
214
	optlen = sizeof(default_rcvbuf);
215
	if (getsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
216
	    &default_rcvbuf, &optlen) == -1)
217
		log_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF");
218
	else
219
		for (rcvbuf = MAX_RTSOCK_BUF;
220
		    rcvbuf > default_rcvbuf &&
221
		    setsockopt(kr_state.fd, SOL_SOCKET, SO_RCVBUF,
222
		    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
223
		    rcvbuf /= 2)
224
			;	/* nothing */
225
226
	if (setsockopt(kr_state.fd, AF_ROUTE, ROUTE_TABLEFILTER, &tid,
227
	    sizeof(tid)) == -1) {
228
		log_warn("kr_init: setsockopt AF_ROUTE ROUTE_TABLEFILTER");
229
		return (-1);
230
	}
231
232
	kr_state.pid = getpid();
233
	kr_state.rtseq = 1;
234
235
	RB_INIT(&kit);
236
237
	if (fetchifs(0) == -1)
238
		return (-1);
239
240
	return (kr_state.fd);
241
}
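
The SO_RCVBUF loop in kr_init() above starts at MAX_RTSOCK_BUF and halves the requested size on ENOBUFS until the kernel accepts it or the default size is reached. A minimal standalone sketch of that back-off, using an ordinary UDP socket and a 1 MB cap as stand-ins for the routing socket and MAX_RTSOCK_BUF (both assumptions, not bgpd's values):

#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>
#include <stdio.h>

/* halve the requested receive buffer on ENOBUFS, as kr_init() does */
static int
grow_rcvbuf(int fd)
{
	int		rcvbuf, default_rcvbuf;
	socklen_t	optlen = sizeof(default_rcvbuf);

	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF,
	    &default_rcvbuf, &optlen) == -1)
		return (-1);

	for (rcvbuf = 1024 * 1024;	/* assumed cap, not MAX_RTSOCK_BUF */
	    rcvbuf > default_rcvbuf &&
	    setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
	    &rcvbuf, sizeof(rcvbuf)) == -1 && errno == ENOBUFS;
	    rcvbuf /= 2)
		;	/* nothing */

	return (rcvbuf > default_rcvbuf ? rcvbuf : default_rcvbuf);
}

int
main(void)
{
	int	fd;

	if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		return (1);
	printf("receive buffer settled at %d bytes\n", grow_rcvbuf(fd));
	return (0);
}
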
242
243
int
244
ktable_new(u_int rtableid, u_int rdomid, char *name, char *ifname, int fs,
245
    u_int8_t fib_prio)
246
{
247
	struct ktable	**xkrt;
248
	struct ktable	 *kt;
249
	size_t		  oldsize;
250
251
	/* resize index table if needed */
252
	if (rtableid >= krt_size) {
253
		oldsize = sizeof(struct ktable *) * krt_size;
254
		if ((xkrt = reallocarray(krt, rtableid + 1,
255
		    sizeof(struct ktable *))) == NULL) {
256
			log_warn("ktable_new");
257
			return (-1);
258
		}
259
		krt = xkrt;
260
		krt_size = rtableid + 1;
261
		bzero((char *)krt + oldsize,
262
		    krt_size * sizeof(struct ktable *) - oldsize);
263
	}
264
265
	if (krt[rtableid])
266
		fatalx("ktable_new: table already exists.");
267
268
	/* allocate new element */
269
	kt = krt[rtableid] = calloc(1, sizeof(struct ktable));
270
	if (kt == NULL) {
271
		log_warn("ktable_new");
272
		return (-1);
273
	}
274
275
	/* initialize structure ... */
276
	strlcpy(kt->descr, name, sizeof(kt->descr));
277
	RB_INIT(&kt->krt);
278
	RB_INIT(&kt->krt6);
279
	RB_INIT(&kt->knt);
280
	TAILQ_INIT(&kt->krn);
281
	kt->fib_conf = kt->fib_sync = fs;
282
	kt->rtableid = rtableid;
283
	kt->nhtableid = rdomid;
284
	/* bump refcount of rdomain table for the nexthop lookups */
285
	ktable_get(kt->nhtableid)->nhrefcnt++;
286
	if (ifname) {
287
		strlcpy(kt->ifmpe, ifname, IFNAMSIZ);
288
		kt->ifindex = if_nametoindex(ifname);
289
	}
290
291
	/* ... and load it */
292
	if (fetchtable(kt, fib_prio) == -1)
293
		return (-1);
294
	if (protect_lo(kt) == -1)
295
		return (-1);
296
297
	/* everything is up and running */
298
	kt->state = RECONF_REINIT;
299
	log_debug("new ktable %s for rtableid %d", name, rtableid);
300
	return (0);
301
}
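
The index-table growth at the top of ktable_new() combines reallocarray() with zeroing of the newly added tail so that unused slots stay NULL. A standalone sketch of just that pattern, using a generic pointer array instead of struct ktable * (reallocarray() is native on OpenBSD; other platforms may need a compatibility shim):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	void	**tbl = NULL, **xtbl;
	size_t	  size = 0, oldsize, want = 5;

	if (want >= size) {
		oldsize = sizeof(*tbl) * size;
		if ((xtbl = reallocarray(tbl, want + 1,
		    sizeof(*tbl))) == NULL)
			return (1);
		tbl = xtbl;
		size = want + 1;
		/* zero only the freshly grown tail */
		memset((char *)tbl + oldsize, 0,
		    size * sizeof(*tbl) - oldsize);
	}
	printf("table grown to %zu slots, slot %zu is %sNULL\n",
	    size, want, tbl[want] == NULL ? "" : "not ");
	free(tbl);
	return (0);
}
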
302
303
void
304
ktable_free(u_int rtableid, u_int8_t fib_prio)
305
{
306
	struct ktable	*kt, *nkt;
307
308
	if ((kt = ktable_get(rtableid)) == NULL)
309
		return;
310
311
	/* decouple from kernel, no new routes will be entered from here */
312
	kr_fib_decouple(kt->rtableid, fib_prio);
313
314
	/* first unhook from the nexthop table */
315
	nkt = ktable_get(kt->nhtableid);
316
	nkt->nhrefcnt--;
317
318
	/*
319
	 * Evil little details:
320
	 *   If kt->nhrefcnt > 0 then kt == nkt and nothing needs to be done.
321
	 *   If kt != nkt then kt->nhrefcnt must be 0 and kt must be killed.
322
	 *   If nkt is no longer referenced it must be killed (possible double
323
	 *   free so check that kt != nkt).
324
	 */
325
	if (kt != nkt && nkt->nhrefcnt <= 0)
326
		ktable_destroy(nkt, fib_prio);
327
	if (kt->nhrefcnt <= 0)
328
		ktable_destroy(kt, fib_prio);
329
}
330
331
void
332
ktable_destroy(struct ktable *kt, u_int8_t fib_prio)
333
{
334
	/* decouple just to be sure, does not hurt */
335
	kr_fib_decouple(kt->rtableid, fib_prio);
336
337
	log_debug("freeing ktable %s rtableid %u", kt->descr, kt->rtableid);
338
	knexthop_clear(kt);
339
	kroute_clear(kt);
340
	kroute6_clear(kt);
341
342
	krt[kt->rtableid] = NULL;
343
	free(kt);
344
}
345
346
struct ktable *
347
ktable_get(u_int rtableid)
348
{
349
	if (rtableid >= krt_size)
350
		return (NULL);
351
	return (krt[rtableid]);
352
}
353
354
int
355
ktable_update(u_int rtableid, char *name, char *ifname, int flags, u_int8_t
356
    fib_prio)
357
{
358
	struct ktable	*kt, *rkt;
359
	u_int		 rdomid;
360
361
	if (!ktable_exists(rtableid, &rdomid))
362
		fatalx("King Bula lost a table");	/* may not happen */
363
364
	if (rdomid != rtableid || flags & F_RIB_NOFIB) {
365
		rkt = ktable_get(rdomid);
366
		if (rkt == NULL) {
367
			char buf[32];
368
			snprintf(buf, sizeof(buf), "rdomain_%d", rdomid);
369
			if (ktable_new(rdomid, rdomid, buf, NULL, 0, fib_prio))
370
				return (-1);
371
		} else {
372
			/* there is no need for full fib synchronisation if
373
			 * the table is only used for nexthop lookups.
374
			 */
375
			if (rkt->state == RECONF_DELETE) {
376
				rkt->fib_conf = 0;
377
				rkt->state = RECONF_KEEP;
378
			}
379
		}
380
	}
381
382
	if (flags & (F_RIB_NOEVALUATE | F_RIB_NOFIB))
383
		/* only rdomain table must exist */
384
		return (0);
385
386
	kt = ktable_get(rtableid);
387
	if (kt == NULL) {
388
		if (ktable_new(rtableid, rdomid, name, ifname,
389
		    !(flags & F_RIB_NOFIBSYNC), fib_prio))
390
			return (-1);
391
	} else {
392
		/* fib sync has higher preference than no sync */
393
		if (kt->state == RECONF_DELETE) {
394
			kt->fib_conf = !(flags & F_RIB_NOFIBSYNC);
395
			kt->state = RECONF_KEEP;
396
		} else if (!kt->fib_conf)
397
			kt->fib_conf = !(flags & F_RIB_NOFIBSYNC);
398
399
		strlcpy(kt->descr, name, sizeof(kt->descr));
400
	}
401
	return (0);
402
}
403
404
void
405
ktable_preload(void)
406
{
407
	struct ktable	*kt;
408
	u_int		 i;
409
410
	for (i = 0; i < krt_size; i++) {
411
		if ((kt = ktable_get(i)) == NULL)
412
			continue;
413
		kt->state = RECONF_DELETE;
414
	}
415
}
416
417
void
418
ktable_postload(u_int8_t fib_prio)
419
{
420
	struct ktable	*kt;
421
	u_int		 i;
422
423
	for (i = krt_size; i > 0; i--) {
424
		if ((kt = ktable_get(i - 1)) == NULL)
425
			continue;
426
		if (kt->state == RECONF_DELETE)
427
			ktable_free(i - 1, fib_prio);
428
		else if (kt->state == RECONF_REINIT)
429
			kt->fib_sync = kt->fib_conf;
430
	}
431
}
432
433
int
434
ktable_exists(u_int rtableid, u_int *rdomid)
435
{
436
	size_t			 len;
437
	struct rt_tableinfo	 info;
438
	int			 mib[6];
439
440
	mib[0] = CTL_NET;
441
	mib[1] = PF_ROUTE;
442
	mib[2] = 0;
443
	mib[3] = 0;
444
	mib[4] = NET_RT_TABLE;
445
	mib[5] = rtableid;
446
447
	len = sizeof(info);
448
	if (sysctl(mib, 6, &info, &len, NULL, 0) == -1) {
449
		if (errno == ENOENT)
450
			/* table nonexistent */
451
			return (0);
452
		log_warn("sysctl");
453
		/* must return 0 so that the table is considered non-existent */
454
		return (0);
455
	}
456
	if (rdomid)
457
		*rdomid = info.rti_domainid;
458
	return (1);
459
}
460
461
int
462
kr_change(u_int rtableid, struct kroute_full *kl, u_int8_t fib_prio)
463
{
464
	struct ktable		*kt;
465
466
	if ((kt = ktable_get(rtableid)) == NULL)
467
		/* too noisy during reloads, just ignore */
468
		return (0);
469
	switch (kl->prefix.aid) {
470
	case AID_INET:
471
		return (kr4_change(kt, kl, fib_prio));
472
	case AID_INET6:
473
		return (kr6_change(kt, kl, fib_prio));
474
	case AID_VPN_IPv4:
475
		return (krVPN4_change(kt, kl, fib_prio));
476
	}
477
	log_warnx("kr_change: not handled AID");
478
	return (-1);
479
}
480
481
int
482
kr4_change(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
483
{
484
	struct kroute_node	*kr;
485
	int			 action = RTM_ADD;
486
	u_int16_t		 labelid;
487
488
	if ((kr = kroute_find(kt, kl->prefix.v4.s_addr, kl->prefixlen,
489
	    fib_prio)) != NULL)
490
		action = RTM_CHANGE;
491
492
	/* for blackhole and reject routes nexthop needs to be 127.0.0.1 */
493
	if (kl->flags & (F_BLACKHOLE|F_REJECT))
494
		kl->nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
495
	/* nexthop within 127/8 -> ignore silently */
496
	else if ((kl->nexthop.v4.s_addr & htonl(IN_CLASSA_NET)) ==
497
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET))
498
		return (0);
499
500
	labelid = rtlabel_name2id(kl->label);
501
502
	if (action == RTM_ADD) {
503
		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
504
			log_warn("kr_change");
505
			return (-1);
506
		}
507
		kr->r.prefix.s_addr = kl->prefix.v4.s_addr;
508
		kr->r.prefixlen = kl->prefixlen;
509
		kr->r.nexthop.s_addr = kl->nexthop.v4.s_addr;
510
		kr->r.flags = kl->flags | F_BGPD_INSERTED;
511
		kr->r.priority = fib_prio;
512
		kr->r.labelid = labelid;
513
514
		if (kroute_insert(kt, kr) == -1) {
515
			free(kr);
516
			return (-1);
517
		}
518
	} else {
519
		kr->r.nexthop.s_addr = kl->nexthop.v4.s_addr;
520
		rtlabel_unref(kr->r.labelid);
521
		kr->r.labelid = labelid;
522
		if (kl->flags & F_BLACKHOLE)
523
			kr->r.flags |= F_BLACKHOLE;
524
		else
525
			kr->r.flags &= ~F_BLACKHOLE;
526
		if (kl->flags & F_REJECT)
527
			kr->r.flags |= F_REJECT;
528
		else
529
			kr->r.flags &= ~F_REJECT;
530
	}
531
532
	if (send_rtmsg(kr_state.fd, action, kt, &kr->r, fib_prio) == -1)
533
		return (-1);
534
535
	return (0);
536
}
537
538
int
539
kr6_change(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
540
{
541
	struct kroute6_node	*kr6;
542
	struct in6_addr		 lo6 = IN6ADDR_LOOPBACK_INIT;
543
	int			 action = RTM_ADD;
544
	u_int16_t		 labelid;
545
546
	if ((kr6 = kroute6_find(kt, &kl->prefix.v6, kl->prefixlen, fib_prio)) !=
547
	    NULL)
548
		action = RTM_CHANGE;
549
550
	/* for blackhole and reject routes nexthop needs to be ::1 */
551
	if (kl->flags & (F_BLACKHOLE|F_REJECT))
552
		bcopy(&lo6, &kl->nexthop.v6, sizeof(kl->nexthop.v6));
553
	/* nexthop to loopback -> ignore silently */
554
	else if (IN6_IS_ADDR_LOOPBACK(&kl->nexthop.v6))
555
		return (0);
556
557
	labelid = rtlabel_name2id(kl->label);
558
559
	if (action == RTM_ADD) {
560
		if ((kr6 = calloc(1, sizeof(struct kroute6_node))) == NULL) {
561
			log_warn("kr_change");
562
			return (-1);
563
		}
564
		memcpy(&kr6->r.prefix, &kl->prefix.v6, sizeof(struct in6_addr));
565
		kr6->r.prefixlen = kl->prefixlen;
566
		memcpy(&kr6->r.nexthop, &kl->nexthop.v6,
567
		    sizeof(struct in6_addr));
568
		kr6->r.flags = kl->flags | F_BGPD_INSERTED;
569
		kr6->r.priority = fib_prio;
570
		kr6->r.labelid = labelid;
571
572
		if (kroute6_insert(kt, kr6) == -1) {
573
			free(kr6);
574
			return (-1);
575
		}
576
	} else {
577
		memcpy(&kr6->r.nexthop, &kl->nexthop.v6,
578
		    sizeof(struct in6_addr));
579
		rtlabel_unref(kr6->r.labelid);
580
		kr6->r.labelid = labelid;
581
		if (kl->flags & F_BLACKHOLE)
582
			kr6->r.flags |= F_BLACKHOLE;
583
		else
584
			kr6->r.flags &= ~F_BLACKHOLE;
585
		if (kl->flags & F_REJECT)
586
			kr6->r.flags |= F_REJECT;
587
		else
588
			kr6->r.flags &= ~F_REJECT;
589
	}
590
591
	if (send_rt6msg(kr_state.fd, action, kt, &kr6->r, fib_prio) == -1)
592
		return (-1);
593
594
	return (0);
595
}
596
597
int
598
krVPN4_change(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
599
{
600
	struct kroute_node	*kr;
601
	int			 action = RTM_ADD;
602
	u_int32_t		 mplslabel = 0;
603
	u_int16_t		 labelid;
604
605
	if ((kr = kroute_find(kt, kl->prefix.vpn4.addr.s_addr, kl->prefixlen,
606
	    fib_prio)) != NULL)
607
		action = RTM_CHANGE;
608
609
	/* nexthop within 127/8 -> ignore silently */
610
	if ((kl->nexthop.v4.s_addr & htonl(IN_CLASSA_NET)) ==
611
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET))
612
		return (0);
613
614
	/* only a single MPLS label is supported for now */
615
	if (kl->prefix.vpn4.labellen != 3) {
616
		log_warnx("krVPN4_change: %s/%u has not a single label",
617
		    log_addr(&kl->prefix), kl->prefixlen);
618
		return (0);
619
	}
620
	mplslabel = (kl->prefix.vpn4.labelstack[0] << 24) |
621
	    (kl->prefix.vpn4.labelstack[1] << 16) |
622
	    (kl->prefix.vpn4.labelstack[2] << 8);
623
	mplslabel = htonl(mplslabel);
624
625
	labelid = rtlabel_name2id(kl->label);
626
627
	/* for blackhole and reject routes nexthop needs to be 127.0.0.1 */
628
	if (kl->flags & (F_BLACKHOLE|F_REJECT))
629
		kl->nexthop.v4.s_addr = htonl(INADDR_LOOPBACK);
630
631
	if (action == RTM_ADD) {
632
		if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
633
			log_warn("krVPN4_change");
634
			return (-1);
635
		}
636
		kr->r.prefix.s_addr = kl->prefix.vpn4.addr.s_addr;
637
		kr->r.prefixlen = kl->prefixlen;
638
		kr->r.nexthop.s_addr = kl->nexthop.v4.s_addr;
639
		kr->r.flags = kl->flags | F_BGPD_INSERTED | F_MPLS;
640
		kr->r.priority = fib_prio;
641
		kr->r.labelid = labelid;
642
		kr->r.mplslabel = mplslabel;
643
644
		if (kroute_insert(kt, kr) == -1) {
645
			free(kr);
646
			return (-1);
647
		}
648
	} else {
649
		kr->r.mplslabel = mplslabel;
650
		kr->r.nexthop.s_addr = kl->nexthop.v4.s_addr;
651
		rtlabel_unref(kr->r.labelid);
652
		kr->r.labelid = labelid;
653
		if (kl->flags & F_BLACKHOLE)
654
			kr->r.flags |= F_BLACKHOLE;
655
		else
656
			kr->r.flags &= ~F_BLACKHOLE;
657
		if (kl->flags & F_REJECT)
658
			kr->r.flags |= F_REJECT;
659
		else
660
			kr->r.flags &= ~F_REJECT;
661
	}
662
663
	if (send_rtmsg(kr_state.fd, action, kt, &kr->r, fib_prio) == -1)
664
		return (-1);
665
666
	return (0);
667
}
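
krVPN4_change() above repacks the 3-byte BGP label stack entry so the 20-bit MPLS label ends up in the upper bits of the 32-bit MPLS shim word before the value is converted to network order. A worked example with an illustrative label value of 42 (the encoding steps and the sample value are assumptions for the demo, not bgpd code):

#include <arpa/inet.h>
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint8_t		labelstack[3];
	uint32_t	label = 42, mplslabel;

	/* encode 42 as a bottom-of-stack BGP label stack entry */
	labelstack[0] = (label >> 12) & 0xff;
	labelstack[1] = (label >> 4) & 0xff;
	labelstack[2] = ((label << 4) & 0xf0) | 0x01;	/* S bit set */

	/* same shifts as krVPN4_change(): label lands in bits 12..31 */
	mplslabel = ((uint32_t)labelstack[0] << 24) |
	    ((uint32_t)labelstack[1] << 16) |
	    ((uint32_t)labelstack[2] << 8);

	printf("shim word 0x%08x carries label %u\n",
	    mplslabel, mplslabel >> 12);
	printf("on the wire: 0x%08x\n", htonl(mplslabel));
	return (0);
}
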
668
669
int
670
kr_delete(u_int rtableid, struct kroute_full *kl, u_int8_t fib_prio)
671
{
672
	struct ktable		*kt;
673
674
	if ((kt = ktable_get(rtableid)) == NULL)
675
		/* too noisy during reloads, just ignore */
676
		return (0);
677
678
	switch (kl->prefix.aid) {
679
	case AID_INET:
680
		return (kr4_delete(kt, kl, fib_prio));
681
	case AID_INET6:
682
		return (kr6_delete(kt, kl, fib_prio));
683
	case AID_VPN_IPv4:
684
		return (krVPN4_delete(kt, kl, fib_prio));
685
	}
686
	log_warnx("%s: not handled AID", __func__);
687
	return (-1);
688
}
689
690
int
691
kr4_delete(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
692
{
693
	struct kroute_node	*kr;
694
695
	if ((kr = kroute_find(kt, kl->prefix.v4.s_addr, kl->prefixlen,
696
	    fib_prio)) == NULL)
697
		return (0);
698
699
	if (!(kr->r.flags & F_BGPD_INSERTED))
700
		return (0);
701
702
	if (send_rtmsg(kr_state.fd, RTM_DELETE, kt, &kr->r, fib_prio) == -1)
703
		return (-1);
704
705
	rtlabel_unref(kr->r.labelid);
706
707
	if (kroute_remove(kt, kr) == -1)
708
		return (-1);
709
710
	return (0);
711
}
712
713
int
714
kr6_delete(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
715
{
716
	struct kroute6_node	*kr6;
717
718
	if ((kr6 = kroute6_find(kt, &kl->prefix.v6, kl->prefixlen, fib_prio)) ==
719
	    NULL)
720
		return (0);
721
722
	if (!(kr6->r.flags & F_BGPD_INSERTED))
723
		return (0);
724
725
	if (send_rt6msg(kr_state.fd, RTM_DELETE, kt, &kr6->r, fib_prio) == -1)
726
		return (-1);
727
728
	rtlabel_unref(kr6->r.labelid);
729
730
	if (kroute6_remove(kt, kr6) == -1)
731
		return (-1);
732
733
	return (0);
734
}
735
736
int
737
krVPN4_delete(struct ktable *kt, struct kroute_full *kl, u_int8_t fib_prio)
738
{
739
	struct kroute_node	*kr;
740
741
	if ((kr = kroute_find(kt, kl->prefix.vpn4.addr.s_addr, kl->prefixlen,
742
	    fib_prio)) == NULL)
743
		return (0);
744
745
	if (!(kr->r.flags & F_BGPD_INSERTED))
746
		return (0);
747
748
	if (send_rtmsg(kr_state.fd, RTM_DELETE, kt, &kr->r, fib_prio) == -1)
749
		return (-1);
750
751
	rtlabel_unref(kr->r.labelid);
752
753
	if (kroute_remove(kt, kr) == -1)
754
		return (-1);
755
756
	return (0);
757
}
758
759
void
760
kr_shutdown(u_int8_t fib_prio)
761
{
762
	u_int	i;
763
764
	for (i = krt_size; i > 0; i--)
765
		ktable_free(i - 1, fib_prio);
766
	kif_clear();
767
}
768
769
void
770
kr_fib_couple(u_int rtableid, u_int8_t fib_prio)
771
{
772
	struct ktable		*kt;
773
	struct kroute_node	*kr;
774
	struct kroute6_node	*kr6;
775
776
	if ((kt = ktable_get(rtableid)) == NULL)  /* table does not exist */
777
		return;
778
779
	if (kt->fib_sync)	/* already coupled */
780
		return;
781
782
	kt->fib_sync = 1;
783
784
	RB_FOREACH(kr, kroute_tree, &kt->krt)
785
		if ((kr->r.flags & F_BGPD_INSERTED))
786
			send_rtmsg(kr_state.fd, RTM_ADD, kt, &kr->r, fib_prio);
787
	RB_FOREACH(kr6, kroute6_tree, &kt->krt6)
788
		if ((kr6->r.flags & F_BGPD_INSERTED))
789
			send_rt6msg(kr_state.fd, RTM_ADD, kt, &kr6->r,
790
			    fib_prio);
791
792
	log_info("kernel routing table %u (%s) coupled", kt->rtableid,
793
	    kt->descr);
794
}
795
796
void
797
kr_fib_couple_all(u_int8_t fib_prio)
798
{
799
	u_int	 i;
800
801
	for (i = krt_size; i > 0; i--)
802
		kr_fib_couple(i - 1, fib_prio);
803
}
804
805
void
806
kr_fib_decouple(u_int rtableid, u_int8_t fib_prio)
807
{
808
	struct ktable		*kt;
809
	struct kroute_node	*kr;
810
	struct kroute6_node	*kr6;
811
812
	if ((kt = ktable_get(rtableid)) == NULL)  /* table does not exist */
813
		return;
814
815
	if (!kt->fib_sync)	/* already decoupled */
816
		return;
817
818
	RB_FOREACH(kr, kroute_tree, &kt->krt)
819
		if ((kr->r.flags & F_BGPD_INSERTED))
820
			send_rtmsg(kr_state.fd, RTM_DELETE, kt, &kr->r,
821
			    fib_prio);
822
	RB_FOREACH(kr6, kroute6_tree, &kt->krt6)
823
		if ((kr6->r.flags & F_BGPD_INSERTED))
824
			send_rt6msg(kr_state.fd, RTM_DELETE, kt, &kr6->r,
825
			    fib_prio);
826
827
	kt->fib_sync = 0;
828
829
	log_info("kernel routing table %u (%s) decoupled", kt->rtableid,
830
	    kt->descr);
831
}
832
833
void
834
kr_fib_decouple_all(u_int8_t fib_prio)
835
{
836
	u_int	 i;
837
838
	for (i = krt_size; i > 0; i--)
839
		kr_fib_decouple(i - 1, fib_prio);
840
}
841
842
void
843
kr_fib_update_prio(u_int rtableid, u_int8_t fib_prio)
844
{
845
	struct ktable		*kt;
846
	struct kroute_node	*kr;
847
	struct kroute6_node	*kr6;
848
849
	if ((kt = ktable_get(rtableid)) == NULL)  /* table does not exist */
850
		return;
851
852
	RB_FOREACH(kr, kroute_tree, &kt->krt)
853
		if ((kr->r.flags & F_BGPD_INSERTED))
854
			kr->r.priority = fib_prio;
855
856
	RB_FOREACH(kr6, kroute6_tree, &kt->krt6)
857
		if ((kr6->r.flags & F_BGPD_INSERTED))
858
			kr6->r.priority = fib_prio;
859
}
860
861
void
862
kr_fib_update_prio_all(u_int8_t fib_prio)
863
{
864
	u_int	 i;
865
866
	for (i = krt_size; i > 0; i--)
867
		kr_fib_update_prio(i - 1, fib_prio);
868
}
869
870
int
871
kr_dispatch_msg(void)
872
{
873
	return (dispatch_rtmsg());
874
}
875
876
int
877
kr_nexthop_add(u_int rtableid, struct bgpd_addr *addr)
878
{
879
	struct ktable		*kt;
880
	struct knexthop_node	*h;
881
882
	if ((kt = ktable_get(rtableid)) == NULL) {
883
		log_warnx("kr_nexthop_add: non-existent rtableid %d", rtableid);
884
		return (0);
885
	}
886
	if ((h = knexthop_find(kt, addr)) != NULL) {
887
		/* should not happen... this is actually an error path */
888
		knexthop_send_update(h);
889
	} else {
890
		if ((h = calloc(1, sizeof(struct knexthop_node))) == NULL) {
891
			log_warn("kr_nexthop_add");
892
			return (-1);
893
		}
894
		memcpy(&h->nexthop, addr, sizeof(h->nexthop));
895
896
		if (knexthop_insert(kt, h) == -1)
897
			return (-1);
898
	}
899
900
	return (0);
901
}
902
903
void
904
kr_nexthop_delete(u_int rtableid, struct bgpd_addr *addr)
905
{
906
	struct ktable		*kt;
907
	struct knexthop_node	*kn;
908
909
	if ((kt = ktable_get(rtableid)) == NULL) {
910
		log_warnx("kr_nexthop_delete: non-existent rtableid %d",
911
		    rtableid);
912
		return;
913
	}
914
	if ((kn = knexthop_find(kt, addr)) == NULL)
915
		return;
916
917
	knexthop_remove(kt, kn);
918
}
919
920
void
921
kr_show_route(struct imsg *imsg)
922
{
923
	struct ktable		*kt;
924
	struct kroute_node	*kr, *kn;
925
	struct kroute6_node	*kr6, *kn6;
926
	struct bgpd_addr	*addr;
927
	int			 flags;
928
	sa_family_t		 af;
929
	struct ctl_show_nexthop	 snh;
930
	struct knexthop_node	*h;
931
	struct kif_node		*kif;
932
	u_int			 i;
933
	u_short			 ifindex = 0;
934
935
	switch (imsg->hdr.type) {
936
	case IMSG_CTL_KROUTE:
937
		if (imsg->hdr.len != IMSG_HEADER_SIZE + sizeof(flags) +
938
		    sizeof(af)) {
939
			log_warnx("kr_show_route: wrong imsg len");
940
			break;
941
		}
942
		kt = ktable_get(imsg->hdr.peerid);
943
		if (kt == NULL) {
944
			log_warnx("kr_show_route: table %u does not exist",
945
			    imsg->hdr.peerid);
946
			break;
947
		}
948
		memcpy(&flags, imsg->data, sizeof(flags));
949
		memcpy(&af, (char *)imsg->data + sizeof(flags), sizeof(af));
950
		if (!af || af == AF_INET)
951
			RB_FOREACH(kr, kroute_tree, &kt->krt) {
952
				if (flags && (kr->r.flags & flags) == 0)
953
					continue;
954
				kn = kr;
955
				do {
956
					send_imsg_session(IMSG_CTL_KROUTE,
957
					    imsg->hdr.pid, kr_tofull(&kn->r),
958
					    sizeof(struct kroute_full));
959
				} while ((kn = kn->next) != NULL);
960
			}
961
		if (!af || af == AF_INET6)
962
			RB_FOREACH(kr6, kroute6_tree, &kt->krt6) {
963
				if (flags && (kr6->r.flags & flags) == 0)
964
					continue;
965
				kn6 = kr6;
966
				do {
967
					send_imsg_session(IMSG_CTL_KROUTE,
968
					    imsg->hdr.pid, kr6_tofull(&kn6->r),
969
					    sizeof(struct kroute_full));
970
				} while ((kn6 = kn6->next) != NULL);
971
			}
972
		break;
973
	case IMSG_CTL_KROUTE_ADDR:
974
		if (imsg->hdr.len != IMSG_HEADER_SIZE +
975
		    sizeof(struct bgpd_addr)) {
976
			log_warnx("kr_show_route: wrong imsg len");
977
			break;
978
		}
979
		kt = ktable_get(imsg->hdr.peerid);
980
		if (kt == NULL) {
981
			log_warnx("kr_show_route: table %u does not exist",
982
			    imsg->hdr.peerid);
983
			break;
984
		}
985
		addr = imsg->data;
986
		kr = NULL;
987
		switch (addr->aid) {
988
		case AID_INET:
989
			kr = kroute_match(kt, addr->v4.s_addr, 1);
990
			if (kr != NULL)
991
				send_imsg_session(IMSG_CTL_KROUTE,
992
				    imsg->hdr.pid, kr_tofull(&kr->r),
993
				    sizeof(struct kroute_full));
994
			break;
995
		case AID_INET6:
996
			kr6 = kroute6_match(kt, &addr->v6, 1);
997
			if (kr6 != NULL)
998
				send_imsg_session(IMSG_CTL_KROUTE,
999
				    imsg->hdr.pid, kr6_tofull(&kr6->r),
1000
				    sizeof(struct kroute_full));
1001
			break;
1002
		}
1003
		break;
1004
	case IMSG_CTL_SHOW_NEXTHOP:
1005
		kt = ktable_get(imsg->hdr.peerid);
1006
		if (kt == NULL) {
1007
			log_warnx("kr_show_route: table %u does not exist",
1008
			    imsg->hdr.peerid);
1009
			break;
1010
		}
1011
		RB_FOREACH(h, knexthop_tree, KT2KNT(kt)) {
1012
			bzero(&snh, sizeof(snh));
1013
			memcpy(&snh.addr, &h->nexthop, sizeof(snh.addr));
1014
			if (h->kroute != NULL) {
1015
				switch (h->nexthop.aid) {
1016
				case AID_INET:
1017
					kr = h->kroute;
1018
					snh.valid = kroute_validate(&kr->r);
1019
					snh.krvalid = 1;
1020
					memcpy(&snh.kr.kr4, &kr->r,
1021
					    sizeof(snh.kr.kr4));
1022
					ifindex = kr->r.ifindex;
1023
					break;
1024
				case AID_INET6:
1025
					kr6 = h->kroute;
1026
					snh.valid = kroute6_validate(&kr6->r);
1027
					snh.krvalid = 1;
1028
					memcpy(&snh.kr.kr6, &kr6->r,
1029
					    sizeof(snh.kr.kr6));
1030
					ifindex = kr6->r.ifindex;
1031
					break;
1032
				}
1033
				if ((kif = kif_find(ifindex)) != NULL)
1034
					memcpy(&snh.kif, &kif->k,
1035
					    sizeof(snh.kif));
1036
			}
1037
			send_imsg_session(IMSG_CTL_SHOW_NEXTHOP, imsg->hdr.pid,
1038
			    &snh, sizeof(snh));
1039
		}
1040
		break;
1041
	case IMSG_CTL_SHOW_INTERFACE:
1042
		RB_FOREACH(kif, kif_tree, &kit)
1043
			send_imsg_session(IMSG_CTL_SHOW_INTERFACE,
1044
			    imsg->hdr.pid, &kif->k, sizeof(kif->k));
1045
		break;
1046
	case IMSG_CTL_SHOW_FIB_TABLES:
1047
		for (i = 0; i < krt_size; i++) {
1048
			struct ktable	ktab;
1049
1050
			if ((kt = ktable_get(i)) == NULL)
1051
				continue;
1052
1053
			ktab = *kt;
1054
			/* do not leak internal information */
1055
			RB_INIT(&ktab.krt);
1056
			RB_INIT(&ktab.krt6);
1057
			RB_INIT(&ktab.knt);
1058
			TAILQ_INIT(&ktab.krn);
1059
1060
			send_imsg_session(IMSG_CTL_SHOW_FIB_TABLES,
1061
			    imsg->hdr.pid, &ktab, sizeof(ktab));
1062
		}
1063
		break;
1064
	default:	/* nada */
1065
		break;
1066
	}
1067
1068
	send_imsg_session(IMSG_CTL_END, imsg->hdr.pid, NULL, 0);
1069
}
1070
1071
void
1072
kr_ifinfo(char *ifname)
1073
{
1074
	struct kif_node	*kif;
1075
1076
	RB_FOREACH(kif, kif_tree, &kit)
1077
		if (!strcmp(ifname, kif->k.ifname)) {
1078
			send_imsg_session(IMSG_IFINFO, 0,
1079
			    &kif->k, sizeof(kif->k));
1080
			return;
1081
		}
1082
}
1083
1084
void
1085
kr_net_delete(struct network *n)
1086
{
1087
	filterset_free(&n->net.attrset);
1088
	free(n);
1089
}
1090
1091
struct network *
1092
kr_net_match(struct ktable *kt, struct kroute *kr)
1093
{
1094
	struct network		*xn;
1095
1096
	TAILQ_FOREACH(xn, &kt->krn, entry) {
1097
		if (xn->net.prefix.aid != AID_INET)
1098
			continue;
1099
		switch (xn->net.type) {
1100
		case NETWORK_DEFAULT:
1101
			if (xn->net.prefixlen == kr->prefixlen &&
1102
			    xn->net.prefix.v4.s_addr == kr->prefix.s_addr)
1103
				/* static match already redistributed */
1104
				return (NULL);
1105
			break;
1106
		case NETWORK_STATIC:
1107
			if (kr->flags & F_STATIC)
1108
				return (xn);
1109
			break;
1110
		case NETWORK_CONNECTED:
1111
			if (kr->flags & F_CONNECTED)
1112
				return (xn);
1113
			break;
1114
		case NETWORK_MRTCLONE:
1115
			/* can not happen */
1116
			break;
1117
		}
1118
	}
1119
	return (NULL);
1120
}
1121
1122
struct network *
1123
kr_net_match6(struct ktable *kt, struct kroute6 *kr6)
1124
{
1125
	struct network		*xn;
1126
1127
	TAILQ_FOREACH(xn, &kt->krn, entry) {
1128
		if (xn->net.prefix.aid != AID_INET6)
1129
			continue;
1130
		switch (xn->net.type) {
1131
		case NETWORK_DEFAULT:
1132
			if (xn->net.prefixlen == kr6->prefixlen &&
1133
			    memcmp(&xn->net.prefix.v6, &kr6->prefix,
1134
			    sizeof(struct in6_addr)) == 0)
1135
				/* static match already redistributed */
1136
				return (NULL);
1137
			break;
1138
		case NETWORK_STATIC:
1139
			if (kr6->flags & F_STATIC)
1140
				return (xn);
1141
			break;
1142
		case NETWORK_CONNECTED:
1143
			if (kr6->flags & F_CONNECTED)
1144
				return (xn);
1145
			break;
1146
		case NETWORK_MRTCLONE:
1147
			/* can not happen */
1148
			break;
1149
		}
1150
	}
1151
	return (NULL);
1152
}
1153
1154
struct network *
1155
kr_net_find(struct ktable *kt, struct network *n)
1156
{
1157
	struct network		*xn;
1158
1159
	TAILQ_FOREACH(xn, &kt->krn, entry) {
1160
		if (n->net.type != xn->net.type ||
1161
		    n->net.prefixlen != xn->net.prefixlen ||
1162
		    n->net.rtableid != xn->net.rtableid)
1163
			continue;
1164
		if (memcmp(&n->net.prefix, &xn->net.prefix,
1165
		    sizeof(n->net.prefix)) == 0)
1166
			return (xn);
1167
	}
1168
	return (NULL);
1169
}
1170
1171
int
1172
kr_net_reload(u_int rtableid, struct network_head *nh)
1173
{
1174
	struct network		*n, *xn;
1175
	struct ktable		*kt;
1176
1177
	if ((kt = ktable_get(rtableid)) == NULL) {
1178
		log_warnx("kr_net_reload: non-existent rtableid %d", rtableid);
1179
		return (-1);
1180
	}
1181
1182
	TAILQ_FOREACH(n, &kt->krn, entry)
1183
		n->net.old = 1;
1184
1185
	while ((n = TAILQ_FIRST(nh)) != NULL) {
1186
		TAILQ_REMOVE(nh, n, entry);
1187
		n->net.old = 0;
1188
		n->net.rtableid = rtableid;
1189
		xn = kr_net_find(kt, n);
1190
		if (xn) {
1191
			xn->net.old = 0;
1192
			filterset_free(&xn->net.attrset);
1193
			filterset_move(&n->net.attrset, &xn->net.attrset);
1194
			kr_net_delete(n);
1195
		} else
1196
			TAILQ_INSERT_TAIL(&kt->krn, n, entry);
1197
	}
1198
1199
	for (n = TAILQ_FIRST(&kt->krn); n != NULL; n = xn) {
1200
		xn = TAILQ_NEXT(n, entry);
1201
		if (n->net.old) {
1202
			if (n->net.type == NETWORK_DEFAULT)
1203
				if (send_network(IMSG_NETWORK_REMOVE, &n->net,
1204
				    NULL))
1205
					return (-1);
1206
			TAILQ_REMOVE(&kt->krn, n, entry);
1207
			kr_net_delete(n);
1208
		}
1209
	}
1210
1211
	return (0);
1212
}
1213
1214
int
1215
kr_redistribute(int type, struct ktable *kt, struct kroute *kr)
1216
{
1217
	struct network		*match;
1218
	struct network_config	 net;
1219
	u_int32_t		 a;
1220
1221
	/* shortcut for removals */
1222
	if (type == IMSG_NETWORK_REMOVE) {
1223
		if (!(kr->flags & F_REDISTRIBUTED))
1224
			return (0);	/* no match, don't redistribute */
1225
		kr->flags &= ~F_REDISTRIBUTED;
1226
		match = NULL;
1227
		goto sendit;
1228
	}
1229
1230
	if (!(kr->flags & F_KERNEL))
1231
		return (0);
1232
1233
	/* Dynamic routes are not redistributable. */
1234
	if (kr->flags & F_DYNAMIC)
1235
		return (0);
1236
1237
	/*
1238
	 * We consider the loopback net, multicast and experimental addresses
1239
	 * as not redistributable.
1240
	 */
1241
	a = ntohl(kr->prefix.s_addr);
1242
	if (IN_MULTICAST(a) || IN_BADCLASS(a) ||
1243
	    (a >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
1244
		return (0);
1245
1246
	/* Consider networks with nexthop loopback as not redistributable. */
1247
	if (kr->nexthop.s_addr == htonl(INADDR_LOOPBACK))
1248
		return (0);
1249
1250
	/*
1251
	 * never allow 0.0.0.0/0; the default route can only be redistributed
1252
	 * with announce default.
1253
	 */
1254
	if (kr->prefix.s_addr == INADDR_ANY && kr->prefixlen == 0)
1255
		return (0);
1256
1257
	match = kr_net_match(kt, kr);
1258
	if (match == NULL) {
1259
		if (!(kr->flags & F_REDISTRIBUTED))
1260
			return (0);	/* no match, don't redistribute */
1261
		/* route no longer matches but is redistributed, so remove */
1262
		kr->flags &= ~F_REDISTRIBUTED;
1263
		type = IMSG_NETWORK_REMOVE;
1264
	} else
1265
		kr->flags |= F_REDISTRIBUTED;
1266
1267
sendit:
1268
	bzero(&net, sizeof(net));
1269
	net.prefix.aid = AID_INET;
1270
	net.prefix.v4.s_addr = kr->prefix.s_addr;
1271
	net.prefixlen = kr->prefixlen;
1272
	net.rtableid = kt->rtableid;
1273
1274
	return (send_network(type, &net, match ? &match->net.attrset : NULL));
1275
}
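
kr_redistribute() above refuses to announce multicast, class E and loopback-net prefixes. The following self-contained check exercises the same address-class tests on a few sample addresses (the helper name and the samples are illustrative, not from bgpd):

#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

static int
v4_redistributable(const char *s)
{
	struct in_addr	 addr;
	u_int32_t	 a;

	if (inet_pton(AF_INET, s, &addr) != 1)
		return (0);
	a = ntohl(addr.s_addr);
	/* same classes kr_redistribute() treats as not redistributable */
	if (IN_MULTICAST(a) || IN_BADCLASS(a) ||
	    (a >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
		return (0);
	return (1);
}

int
main(void)
{
	const char	*samples[] =
	    { "192.0.2.1", "224.0.0.5", "127.0.0.1", "240.0.0.1" };
	size_t		 i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-12s %s\n", samples[i],
		    v4_redistributable(samples[i]) ?
		    "redistributable" : "skipped");
	return (0);
}
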
1276
1277
int
1278
kr_redistribute6(int type, struct ktable *kt, struct kroute6 *kr6)
1279
{
1280
	struct network		*match;
1281
	struct network_config	 net;
1282
1283
	/* shortcut for removals */
1284
	if (type == IMSG_NETWORK_REMOVE) {
1285
		if (!(kr6->flags & F_REDISTRIBUTED))
1286
			return (0);	/* no match, don't redistribute */
1287
		kr6->flags &= ~F_REDISTRIBUTED;
1288
		match = NULL;
1289
		goto sendit;
1290
	}
1291
1292
	if (!(kr6->flags & F_KERNEL))
1293
		return (0);
1294
1295
	/* Dynamic routes are not redistributable. */
1296
	if (kr6->flags & F_DYNAMIC)
1297
		return (0);
1298
1299
	/*
1300
	 * We consider unspecified, loopback, multicast, link- and site-local,
1301
	 * IPv4 mapped and IPv4 compatible addresses as not redistributable.
1302
	 */
1303
	if (IN6_IS_ADDR_UNSPECIFIED(&kr6->prefix) ||
1304
	    IN6_IS_ADDR_LOOPBACK(&kr6->prefix) ||
1305
	    IN6_IS_ADDR_MULTICAST(&kr6->prefix) ||
1306
	    IN6_IS_ADDR_LINKLOCAL(&kr6->prefix) ||
1307
	    IN6_IS_ADDR_SITELOCAL(&kr6->prefix) ||
1308
	    IN6_IS_ADDR_V4MAPPED(&kr6->prefix) ||
1309
	    IN6_IS_ADDR_V4COMPAT(&kr6->prefix))
1310
		return (0);
1311
1312
	/*
1313
	 * Consider networks with nexthop loopback as not redistributable.
1314
	 */
1315
	if (IN6_IS_ADDR_LOOPBACK(&kr6->nexthop))
1316
		return (0);
1317
1318
	/*
1319
	 * never allow ::/0; the default route can only be redistributed
1320
	 * with announce default.
1321
	 */
1322
	if (kr6->prefixlen == 0 &&
1323
	    memcmp(&kr6->prefix, &in6addr_any, sizeof(struct in6_addr)) == 0)
1324
		return (0);
1325
1326
	match = kr_net_match6(kt, kr6);
1327
	if (match == NULL) {
1328
		if (!(kr6->flags & F_REDISTRIBUTED))
1329
			return (0);	/* no match, don't redistribute */
1330
		/* route no longer matches but is redistributed, so remove */
1331
		kr6->flags &= ~F_REDISTRIBUTED;
1332
		type = IMSG_NETWORK_REMOVE;
1333
	} else
1334
		kr6->flags |= F_REDISTRIBUTED;
1335
sendit:
1336
	bzero(&net, sizeof(net));
1337
	net.prefix.aid = AID_INET6;
1338
	memcpy(&net.prefix.v6, &kr6->prefix, sizeof(struct in6_addr));
1339
	net.prefixlen = kr6->prefixlen;
1340
	net.rtableid = kt->rtableid;
1341
1342
	return (send_network(type, &net, match ? &match->net.attrset : NULL));
1343
}
1344
1345
int
1346
kr_reload(void)
1347
{
1348
	struct ktable		*kt;
1349
	struct kroute_node	*kr;
1350
	struct kroute6_node	*kr6;
1351
	struct knexthop_node	*nh;
1352
	struct network		*n;
1353
	u_int			 rid;
1354
	int			 hasdyn = 0;
1355
1356
	for (rid = 0; rid < krt_size; rid++) {
1357
		if ((kt = ktable_get(rid)) == NULL)
1358
			continue;
1359
1360
		RB_FOREACH(nh, knexthop_tree, KT2KNT(kt))
1361
			knexthop_validate(kt, nh);
1362
1363
		TAILQ_FOREACH(n, &kt->krn, entry)
1364
			if (n->net.type == NETWORK_DEFAULT) {
1365
				if (send_network(IMSG_NETWORK_ADD, &n->net,
1366
				    &n->net.attrset))
1367
					return (-1);
1368
			} else
1369
				hasdyn = 1;
1370
1371
		if (hasdyn) {
1372
			/* only evaluate the full tree if we need to */
1373
			RB_FOREACH(kr, kroute_tree, &kt->krt)
1374
				kr_redistribute(IMSG_NETWORK_ADD, kt, &kr->r);
1375
			RB_FOREACH(kr6, kroute6_tree, &kt->krt6)
1376
				kr_redistribute6(IMSG_NETWORK_ADD, kt, &kr6->r);
1377
		}
1378
	}
1379
1380
	return (0);
1381
}
1382
1383
struct kroute_full *
1384
kr_tofull(struct kroute *kr)
1385
{
1386
	static struct kroute_full	kf;
1387
1388
	bzero(&kf, sizeof(kf));
1389
1390
	kf.prefix.aid = AID_INET;
1391
	kf.prefix.v4.s_addr = kr->prefix.s_addr;
1392
	kf.nexthop.aid = AID_INET;
1393
	kf.nexthop.v4.s_addr = kr->nexthop.s_addr;
1394
	strlcpy(kf.label, rtlabel_id2name(kr->labelid), sizeof(kf.label));
1395
	kf.flags = kr->flags;
1396
	kf.ifindex = kr->ifindex;
1397
	kf.prefixlen = kr->prefixlen;
1398
	kf.priority = kr->priority;
1399
1400
	return (&kf);
1401
}
1402
1403
struct kroute_full *
1404
kr6_tofull(struct kroute6 *kr6)
1405
{
1406
	static struct kroute_full	kf;
1407
1408
	bzero(&kf, sizeof(kf));
1409
1410
	kf.prefix.aid = AID_INET6;
1411
	memcpy(&kf.prefix.v6, &kr6->prefix, sizeof(struct in6_addr));
1412
	kf.nexthop.aid = AID_INET6;
1413
	memcpy(&kf.nexthop.v6, &kr6->nexthop, sizeof(struct in6_addr));
1414
	strlcpy(kf.label, rtlabel_id2name(kr6->labelid), sizeof(kf.label));
1415
	kf.flags = kr6->flags;
1416
	kf.ifindex = kr6->ifindex;
1417
	kf.prefixlen = kr6->prefixlen;
1418
	kf.priority = kr6->priority;
1419
1420
	return (&kf);
1421
}
1422
1423
/*
1424
 * RB-tree compare functions
1425
 */
1426
1427
int
1428
kroute_compare(struct kroute_node *a, struct kroute_node *b)
1429
{
1430
	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
1431
		return (-1);
1432
	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
1433
		return (1);
1434
	if (a->r.prefixlen < b->r.prefixlen)
1435
		return (-1);
1436
	if (a->r.prefixlen > b->r.prefixlen)
1437
		return (1);
1438
1439
	/* if the priority is RTP_ANY finish on the first address hit */
1440
	if (a->r.priority == RTP_ANY || b->r.priority == RTP_ANY)
1441
		return (0);
1442
	if (a->r.priority < b->r.priority)
1443
		return (-1);
1444
	if (a->r.priority > b->r.priority)
1445
		return (1);
1446
	return (0);
1447
}
1448
1449
int
1450
kroute6_compare(struct kroute6_node *a, struct kroute6_node *b)
1451
{
1452
	int i;
1453
1454
	for (i = 0; i < 16; i++) {
1455
		if (a->r.prefix.s6_addr[i] < b->r.prefix.s6_addr[i])
1456
			return (-1);
1457
		if (a->r.prefix.s6_addr[i] > b->r.prefix.s6_addr[i])
1458
			return (1);
1459
	}
1460
1461
	if (a->r.prefixlen < b->r.prefixlen)
1462
		return (-1);
1463
	if (a->r.prefixlen > b->r.prefixlen)
1464
		return (1);
1465
1466
	/* if the priority is RTP_ANY finish on the first address hit */
1467
	if (a->r.priority == RTP_ANY || b->r.priority == RTP_ANY)
1468
		return (0);
1469
	if (a->r.priority < b->r.priority)
1470
		return (-1);
1471
	if (a->r.priority > b->r.priority)
1472
		return (1);
1473
	return (0);
1474
}
1475
1476
int
1477
knexthop_compare(struct knexthop_node *a, struct knexthop_node *b)
1478
{
1479
	int	i;
1480
1481
	if (a->nexthop.aid != b->nexthop.aid)
1482
		return (b->nexthop.aid - a->nexthop.aid);
1483
1484
	switch (a->nexthop.aid) {
1485
	case AID_INET:
1486
		if (ntohl(a->nexthop.v4.s_addr) < ntohl(b->nexthop.v4.s_addr))
1487
			return (-1);
1488
		if (ntohl(a->nexthop.v4.s_addr) > ntohl(b->nexthop.v4.s_addr))
1489
			return (1);
1490
		break;
1491
	case AID_INET6:
1492
		for (i = 0; i < 16; i++) {
1493
			if (a->nexthop.v6.s6_addr[i] < b->nexthop.v6.s6_addr[i])
1494
				return (-1);
1495
			if (a->nexthop.v6.s6_addr[i] > b->nexthop.v6.s6_addr[i])
1496
				return (1);
1497
		}
1498
		break;
1499
	default:
1500
		fatalx("knexthop_compare: unknown AF");
1501
	}
1502
1503
	return (0);
1504
}
1505
1506
int
1507
kif_compare(struct kif_node *a, struct kif_node *b)
1508
{
1509
	return (b->k.ifindex - a->k.ifindex);
1510
}
1511
1512
1513
/*
1514
 * tree management functions
1515
 */
1516
1517
struct kroute_node *
1518
kroute_find(struct ktable *kt, in_addr_t prefix, u_int8_t prefixlen,
1519
    u_int8_t prio)
1520
{
1521
	struct kroute_node	s;
1522
	struct kroute_node	*kn, *tmp;
1523
1524
	s.r.prefix.s_addr = prefix;
1525
	s.r.prefixlen = prefixlen;
1526
	s.r.priority = prio;
1527
1528
	kn = RB_FIND(kroute_tree, &kt->krt, &s);
1529
	if (kn && prio == RTP_ANY) {
1530
		tmp = RB_PREV(kroute_tree, &kt->krt, kn);
1531
		while (tmp) {
1532
			if (kroute_compare(&s, tmp) == 0)
1533
				kn = tmp;
1534
			else
1535
				break;
1536
			tmp = RB_PREV(kroute_tree, &kt->krt, kn);
1537
		}
1538
	}
1539
	return (kn);
1540
}
1541
1542
struct kroute_node *
1543
kroute_matchgw(struct kroute_node *kr, struct sockaddr_in *sa_in)
1544
{
1545
	in_addr_t	nexthop;
1546
1547
	if (sa_in == NULL) {
1548
		log_warnx("kroute_matchgw: no nexthop defined");
1549
		return (NULL);
1550
	}
1551
	nexthop = sa_in->sin_addr.s_addr;
1552
1553
	while (kr) {
1554
		if (kr->r.nexthop.s_addr == nexthop)
1555
			return (kr);
1556
		kr = kr->next;
1557
	}
1558
1559
	return (NULL);
1560
}
1561
1562
int
1563
kroute_insert(struct ktable *kt, struct kroute_node *kr)
1564
{
1565
	struct kroute_node	*krm;
1566
	struct knexthop_node	*h;
1567
	in_addr_t		 mask, ina;
1568
1569
	if ((krm = RB_INSERT(kroute_tree, &kt->krt, kr)) != NULL) {
1570
		/* multipath route, add at end of list */
1571
		while (krm->next != NULL)
1572
			krm = krm->next;
1573
		krm->next = kr;
1574
		kr->next = NULL; /* to be sure */
1575
	}
1576
1577
	/* XXX this is wrong for nexthop validated via BGP */
1578
	if (kr->r.flags & F_KERNEL) {
1579
		mask = prefixlen2mask(kr->r.prefixlen);
1580
		ina = ntohl(kr->r.prefix.s_addr);
1581
		RB_FOREACH(h, knexthop_tree, KT2KNT(kt))
1582
			if (h->nexthop.aid == AID_INET &&
1583
			    (ntohl(h->nexthop.v4.s_addr) & mask) == ina)
1584
				knexthop_validate(kt, h);
1585
1586
		if (kr->r.flags & F_CONNECTED)
1587
			if (kif_kr_insert(kr) == -1)
1588
				return (-1);
1589
1590
		if (krm == NULL)
1591
			/* redistribute multipath routes only once */
1592
			kr_redistribute(IMSG_NETWORK_ADD, kt, &kr->r);
1593
	}
1594
	return (0);
1595
}
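
When a kernel route is inserted, kroute_insert() above revalidates every nexthop that falls inside the new prefix by masking it with prefixlen2mask(). A worked example of that coverage test, with a local stand-in for prefixlen2mask() and illustrative addresses (both are assumptions for the demo):

#include <arpa/inet.h>
#include <stdio.h>
#include <stdint.h>

/* stand-in for bgpd's prefixlen2mask(): host-order netmask */
static uint32_t
prefixlen_to_mask(uint8_t prefixlen)
{
	if (prefixlen == 0)
		return (0);
	return (0xffffffffU << (32 - prefixlen));
}

int
main(void)
{
	struct in_addr	prefix, nexthop;
	uint32_t	mask, ina;

	inet_pton(AF_INET, "192.0.2.0", &prefix);
	inet_pton(AF_INET, "192.0.2.1", &nexthop);

	mask = prefixlen_to_mask(24);
	ina = ntohl(prefix.s_addr);

	/* same test as the RB_FOREACH loop in kroute_insert() */
	if ((ntohl(nexthop.s_addr) & mask) == ina)
		printf("nexthop is covered by 192.0.2.0/24, revalidate it\n");
	else
		printf("nexthop is not covered\n");
	return (0);
}
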
1596
1597
1598
int
1599
kroute_remove(struct ktable *kt, struct kroute_node *kr)
1600
{
1601
	struct kroute_node	*krm;
1602
	struct knexthop_node	*s;
1603
1604
	if ((krm = RB_FIND(kroute_tree, &kt->krt, kr)) == NULL) {
1605
		log_warnx("kroute_remove failed to find %s/%u",
1606
		    inet_ntoa(kr->r.prefix), kr->r.prefixlen);
1607
		return (-1);
1608
	}
1609
1610
	if (krm == kr) {
1611
		/* head element */
1612
		if (RB_REMOVE(kroute_tree, &kt->krt, kr) == NULL) {
1613
			log_warnx("kroute_remove failed for %s/%u",
1614
			    inet_ntoa(kr->r.prefix), kr->r.prefixlen);
1615
			return (-1);
1616
		}
1617
		if (kr->next != NULL) {
1618
			if (RB_INSERT(kroute_tree, &kt->krt, kr->next) !=
1619
			    NULL) {
1620
				log_warnx("kroute_remove failed to add %s/%u",
1621
				    inet_ntoa(kr->r.prefix), kr->r.prefixlen);
1622
				return (-1);
1623
			}
1624
		}
1625
	} else {
1626
		/* somewhere in the list */
1627
		while (krm->next != kr && krm->next != NULL)
1628
			krm = krm->next;
1629
		if (krm->next == NULL) {
1630
			log_warnx("kroute_remove multipath list corrupted "
1631
			    "for %s/%u", inet_ntoa(kr->r.prefix),
1632
			    kr->r.prefixlen);
1633
			return (-1);
1634
		}
1635
		krm->next = kr->next;
1636
	}
1637
1638
	/* check whether a nexthop depends on this kroute */
1639
	if (kr->r.flags & F_NEXTHOP)
1640
		RB_FOREACH(s, knexthop_tree, KT2KNT(kt))
1641
			if (s->kroute == kr)
1642
				knexthop_validate(kt, s);
1643
1644
	if (kr->r.flags & F_KERNEL && kr == krm && kr->next == NULL)
1645
		/* again remove only once */
1646
		kr_redistribute(IMSG_NETWORK_REMOVE, kt, &kr->r);
1647
1648
	if (kr->r.flags & F_CONNECTED)
1649
		if (kif_kr_remove(kr) == -1) {
1650
			free(kr);
1651
			return (-1);
1652
		}
1653
1654
	free(kr);
1655
	return (0);
1656
}
1657
1658
void
1659
kroute_clear(struct ktable *kt)
1660
{
1661
	struct kroute_node	*kr;
1662
1663
	while ((kr = RB_MIN(kroute_tree, &kt->krt)) != NULL)
1664
		kroute_remove(kt, kr);
1665
}
1666
1667
struct kroute6_node *
1668
kroute6_find(struct ktable *kt, const struct in6_addr *prefix,
1669
    u_int8_t prefixlen, u_int8_t prio)
1670
{
1671
	struct kroute6_node	s;
1672
	struct kroute6_node	*kn6, *tmp;
1673
1674
	memcpy(&s.r.prefix, prefix, sizeof(struct in6_addr));
1675
	s.r.prefixlen = prefixlen;
1676
	s.r.priority = prio;
1677
1678
	kn6 = RB_FIND(kroute6_tree, &kt->krt6, &s);
1679
	if (kn6 && prio == RTP_ANY) {
1680
		tmp = RB_PREV(kroute6_tree, &kt->krt6, kn6);
1681
		while (tmp) {
1682
			if (kroute6_compare(&s, tmp) == 0)
1683
				kn6 = tmp;
1684
			else
1685
				break;
1686
			tmp = RB_PREV(kroute6_tree, &kt->krt6, kn6);
1687
		}
1688
	}
1689
	return (kn6);
1690
}
1691
1692
struct kroute6_node *
1693
kroute6_matchgw(struct kroute6_node *kr, struct sockaddr_in6 *sa_in6)
1694
{
1695
	struct in6_addr	nexthop;
1696
1697
	if (sa_in6 == NULL) {
1698
		log_warnx("kroute6_matchgw: no nexthop defined");
1699
		return (NULL);
1700
	}
1701
	memcpy(&nexthop, &sa_in6->sin6_addr, sizeof(nexthop));
1702
1703
	while (kr) {
1704
		if (memcmp(&kr->r.nexthop, &nexthop, sizeof(nexthop)) == 0)
1705
			return (kr);
1706
		kr = kr->next;
1707
	}
1708
1709
	return (NULL);
1710
}
1711
1712
int
1713
kroute6_insert(struct ktable *kt, struct kroute6_node *kr)
1714
{
1715
	struct kroute6_node	*krm;
1716
	struct knexthop_node	*h;
1717
	struct in6_addr		 ina, inb;
1718
1719
	if ((krm = RB_INSERT(kroute6_tree, &kt->krt6, kr)) != NULL) {
1720
		/* multipath route, add at end of list */
1721
		while (krm->next != NULL)
1722
			krm = krm->next;
1723
		krm->next = kr;
1724
		kr->next = NULL; /* to be sure */
1725
	}
1726
1727
	/* XXX this is wrong for nexthop validated via BGP */
1728
	if (kr->r.flags & F_KERNEL) {
1729
		inet6applymask(&ina, &kr->r.prefix, kr->r.prefixlen);
1730
		RB_FOREACH(h, knexthop_tree, KT2KNT(kt))
1731
			if (h->nexthop.aid == AID_INET6) {
1732
				inet6applymask(&inb, &h->nexthop.v6,
1733
				    kr->r.prefixlen);
1734
				if (memcmp(&ina, &inb, sizeof(ina)) == 0)
1735
					knexthop_validate(kt, h);
1736
			}
1737
1738
		if (kr->r.flags & F_CONNECTED)
1739
			if (kif_kr6_insert(kr) == -1)
1740
				return (-1);
1741
1742
		if (krm == NULL)
1743
			/* redistribute multipath routes only once */
1744
			kr_redistribute6(IMSG_NETWORK_ADD, kt, &kr->r);
1745
	}
1746
1747
	return (0);
1748
}
1749
1750
int
1751
kroute6_remove(struct ktable *kt, struct kroute6_node *kr)
1752
{
1753
	struct kroute6_node	*krm;
1754
	struct knexthop_node	*s;
1755
1756
	if ((krm = RB_FIND(kroute6_tree, &kt->krt6, kr)) == NULL) {
1757
		log_warnx("kroute6_remove failed for %s/%u",
1758
		    log_in6addr(&kr->r.prefix), kr->r.prefixlen);
1759
		return (-1);
1760
	}
1761
1762
	if (krm == kr) {
1763
		/* head element */
1764
		if (RB_REMOVE(kroute6_tree, &kt->krt6, kr) == NULL) {
1765
			log_warnx("kroute6_remove failed for %s/%u",
1766
			    log_in6addr(&kr->r.prefix), kr->r.prefixlen);
1767
			return (-1);
1768
		}
1769
		if (kr->next != NULL) {
1770
			if (RB_INSERT(kroute6_tree, &kt->krt6, kr->next) !=
1771
			    NULL) {
1772
				log_warnx("kroute6_remove failed to add %s/%u",
1773
				    log_in6addr(&kr->r.prefix),
1774
				    kr->r.prefixlen);
1775
				return (-1);
1776
			}
1777
		}
1778
	} else {
1779
		/* somewhere in the list */
1780
		while (krm->next != kr && krm->next != NULL)
1781
			krm = krm->next;
1782
		if (krm->next == NULL) {
1783
			log_warnx("kroute6_remove multipath list corrupted "
1784
			    "for %s/%u", log_in6addr(&kr->r.prefix),
1785
			    kr->r.prefixlen);
1786
			return (-1);
1787
		}
1788
		krm->next = kr->next;
1789
	}
1790
1791
	/* check whether a nexthop depends on this kroute */
1792
	if (kr->r.flags & F_NEXTHOP)
1793
		RB_FOREACH(s, knexthop_tree, KT2KNT(kt))
1794
			if (s->kroute == kr)
1795
				knexthop_validate(kt, s);
1796
1797
	if (kr->r.flags & F_KERNEL && kr == krm && kr->next == NULL)
1798
		/* again remove only once */
1799
		kr_redistribute6(IMSG_NETWORK_REMOVE, kt, &kr->r);
1800
1801
	if (kr->r.flags & F_CONNECTED)
1802
		if (kif_kr6_remove(kr) == -1) {
1803
			free(kr);
1804
			return (-1);
1805
		}
1806
1807
	free(kr);
1808
	return (0);
1809
}
1810
1811
void
1812
kroute6_clear(struct ktable *kt)
1813
{
1814
	struct kroute6_node	*kr;
1815
1816
	while ((kr = RB_MIN(kroute6_tree, &kt->krt6)) != NULL)
1817
		kroute6_remove(kt, kr);
1818
}
1819
1820
struct knexthop_node *
1821
knexthop_find(struct ktable *kt, struct bgpd_addr *addr)
1822
{
1823
	struct knexthop_node	s;
1824
1825
	bzero(&s, sizeof(s));
1826
	memcpy(&s.nexthop, addr, sizeof(s.nexthop));
1827
1828
	return (RB_FIND(knexthop_tree, KT2KNT(kt), &s));
1829
}
1830
1831
int
1832
knexthop_insert(struct ktable *kt, struct knexthop_node *kn)
1833
{
1834
	if (RB_INSERT(knexthop_tree, KT2KNT(kt), kn) != NULL) {
1835
		log_warnx("knexthop_insert failed for %s",
1836
		    log_addr(&kn->nexthop));
1837
		free(kn);
1838
		return (-1);
1839
	}
1840
1841
	knexthop_validate(kt, kn);
1842
1843
	return (0);
1844
}
1845
1846
int
1847
knexthop_remove(struct ktable *kt, struct knexthop_node *kn)
1848
{
1849
	kroute_detach_nexthop(kt, kn);
1850
1851
	if (RB_REMOVE(knexthop_tree, KT2KNT(kt), kn) == NULL) {
1852
		log_warnx("knexthop_remove failed for %s",
1853
		    log_addr(&kn->nexthop));
1854
		return (-1);
1855
	}
1856
1857
	free(kn);
1858
	return (0);
1859
}
1860
1861
void
1862
knexthop_clear(struct ktable *kt)
1863
{
1864
	struct knexthop_node	*kn;
1865
1866
	while ((kn = RB_MIN(knexthop_tree, KT2KNT(kt))) != NULL)
1867
		knexthop_remove(kt, kn);
1868
}
1869
1870
struct kif_node *
1871
kif_find(int ifindex)
1872
{
1873
	struct kif_node	s;
1874
1875
	bzero(&s, sizeof(s));
1876
	s.k.ifindex = ifindex;
1877
1878
	return (RB_FIND(kif_tree, &kit, &s));
1879
}
1880
1881
int
1882
kif_insert(struct kif_node *kif)
1883
{
1884
	LIST_INIT(&kif->kroute_l);
1885
	LIST_INIT(&kif->kroute6_l);
1886
1887
	if (RB_INSERT(kif_tree, &kit, kif) != NULL) {
1888
		log_warnx("RB_INSERT(kif_tree, &kit, kif)");
1889
		free(kif);
1890
		return (-1);
1891
	}
1892
1893
	return (0);
1894
}
1895
1896
int
1897
kif_remove(struct kif_node *kif)
1898
{
1899
	struct ktable	*kt;
1900
	struct kif_kr	*kkr;
1901
	struct kif_kr6	*kkr6;
1902
1903
	if (RB_REMOVE(kif_tree, &kit, kif) == NULL) {
1904
		log_warnx("RB_REMOVE(kif_tree, &kit, kif)");
1905
		return (-1);
1906
	}
1907
1908
	if ((kt = ktable_get(/* XXX */ 0)) == NULL)
1909
		goto done;
1910
1911
	while ((kkr = LIST_FIRST(&kif->kroute_l)) != NULL) {
1912
		LIST_REMOVE(kkr, entry);
1913
		kkr->kr->r.flags &= ~F_NEXTHOP;
1914
		kroute_remove(kt, kkr->kr);
1915
		free(kkr);
1916
	}
1917
1918
	while ((kkr6 = LIST_FIRST(&kif->kroute6_l)) != NULL) {
1919
		LIST_REMOVE(kkr6, entry);
1920
		kkr6->kr->r.flags &= ~F_NEXTHOP;
1921
		kroute6_remove(kt, kkr6->kr);
1922
		free(kkr6);
1923
	}
1924
done:
1925
	free(kif);
1926
	return (0);
1927
}
1928
1929
void
1930
kif_clear(void)
1931
{
1932
	struct kif_node	*kif;
1933
1934
	while ((kif = RB_MIN(kif_tree, &kit)) != NULL)
1935
		kif_remove(kif);
1936
}
1937
1938
int
1939
kif_kr_insert(struct kroute_node *kr)
1940
{
1941
	struct kif_node	*kif;
1942
	struct kif_kr	*kkr;
1943
1944
	if ((kif = kif_find(kr->r.ifindex)) == NULL) {
1945
		if (kr->r.ifindex)
1946
			log_warnx("%s: interface with index %u not found",
1947
			    __func__, kr->r.ifindex);
1948
		return (0);
1949
	}
1950
1951
	if (kif->k.nh_reachable)
1952
		kr->r.flags &= ~F_DOWN;
1953
	else
1954
		kr->r.flags |= F_DOWN;
1955
1956
	if ((kkr = calloc(1, sizeof(struct kif_kr))) == NULL) {
1957
		log_warn("kif_kr_insert");
1958
		return (-1);
1959
	}
1960
1961
	kkr->kr = kr;
1962
1963
	LIST_INSERT_HEAD(&kif->kroute_l, kkr, entry);
1964
1965
	return (0);
1966
}
1967
1968
int
1969
kif_kr_remove(struct kroute_node *kr)
1970
{
1971
	struct kif_node	*kif;
1972
	struct kif_kr	*kkr;
1973
1974
	if ((kif = kif_find(kr->r.ifindex)) == NULL) {
1975
		if (kr->r.ifindex)
1976
			log_warnx("%s: interface with index %u not found",
1977
			    __func__, kr->r.ifindex);
1978
		return (0);
1979
	}
1980
1981
	for (kkr = LIST_FIRST(&kif->kroute_l); kkr != NULL && kkr->kr != kr;
1982
	    kkr = LIST_NEXT(kkr, entry))
1983
		;	/* nothing */
1984
1985
	if (kkr == NULL) {
1986
		log_warnx("can't remove connected route from interface "
1987
		    "with index %u: not found", kr->r.ifindex);
1988
		return (-1);
1989
	}
1990
1991
	LIST_REMOVE(kkr, entry);
1992
	free(kkr);
1993
1994
	return (0);
1995
}
1996
1997
int
1998
kif_kr6_insert(struct kroute6_node *kr)
1999
{
2000
	struct kif_node	*kif;
2001
	struct kif_kr6	*kkr6;
2002
2003
	if ((kif = kif_find(kr->r.ifindex)) == NULL) {
2004
		if (kr->r.ifindex)
2005
			log_warnx("%s: interface with index %u not found",
2006
			    __func__, kr->r.ifindex);
2007
		return (0);
2008
	}
2009
2010
	if (kif->k.nh_reachable)
2011
		kr->r.flags &= ~F_DOWN;
2012
	else
2013
		kr->r.flags |= F_DOWN;
2014
2015
	if ((kkr6 = calloc(1, sizeof(struct kif_kr6))) == NULL) {
2016
		log_warn("kif_kr6_insert");
2017
		return (-1);
2018
	}
2019
2020
	kkr6->kr = kr;
2021
2022
	LIST_INSERT_HEAD(&kif->kroute6_l, kkr6, entry);
2023
2024
	return (0);
2025
}
2026
2027
int
2028
kif_kr6_remove(struct kroute6_node *kr)
2029
{
2030
	struct kif_node	*kif;
2031
	struct kif_kr6	*kkr6;
2032
2033
	if ((kif = kif_find(kr->r.ifindex)) == NULL) {
2034
		if (kr->r.ifindex)
2035
			log_warnx("%s: interface with index %u not found",
2036
			    __func__, kr->r.ifindex);
2037
		return (0);
2038
	}
2039
2040
	for (kkr6 = LIST_FIRST(&kif->kroute6_l); kkr6 != NULL && kkr6->kr != kr;
2041
	    kkr6 = LIST_NEXT(kkr6, entry))
2042
		;	/* nothing */
2043
2044
	if (kkr6 == NULL) {
2045
		log_warnx("can't remove connected route from interface "
2046
		    "with index %u: not found", kr->r.ifindex);
2047
		return (-1);
2048
	}
2049
2050
	LIST_REMOVE(kkr6, entry);
2051
	free(kkr6);
2052
2053
	return (0);
2054
}
2055
2056
/*
2057
 * nexthop validation
2058
 */
2059
2060
int
2061
kif_validate(struct kif *kif)
2062
{
2063
	if (!(kif->flags & IFF_UP))
2064
		return (0);
2065
2066
	/*
2067
	 * we treat link_state == LINK_STATE_UNKNOWN as valid,
2068
	 * since not all interfaces have a concept of "link state" and
2069
	 * some never report it as up
2070
	 */
2071
2072
	if (kif->link_state == LINK_STATE_DOWN)
2073
		return (0);
2074
2075
	return (1);
2076
}
2077
2078
int
2079
kroute_validate(struct kroute *kr)
2080
{
2081
	struct kif_node		*kif;
2082
2083
	if (kr->flags & (F_REJECT | F_BLACKHOLE))
2084
		return (0);
2085
2086
	if ((kif = kif_find(kr->ifindex)) == NULL) {
2087
		if (kr->ifindex)
2088
			log_warnx("%s: interface with index %d not found, "
2089
			    "referenced from route for %s/%u", __func__,
2090
			    kr->ifindex, inet_ntoa(kr->prefix),
2091
			    kr->prefixlen);
2092
		return (1);
2093
	}
2094
2095
	return (kif->k.nh_reachable);
2096
}
2097
2098
int
2099
kroute6_validate(struct kroute6 *kr)
2100
{
2101
	struct kif_node		*kif;
2102
2103
	if (kr->flags & (F_REJECT | F_BLACKHOLE))
2104
		return (0);
2105
2106
	if ((kif = kif_find(kr->ifindex)) == NULL) {
2107
		if (kr->ifindex)
2108
			log_warnx("%s: interface with index %d not found, "
2109
			    "referenced from route for %s/%u", __func__,
2110
			    kr->ifindex, log_in6addr(&kr->prefix),
2111
			    kr->prefixlen);
2112
		return (1);
2113
	}
2114
2115
	return (kif->k.nh_reachable);
2116
}
2117
2118
void
2119
knexthop_validate(struct ktable *kt, struct knexthop_node *kn)
2120
{
2121
	void			*oldk;
2122
	struct kroute_node	*kr;
2123
	struct kroute6_node	*kr6;
2124
2125
	oldk = kn->kroute;
2126
	kroute_detach_nexthop(kt, kn);
2127
2128
	switch (kn->nexthop.aid) {
2129
	case AID_INET:
2130
		kr = kroute_match(kt, kn->nexthop.v4.s_addr, 0);
2131
2132
		if (kr) {
2133
			kn->kroute = kr;
2134
			kr->r.flags |= F_NEXTHOP;
2135
		}
2136
2137
		/*
2138
		 * Send an update if the nexthop route changed under us;
2139
		 * if the route remains the same, the NH state has not
2140
		 * changed. State changes are tracked by knexthop_track().
2141
		 */
2142
		if (kr != oldk)
2143
			knexthop_send_update(kn);
2144
		break;
2145
	case AID_INET6:
2146
		kr6 = kroute6_match(kt, &kn->nexthop.v6, 0);
2147
2148
		if (kr6) {
2149
			kn->kroute = kr6;
2150
			kr6->r.flags |= F_NEXTHOP;
2151
		}
2152
2153
		if (kr6 != oldk)
2154
			knexthop_send_update(kn);
2155
		break;
2156
	}
2157
}
2158
2159
void
2160
knexthop_track(struct ktable *kt, void *krp)
2161
{
2162
	struct knexthop_node	*kn;
2163
2164
	RB_FOREACH(kn, knexthop_tree, KT2KNT(kt))
2165
		if (kn->kroute == krp)
2166
			knexthop_send_update(kn);
2167
}
2168
2169
void
2170
knexthop_send_update(struct knexthop_node *kn)
2171
{
2172
	struct kroute_nexthop	 n;
2173
	struct kroute_node	*kr;
2174
	struct kroute6_node	*kr6;
2175
2176
	bzero(&n, sizeof(n));
2177
	memcpy(&n.nexthop, &kn->nexthop, sizeof(n.nexthop));
2178
2179
	if (kn->kroute == NULL) {
2180
		n.valid = 0;	/* NH is not valid */
2181
		send_nexthop_update(&n);
2182
		return;
2183
	}
2184
2185
	switch (kn->nexthop.aid) {
2186
	case AID_INET:
2187
		kr = kn->kroute;
2188
		n.valid = kroute_validate(&kr->r);
2189
		n.connected = kr->r.flags & F_CONNECTED;
2190
		if ((n.gateway.v4.s_addr =
2191
		    kr->r.nexthop.s_addr) != 0)
2192
			n.gateway.aid = AID_INET;
2193
		if (n.connected) {
2194
			n.net.aid = AID_INET;
2195
			n.net.v4.s_addr = kr->r.prefix.s_addr;
2196
			n.netlen = kr->r.prefixlen;
2197
		}
2198
		break;
2199
	case AID_INET6:
2200
		kr6 = kn->kroute;
2201
		n.valid = kroute6_validate(&kr6->r);
2202
		n.connected = kr6->r.flags & F_CONNECTED;
2203
		if (memcmp(&kr6->r.nexthop, &in6addr_any,
2204
		    sizeof(struct in6_addr)) != 0) {
2205
			n.gateway.aid = AID_INET6;
2206
			memcpy(&n.gateway.v6, &kr6->r.nexthop,
2207
			    sizeof(struct in6_addr));
2208
		}
2209
		if (n.connected) {
2210
			n.net.aid = AID_INET6;
2211
			memcpy(&n.net.v6, &kr6->r.prefix,
2212
			    sizeof(struct in6_addr));
2213
			n.netlen = kr6->r.prefixlen;
2214
		}
2215
		break;
2216
	}
2217
	send_nexthop_update(&n);
2218
}
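/*
 * Illustrative example (not part of kroute.c): for an IPv4 nexthop
 * 192.0.2.1 resolved by a connected kernel route 192.0.2.0/24 on a
 * usable interface, knexthop_send_update() above hands the RDE roughly
 *
 *	n.nexthop	192.0.2.1	(copied from the knexthop_node)
 *	n.valid		1		(kroute_validate() succeeded)
 *	n.connected	nonzero		(F_CONNECTED is set on the kroute)
 *	n.gateway	unset		(connected routes carry no gateway)
 *	n.net/netlen	192.0.2.0/24	(prefix of the covering kroute)
 *
 * whereas an unresolved nexthop (kn->kroute == NULL) carries only the
 * nexthop address itself with valid == 0.
 */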
2219
2220
struct kroute_node *
2221
kroute_match(struct ktable *kt, in_addr_t key, int matchall)
2222
{
2223
	int			 i;
2224
	struct kroute_node	*kr;
2225
	in_addr_t		 ina;
2226
2227
	ina = ntohl(key);
2228
2229
	/* we will never match the default route */
2230
	for (i = 32; i > 0; i--)
2231
		if ((kr = kroute_find(kt, htonl(ina & prefixlen2mask(i)), i,
2232
		    RTP_ANY)) != NULL)
2233
			if (matchall || bgpd_filternexthop(&kr->r, NULL) == 0)
2234
				return (kr);
2235
2236
	/* if we don't have a match yet, try to find a default route */
2237
	if ((kr = kroute_find(kt, 0, 0, RTP_ANY)) != NULL)
2238
		if (matchall || bgpd_filternexthop(&kr->r, NULL) == 0)
2239
			return (kr);
2240
2241
	return (NULL);
2242
}
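/*
 * Illustrative example (not part of kroute.c): kroute_match() above
 * implements longest-prefix matching by probing every prefix length,
 * most specific first.  For the key 192.0.2.1 it asks kroute_find()
 * for 192.0.2.1/32, 192.0.2.0/31, ..., 192.0.0.0/10, ..., 128.0.0.0/1
 * and only falls back to 0.0.0.0/0 after the loop, so a default route
 * is used only when no more specific kernel route exists (and, unless
 * matchall is set, only if bgpd_filternexthop() accepts the route).
 */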
2243
2244
struct kroute6_node *
2245
kroute6_match(struct ktable *kt, struct in6_addr *key, int matchall)
2246
{
2247
	int			 i;
2248
	struct kroute6_node	*kr6;
2249
	struct in6_addr		 ina;
2250
2251
	/* we will never match the default route */
2252
	for (i = 128; i > 0; i--) {
2253
		inet6applymask(&ina, key, i);
2254
		if ((kr6 = kroute6_find(kt, &ina, i, RTP_ANY)) != NULL)
2255
			if (matchall || bgpd_filternexthop(NULL, &kr6->r) == 0)
2256
				return (kr6);
2257
	}
2258
2259
	/* if we don't have a match yet, try to find a default route */
2260
	if ((kr6 = kroute6_find(kt, &in6addr_any, 0, RTP_ANY)) != NULL)
2261
		if (matchall || bgpd_filternexthop(NULL, &kr6->r) == 0)
2262
			return (kr6);
2263
2264
	return (NULL);
2265
}
2266
2267
void
2268
kroute_detach_nexthop(struct ktable *kt, struct knexthop_node *kn)
2269
{
2270
	struct knexthop_node	*s;
2271
	struct kroute_node	*k;
2272
	struct kroute6_node	*k6;
2273
2274
	if (kn->kroute == NULL)
2275
		return;
2276
2277
	/*
2278
	 * check whether there's another nexthop depending on this kroute
2279
	 * if not remove the flag
2280
	 */
2281
	RB_FOREACH(s, knexthop_tree, KT2KNT(kt))
2282
		if (s->kroute == kn->kroute && s != kn)
2283
			break;
2284
2285
	if (s == NULL) {
2286
		switch (kn->nexthop.aid) {
2287
		case AID_INET:
2288
			k = kn->kroute;
2289
			k->r.flags &= ~F_NEXTHOP;
2290
			break;
2291
		case AID_INET6:
2292
			k6 = kn->kroute;
2293
			k6->r.flags &= ~F_NEXTHOP;
2294
			break;
2295
		}
2296
	}
2297
2298
	kn->kroute = NULL;
2299
}
2300
2301
/*
2302
 * misc helpers
2303
 */
2304
2305
int
2306
protect_lo(struct ktable *kt)
2307
{
2308
	struct kroute_node	*kr;
2309
	struct kroute6_node	*kr6;
2310
2311
	/* special protection for 127/8 */
2312
	if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL) {
2313
		log_warn("protect_lo");
2314
		return (-1);
2315
	}
2316
	kr->r.prefix.s_addr = htonl(INADDR_LOOPBACK & IN_CLASSA_NET);
2317
	kr->r.prefixlen = 8;
2318
	kr->r.flags = F_KERNEL|F_CONNECTED;
2319
2320
	if (RB_INSERT(kroute_tree, &kt->krt, kr) != NULL)
2321
		free(kr);	/* kernel route already there, no problem */
2322
2323
	/* special protection for loopback */
2324
	if ((kr6 = calloc(1, sizeof(struct kroute6_node))) == NULL) {
2325
		log_warn("protect_lo");
2326
		return (-1);
2327
	}
2328
	memcpy(&kr6->r.prefix, &in6addr_loopback, sizeof(kr6->r.prefix));
2329
	kr6->r.prefixlen = 128;
2330
	kr6->r.flags = F_KERNEL|F_CONNECTED;
2331
2332
	if (RB_INSERT(kroute6_tree, &kt->krt6, kr6) != NULL)
2333
		free(kr6);	/* kernel route already there, no problem */
2334
2335
	return (0);
2336
}
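/*
 * Illustrative arithmetic (not part of kroute.c): the IPv4 guard entry
 * inserted above is INADDR_LOOPBACK & IN_CLASSA_NET, i.e.
 * 0x7f000001 & 0xff000000 == 0x7f000000, so the protected prefix is
 * 127.0.0.0/8; the IPv6 counterpart is the host route ::1/128.
 */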
2337
2338
u_int8_t
2339
prefixlen_classful(in_addr_t ina)
2340
{
2341
	/* it hurt to write this. */
2342
2343
	if (ina >= 0xf0000000U)		/* class E */
2344
		return (32);
2345
	else if (ina >= 0xe0000000U)	/* class D */
2346
		return (4);
2347
	else if (ina >= 0xc0000000U)	/* class C */
2348
		return (24);
2349
	else if (ina >= 0x80000000U)	/* class B */
2350
		return (16);
2351
	else				/* class A */
2352
		return (8);
2353
}
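/*
 * Illustrative examples (not part of kroute.c) for prefixlen_classful()
 * above, which compares its argument as a host byte order address:
 *
 *	10.0.0.0    (0x0a000000) <  0x80000000  -> class A -> /8
 *	172.16.0.0  (0xac100000) >= 0x80000000  -> class B -> /16
 *	192.168.1.0 (0xc0a80100) >= 0xc0000000  -> class C -> /24
 *	224.0.0.0   (0xe0000000) >= 0xe0000000  -> class D -> /4
 */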
2354
2355
u_int8_t
2356
mask2prefixlen(in_addr_t ina)
2357
{
2358
	if (ina == 0)
2359
		return (0);
2360
	else
2361
		return (33 - ffs(ntohl(ina)));
2362
}
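/*
 * Illustrative example (not part of kroute.c): mask2prefixlen() above
 * turns a network byte order netmask into its prefix length via ffs(3).
 * For 255.255.255.0, ntohl() yields 0xffffff00 whose lowest set bit is
 * bit 9, so 33 - 9 == 24; 255.255.255.255 gives 33 - 1 == 32, and
 * 0.0.0.0 is special-cased to 0.  It is the inverse of prefixlen2mask(),
 * e.g. mask2prefixlen(htonl(prefixlen2mask(24))) == 24.
 */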
2363
2364
u_int8_t
2365
mask2prefixlen6(struct sockaddr_in6 *sa_in6)
2366
{
2367
	u_int8_t	 l = 0, *ap, *ep;
2368
2369
	/*
2370
	 * sin6_len is the size of the sockaddr so subtract the offset of
2371
	 * the possibly truncated sin6_addr struct.
2372
	 */
2373
	ap = (u_int8_t *)&sa_in6->sin6_addr;
2374
	ep = (u_int8_t *)sa_in6 + sa_in6->sin6_len;
2375
	for (; ap < ep; ap++) {
2376
		/* this "beauty" is adopted from sbin/route/show.c ... */
2377
		switch (*ap) {
2378
		case 0xff:
2379
			l += 8;
2380
			break;
2381
		case 0xfe:
2382
			l += 7;
2383
			return (l);
2384
		case 0xfc:
2385
			l += 6;
2386
			return (l);
2387
		case 0xf8:
2388
			l += 5;
2389
			return (l);
2390
		case 0xf0:
2391
			l += 4;
2392
			return (l);
2393
		case 0xe0:
2394
			l += 3;
2395
			return (l);
2396
		case 0xc0:
2397
			l += 2;
2398
			return (l);
2399
		case 0x80:
2400
			l += 1;
2401
			return (l);
2402
		case 0x00:
2403
			return (l);
2404
		default:
2405
			fatalx("non contiguous inet6 netmask");
2406
		}
2407
	}
2408
2409
	return (l);
2410
}
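/*
 * Illustrative example (not part of kroute.c): mask2prefixlen6() above
 * walks only the mask bytes actually present in the (possibly truncated)
 * sockaddr_in6.  For a /52 mask, ffff:ffff:ffff:f000::, the first six
 * bytes are 0xff (6 * 8 == 48) and the seventh byte 0xf0 adds 4, giving
 * 52.  A kernel that truncates the sockaddr right after the last
 * non-zero mask byte yields the same result, because the loop simply
 * runs out of bytes and returns the length accumulated so far.
 */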
2411
2412
struct in6_addr *
2413
prefixlen2mask6(u_int8_t prefixlen)
2414
{
2415
	static struct in6_addr	mask;
2416
	int			i;
2417
2418
	bzero(&mask, sizeof(mask));
2419
	for (i = 0; i < prefixlen / 8; i++)
2420
		mask.s6_addr[i] = 0xff;
2421
	i = prefixlen % 8;
2422
	if (i)
2423
		mask.s6_addr[prefixlen / 8] = 0xff00 >> i;
2424
2425
	return (&mask);
2426
}
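/*
 * Illustrative example (not part of kroute.c): prefixlen2mask6(52) above
 * sets the first 52 / 8 == 6 bytes to 0xff and, since 52 % 8 == 4, sets
 * byte 6 to 0xff00 >> 4 (0xf0 after truncation to u_int8_t), producing
 * ffff:ffff:ffff:f000::.  The mask lives in a static buffer, so callers
 * such as send_rt6msg() copy it before the next call.
 */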
2427
2428
#define	ROUNDUP(a)	\
2429
    (((a) & (sizeof(long) - 1)) ? (1 + ((a) | (sizeof(long) - 1))) : (a))
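/*
 * Illustrative example (not part of kroute.c): ROUNDUP() above rounds a
 * sockaddr length up to the alignment of a long, as required when walking
 * the sockaddrs of a routing message.  With sizeof(long) == 8 this gives
 * ROUNDUP(5) == 8, ROUNDUP(16) == 16 and ROUNDUP(28) == 32; with
 * sizeof(long) == 4 it gives ROUNDUP(5) == 8 and ROUNDUP(28) == 28.
 */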
2430
2431
void
2432
get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
2433
{
2434
	int	i;
2435
2436
	for (i = 0; i < RTAX_MAX; i++) {
2437
		if (addrs & (1 << i)) {
2438
			rti_info[i] = sa;
2439
			sa = (struct sockaddr *)((char *)(sa) +
2440
			    ROUNDUP(sa->sa_len));
2441
		} else
2442
			rti_info[i] = NULL;
2443
	}
2444
}
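/*
 * Illustrative example (not part of kroute.c): for a routing message with
 * rtm_addrs == RTA_DST|RTA_GATEWAY|RTA_NETMASK, get_rtaddrs() above points
 * rti_info[RTAX_DST], rti_info[RTAX_GATEWAY] and rti_info[RTAX_NETMASK] at
 * the three consecutive, ROUNDUP-aligned sockaddrs following the header
 * and sets every other slot to NULL.  A minimal usage sketch (hypothetical
 * helper, relying only on headers already included in this file) mirroring
 * how fetchtable() and dispatch_rtmsg() below use it:
 *
 *	static struct sockaddr *
 *	rtmsg_dst(struct rt_msghdr *rtm)
 *	{
 *		struct sockaddr	*sa, *rti_info[RTAX_MAX];
 *
 *		sa = (struct sockaddr *)((char *)rtm + rtm->rtm_hdrlen);
 *		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);
 *		return (rti_info[RTAX_DST]);	-- NULL if RTA_DST unset
 *	}
 */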
2445
2446
void
2447
if_change(u_short ifindex, int flags, struct if_data *ifd)
2448
{
2449
	struct ktable		*kt;
2450
	struct kif_node		*kif;
2451
	struct kif_kr		*kkr;
2452
	struct kif_kr6		*kkr6;
2453
	u_int8_t		 reachable;
2454
2455
	if ((kif = kif_find(ifindex)) == NULL) {
2456
		log_warnx("%s: interface with index %u not found",
2457
		    __func__, ifindex);
2458
		return;
2459
	}
2460
2461
	kif->k.flags = flags;
2462
	kif->k.link_state = ifd->ifi_link_state;
2463
	kif->k.if_type = ifd->ifi_type;
2464
	kif->k.baudrate = ifd->ifi_baudrate;
2465
2466
	send_imsg_session(IMSG_IFINFO, 0, &kif->k, sizeof(kif->k));
2467
2468
	if ((reachable = kif_validate(&kif->k)) == kif->k.nh_reachable)
2469
		return;		/* nothing changed wrt nexthop validity */
2470
2471
	kif->k.nh_reachable = reachable;
2472
2473
	kt = ktable_get(/* XXX */ 0);
2474
2475
	LIST_FOREACH(kkr, &kif->kroute_l, entry) {
2476
		if (reachable)
2477
			kkr->kr->r.flags &= ~F_DOWN;
2478
		else
2479
			kkr->kr->r.flags |= F_DOWN;
2480
2481
		if (kt == NULL)
2482
			continue;
2483
2484
		knexthop_track(kt, kkr->kr);
2485
	}
2486
	LIST_FOREACH(kkr6, &kif->kroute6_l, entry) {
2487
		if (reachable)
2488
			kkr6->kr->r.flags &= ~F_DOWN;
2489
		else
2490
			kkr6->kr->r.flags |= F_DOWN;
2491
2492
		if (kt == NULL)
2493
			continue;
2494
2495
		knexthop_track(kt, kkr6->kr);
2496
	}
2497
}
2498
2499
void
2500
if_announce(void *msg)
2501
{
2502
	struct if_announcemsghdr	*ifan;
2503
	struct kif_node			*kif;
2504
2505
	ifan = msg;
2506
2507
	switch (ifan->ifan_what) {
2508
	case IFAN_ARRIVAL:
2509
		if ((kif = calloc(1, sizeof(struct kif_node))) == NULL) {
2510
			log_warn("if_announce");
2511
			return;
2512
		}
2513
2514
		kif->k.ifindex = ifan->ifan_index;
2515
		strlcpy(kif->k.ifname, ifan->ifan_name, sizeof(kif->k.ifname));
2516
		kif_insert(kif);
2517
		break;
2518
	case IFAN_DEPARTURE:
2519
		kif = kif_find(ifan->ifan_index);
2520
		kif_remove(kif);
2521
		break;
2522
	}
2523
}
2524
2525
/*
2526
 * rtsock related functions
2527
 */
2528
2529
int
2530
send_rtmsg(int fd, int action, struct ktable *kt, struct kroute *kroute,
2531
    u_int8_t fib_prio)
2532
{
2533
	struct iovec		iov[7];
2534
	struct rt_msghdr	hdr;
2535
	struct sockaddr_in	prefix;
2536
	struct sockaddr_in	nexthop;
2537
	struct sockaddr_in	mask;
2538
	struct {
2539
		struct sockaddr_dl	dl;
2540
		char			pad[sizeof(long)];
2541
	}			ifp;
2542
	struct sockaddr_mpls	mpls;
2543
	struct sockaddr_rtlabel	label;
2544
	int			iovcnt = 0;
2545
2546
	if (!kt->fib_sync)
2547
		return (0);
2548
2549
	/* initialize header */
2550
	bzero(&hdr, sizeof(hdr));
2551
	hdr.rtm_version = RTM_VERSION;
2552
	hdr.rtm_type = action;
2553
	hdr.rtm_tableid = kt->rtableid;
2554
	hdr.rtm_priority = fib_prio;
2555
	if (kroute->flags & F_BLACKHOLE)
2556
		hdr.rtm_flags |= RTF_BLACKHOLE;
2557
	if (kroute->flags & F_REJECT)
2558
		hdr.rtm_flags |= RTF_REJECT;
2559
	if (action == RTM_CHANGE)	/* reset these flags on change */
2560
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
2561
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
2562
	hdr.rtm_msglen = sizeof(hdr);
2563
	/* adjust iovec */
2564
	iov[iovcnt].iov_base = &hdr;
2565
	iov[iovcnt++].iov_len = sizeof(hdr);
2566
2567
	bzero(&prefix, sizeof(prefix));
2568
	prefix.sin_len = sizeof(prefix);
2569
	prefix.sin_family = AF_INET;
2570
	prefix.sin_addr.s_addr = kroute->prefix.s_addr;
2571
	/* adjust header */
2572
	hdr.rtm_addrs |= RTA_DST;
2573
	hdr.rtm_msglen += sizeof(prefix);
2574
	/* adjust iovec */
2575
	iov[iovcnt].iov_base = &prefix;
2576
	iov[iovcnt++].iov_len = sizeof(prefix);
2577
2578
	if (kroute->nexthop.s_addr != 0) {
2579
		bzero(&nexthop, sizeof(nexthop));
2580
		nexthop.sin_len = sizeof(nexthop);
2581
		nexthop.sin_family = AF_INET;
2582
		nexthop.sin_addr.s_addr = kroute->nexthop.s_addr;
2583
		/* adjust header */
2584
		hdr.rtm_flags |= RTF_GATEWAY;
2585
		hdr.rtm_addrs |= RTA_GATEWAY;
2586
		hdr.rtm_msglen += sizeof(nexthop);
2587
		/* adjust iovec */
2588
		iov[iovcnt].iov_base = &nexthop;
2589
		iov[iovcnt++].iov_len = sizeof(nexthop);
2590
	}
2591
2592
	bzero(&mask, sizeof(mask));
2593
	mask.sin_len = sizeof(mask);
2594
	mask.sin_family = AF_INET;
2595
	mask.sin_addr.s_addr = htonl(prefixlen2mask(kroute->prefixlen));
2596
	/* adjust header */
2597
	hdr.rtm_addrs |= RTA_NETMASK;
2598
	hdr.rtm_msglen += sizeof(mask);
2599
	/* adjust iovec */
2600
	iov[iovcnt].iov_base = &mask;
2601
	iov[iovcnt++].iov_len = sizeof(mask);
2602
2603
	if (kt->ifindex) {
2604
		bzero(&ifp, sizeof(ifp));
2605
		ifp.dl.sdl_len = sizeof(struct sockaddr_dl);
2606
		ifp.dl.sdl_family = AF_LINK;
2607
		ifp.dl.sdl_index = kt->ifindex;
2608
		/* adjust header */
2609
		hdr.rtm_addrs |= RTA_IFP;
2610
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_dl));
2611
		/* adjust iovec */
2612
		iov[iovcnt].iov_base = &ifp;
2613
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_dl));
2614
	}
2615
2616
	if (kroute->flags & F_MPLS) {
2617
		bzero(&mpls, sizeof(mpls));
2618
		mpls.smpls_len = sizeof(mpls);
2619
		mpls.smpls_family = AF_MPLS;
2620
		mpls.smpls_label = kroute->mplslabel;
2621
		/* adjust header */
2622
		hdr.rtm_flags |= RTF_MPLS;
2623
		hdr.rtm_mpls = MPLS_OP_PUSH;
2624
		hdr.rtm_addrs |= RTA_SRC;
2625
		hdr.rtm_msglen += sizeof(mpls);
2626
		/* adjust iovec */
2627
		iov[iovcnt].iov_base = &mpls;
2628
		iov[iovcnt++].iov_len = sizeof(mpls);
2629
	}
2630
2631
	if (kroute->labelid) {
2632
		bzero(&label, sizeof(label));
2633
		label.sr_len = sizeof(label);
2634
		strlcpy(label.sr_label, rtlabel_id2name(kroute->labelid),
2635
		    sizeof(label.sr_label));
2636
		/* adjust header */
2637
		hdr.rtm_addrs |= RTA_LABEL;
2638
		hdr.rtm_msglen += sizeof(label);
2639
		/* adjust iovec */
2640
		iov[iovcnt].iov_base = &label;
2641
		iov[iovcnt++].iov_len = sizeof(label);
2642
	}
2643
2644
retry:
2645
	if (writev(fd, iov, iovcnt) == -1) {
2646
		if (errno == ESRCH) {
2647
			if (hdr.rtm_type == RTM_CHANGE) {
2648
				hdr.rtm_type = RTM_ADD;
2649
				goto retry;
2650
			} else if (hdr.rtm_type == RTM_DELETE) {
2651
				log_info("route %s/%u vanished before delete",
2652
				    inet_ntoa(kroute->prefix),
2653
				    kroute->prefixlen);
2654
				return (0);
2655
			}
2656
		}
2657
		log_warn("send_rtmsg: action %u, prefix %s/%u", hdr.rtm_type,
2658
		    inet_ntoa(kroute->prefix), kroute->prefixlen);
2659
		return (0);
2660
	}
2661
2662
	return (0);
2663
}
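/*
 * Illustrative layout (not part of kroute.c) of the message send_rtmsg()
 * above assembles: the sockaddrs follow the rt_msghdr in ascending RTA_*
 * bit order and rtm_msglen covers everything handed to writev():
 *
 *	iov[0]	struct rt_msghdr	rtm_addrs, rtm_flags, rtm_msglen
 *	iov[1]	sockaddr_in		RTA_DST     (prefix)
 *	iov[2]	sockaddr_in		RTA_GATEWAY (only if nexthop != 0)
 *	iov[3]	sockaddr_in		RTA_NETMASK (prefixlen2mask())
 *	iov[4]	sockaddr_dl		RTA_IFP     (only if kt->ifindex)
 *	iov[5]	sockaddr_mpls		RTA_SRC     (only for F_MPLS routes)
 *	iov[6]	sockaddr_rtlabel	RTA_LABEL   (only if labelid is set)
 *
 * When optional parts are absent the later entries shift down; iovcnt
 * tracks the actual number of iovec slots used.
 */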
2664
2665
int
2666
send_rt6msg(int fd, int action, struct ktable *kt, struct kroute6 *kroute,
2667
    u_int8_t fib_prio)
2668
{
2669
	struct iovec		iov[5];
2670
	struct rt_msghdr	hdr;
2671
	struct pad {
2672
		struct sockaddr_in6	addr;
2673
		char			pad[sizeof(long)];
2674
	} prefix, nexthop, mask;
2675
	struct sockaddr_rtlabel	label;
2676
	int			iovcnt = 0;
2677
2678
	if (!kt->fib_sync)
2679
		return (0);
2680
2681
	/* initialize header */
2682
	bzero(&hdr, sizeof(hdr));
2683
	hdr.rtm_version = RTM_VERSION;
2684
	hdr.rtm_type = action;
2685
	hdr.rtm_tableid = kt->rtableid;
2686
	hdr.rtm_priority = fib_prio;
2687
	if (kroute->flags & F_BLACKHOLE)
2688
		hdr.rtm_flags |= RTF_BLACKHOLE;
2689
	if (kroute->flags & F_REJECT)
2690
		hdr.rtm_flags |= RTF_REJECT;
2691
	if (action == RTM_CHANGE)	/* reset these flags on change */
2692
		hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
2693
	hdr.rtm_seq = kr_state.rtseq++;	/* overflow doesn't matter */
2694
	hdr.rtm_msglen = sizeof(hdr);
2695
	/* adjust iovec */
2696
	iov[iovcnt].iov_base = &hdr;
2697
	iov[iovcnt++].iov_len = sizeof(hdr);
2698
2699
	bzero(&prefix, sizeof(prefix));
2700
	prefix.addr.sin6_len = sizeof(struct sockaddr_in6);
2701
	prefix.addr.sin6_family = AF_INET6;
2702
	memcpy(&prefix.addr.sin6_addr, &kroute->prefix,
2703
	    sizeof(struct in6_addr));
2704
	/* XXX scope does not matter here, or does it? */
2705
	/* adjust header */
2706
	hdr.rtm_addrs |= RTA_DST;
2707
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
2708
	/* adjust iovec */
2709
	iov[iovcnt].iov_base = &prefix;
2710
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
2711
2712
	if (memcmp(&kroute->nexthop, &in6addr_any, sizeof(struct in6_addr))) {
2713
		bzero(&nexthop, sizeof(nexthop));
2714
		nexthop.addr.sin6_len = sizeof(struct sockaddr_in6);
2715
		nexthop.addr.sin6_family = AF_INET6;
2716
		memcpy(&nexthop.addr.sin6_addr, &kroute->nexthop,
2717
		    sizeof(struct in6_addr));
2718
		/* adjust header */
2719
		hdr.rtm_flags |= RTF_GATEWAY;
2720
		hdr.rtm_addrs |= RTA_GATEWAY;
2721
		hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
2722
		/* adjust iovec */
2723
		iov[iovcnt].iov_base = &nexthop;
2724
		iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
2725
	}
2726
2727
	bzero(&mask, sizeof(mask));
2728
	mask.addr.sin6_len = sizeof(struct sockaddr_in6);
2729
	mask.addr.sin6_family = AF_INET6;
2730
	memcpy(&mask.addr.sin6_addr, prefixlen2mask6(kroute->prefixlen),
2731
	    sizeof(struct in6_addr));
2732
	/* adjust header */
2733
	hdr.rtm_addrs |= RTA_NETMASK;
2734
	hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
2735
	/* adjust iovec */
2736
	iov[iovcnt].iov_base = &mask;
2737
	iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
2738
2739
	if (kroute->labelid) {
2740
		bzero(&label, sizeof(label));
2741
		label.sr_len = sizeof(label);
2742
		strlcpy(label.sr_label, rtlabel_id2name(kroute->labelid),
2743
		    sizeof(label.sr_label));
2744
		/* adjust header */
2745
		hdr.rtm_addrs |= RTA_LABEL;
2746
		hdr.rtm_msglen += sizeof(label);
2747
		/* adjust iovec */
2748
		iov[iovcnt].iov_base = &label;
2749
		iov[iovcnt++].iov_len = sizeof(label);
2750
	}
2751
2752
retry:
2753
	if (writev(fd, iov, iovcnt) == -1) {
2754
		if (errno == ESRCH) {
2755
			if (hdr.rtm_type == RTM_CHANGE) {
2756
				hdr.rtm_type = RTM_ADD;
2757
				goto retry;
2758
			} else if (hdr.rtm_type == RTM_DELETE) {
2759
				log_info("route %s/%u vanished before delete",
2760
				    log_in6addr(&kroute->prefix),
2761
				    kroute->prefixlen);
2762
				return (0);
2763
			}
2764
		}
2765
		log_warn("send_rt6msg: action %u, prefix %s/%u", hdr.rtm_type,
2766
		    log_in6addr(&kroute->prefix), kroute->prefixlen);
2767
		return (0);
2768
	}
2769
2770
	return (0);
2771
}
2772
2773
int
2774
fetchtable(struct ktable *kt, u_int8_t fib_prio)
2775
{
2776
	size_t			 len;
2777
	int			 mib[7];
2778
	char			*buf = NULL, *next, *lim;
2779
	struct rt_msghdr	*rtm;
2780
	struct sockaddr		*sa, *gw, *rti_info[RTAX_MAX];
2781
	struct sockaddr_in	*sa_in;
2782
	struct sockaddr_in6	*sa_in6;
2783
	struct kroute_node	*kr = NULL;
2784
	struct kroute6_node	*kr6 = NULL;
2785
2786
	mib[0] = CTL_NET;
2787
	mib[1] = PF_ROUTE;
2788
	mib[2] = 0;
2789
	mib[3] = 0;
2790
	mib[4] = NET_RT_DUMP;
2791
	mib[5] = 0;
2792
	mib[6] = kt->rtableid;
2793
2794
	if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
2795
		if (kt->rtableid != 0 && errno == EINVAL)
2796
			/* table nonexistent */
2797
			return (0);
2798
		log_warn("sysctl");
2799
		return (-1);
2800
	}
2801
	if (len > 0) {
2802
		if ((buf = malloc(len)) == NULL) {
2803
			log_warn("fetchtable");
2804
			return (-1);
2805
		}
2806
		if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
2807
			log_warn("sysctl2");
2808
			free(buf);
2809
			return (-1);
2810
		}
2811
	}
2812
2813
	lim = buf + len;
2814
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
2815
		rtm = (struct rt_msghdr *)next;
2816
		if (rtm->rtm_version != RTM_VERSION)
2817
			continue;
2818
		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
2819
		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);
2820
2821
		if ((sa = rti_info[RTAX_DST]) == NULL)
2822
			continue;
2823
2824
		/* Skip ARP/ND cache and broadcast routes. */
2825
		if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
2826
			continue;
2827
2828
		switch (sa->sa_family) {
2829
		case AF_INET:
2830
			if ((kr = calloc(1, sizeof(struct kroute_node))) ==
2831
			    NULL) {
2832
				log_warn("fetchtable");
2833
				free(buf);
2834
				return (-1);
2835
			}
2836
2837
			kr->r.flags = F_KERNEL;
2838
			kr->r.priority = rtm->rtm_priority;
2839
			kr->r.ifindex = rtm->rtm_index;
2840
			kr->r.prefix.s_addr =
2841
			    ((struct sockaddr_in *)sa)->sin_addr.s_addr;
2842
			sa_in = (struct sockaddr_in *)rti_info[RTAX_NETMASK];
2843
			if (rtm->rtm_flags & RTF_STATIC)
2844
				kr->r.flags |= F_STATIC;
2845
			if (rtm->rtm_flags & RTF_BLACKHOLE)
2846
				kr->r.flags |= F_BLACKHOLE;
2847
			if (rtm->rtm_flags & RTF_REJECT)
2848
				kr->r.flags |= F_REJECT;
2849
			if (rtm->rtm_flags & RTF_DYNAMIC)
2850
				kr->r.flags |= F_DYNAMIC;
2851
			if (sa_in != NULL) {
2852
				if (sa_in->sin_len == 0)
2853
					break;
2854
				kr->r.prefixlen =
2855
				    mask2prefixlen(sa_in->sin_addr.s_addr);
2856
			} else if (rtm->rtm_flags & RTF_HOST)
2857
				kr->r.prefixlen = 32;
2858
			else
2859
				kr->r.prefixlen =
2860
				    prefixlen_classful(kr->r.prefix.s_addr);
2861
			break;
2862
		case AF_INET6:
2863
			if ((kr6 = calloc(1, sizeof(struct kroute6_node))) ==
2864
			    NULL) {
2865
				log_warn("fetchtable");
2866
				free(buf);
2867
				return (-1);
2868
			}
2869
2870
			kr6->r.flags = F_KERNEL;
2871
			kr6->r.priority = rtm->rtm_priority;
2872
			kr6->r.ifindex = rtm->rtm_index;
2873
			memcpy(&kr6->r.prefix,
2874
			    &((struct sockaddr_in6 *)sa)->sin6_addr,
2875
			    sizeof(kr6->r.prefix));
2876
2877
			sa_in6 = (struct sockaddr_in6 *)rti_info[RTAX_NETMASK];
2878
			if (rtm->rtm_flags & RTF_STATIC)
2879
				kr6->r.flags |= F_STATIC;
2880
			if (rtm->rtm_flags & RTF_BLACKHOLE)
2881
				kr6->r.flags |= F_BLACKHOLE;
2882
			if (rtm->rtm_flags & RTF_REJECT)
2883
				kr6->r.flags |= F_REJECT;
2884
			if (rtm->rtm_flags & RTF_DYNAMIC)
2885
				kr6->r.flags |= F_DYNAMIC;
2886
			if (sa_in6 != NULL) {
2887
				if (sa_in6->sin6_len == 0)
2888
					break;
2889
				kr6->r.prefixlen = mask2prefixlen6(sa_in6);
2890
			} else if (rtm->rtm_flags & RTF_HOST)
2891
				kr6->r.prefixlen = 128;
2892
			else
2893
				fatalx("INET6 route without netmask");
2894
			break;
2895
		default:
2896
			continue;
2897
		}
2898
2899
		if ((gw = rti_info[RTAX_GATEWAY]) != NULL)
2900
			switch (gw->sa_family) {
2901
			case AF_INET:
2902
				if (kr == NULL)
2903
					fatalx("v4 gateway for !v4 dst?!");
2904
2905
				if (rtm->rtm_flags & RTF_CONNECTED) {
2906
					kr->r.flags |= F_CONNECTED;
2907
					break;
2908
				}
2909
2910
				kr->r.nexthop.s_addr =
2911
				    ((struct sockaddr_in *)gw)->sin_addr.s_addr;
2912
				break;
2913
			case AF_INET6:
2914
				if (kr6 == NULL)
2915
					fatalx("v6 gateway for !v6 dst?!");
2916
2917
				if (rtm->rtm_flags & RTF_CONNECTED) {
2918
					kr6->r.flags |= F_CONNECTED;
2919
					break;
2920
				}
2921
2922
				memcpy(&kr6->r.nexthop,
2923
				    &((struct sockaddr_in6 *)gw)->sin6_addr,
2924
				    sizeof(kr6->r.nexthop));
2925
				break;
2926
			case AF_LINK:
2927
				/*
2928
				 * Traditional BSD connected routes have
2929
				 * a gateway of type AF_LINK.
2930
				 */
2931
				if (sa->sa_family == AF_INET)
2932
					kr->r.flags |= F_CONNECTED;
2933
				else if (sa->sa_family == AF_INET6)
2934
					kr6->r.flags |= F_CONNECTED;
2935
				break;
2936
			}
2937
2938
		if (sa->sa_family == AF_INET) {
2939
			if (rtm->rtm_priority == fib_prio)  {
2940
				send_rtmsg(kr_state.fd, RTM_DELETE, kt, &kr->r,
2941
				    fib_prio);
2942
				free(kr);
2943
			} else
2944
				kroute_insert(kt, kr);
2945
		} else if (sa->sa_family == AF_INET6) {
2946
			if (rtm->rtm_priority == fib_prio)  {
2947
				send_rt6msg(kr_state.fd, RTM_DELETE, kt,
2948
				    &kr6->r, fib_prio);
2949
				free(kr6);
2950
			} else
2951
				kroute6_insert(kt, kr6);
2952
		}
2953
	}
2954
	free(buf);
2955
	return (0);
2956
}
2957
2958
int
2959
fetchifs(int ifindex)
2960
{
2961
	size_t			 len;
2962
	int			 mib[6];
2963
	char			*buf, *next, *lim;
2964
	struct if_msghdr	 ifm;
2965
	struct kif_node		*kif;
2966
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
2967
	struct sockaddr_dl	*sdl;
2968
2969
	mib[0] = CTL_NET;
2970
	mib[1] = PF_ROUTE;
2971
	mib[2] = 0;
2972
	mib[3] = AF_INET;	/* AF does not matter but AF_INET is shorter */
2973
	mib[4] = NET_RT_IFLIST;
2974
	mib[5] = ifindex;
2975
2976
	if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
2977
		log_warn("sysctl");
2978
		return (-1);
2979
	}
2980
	if ((buf = malloc(len)) == NULL) {
2981
		log_warn("fetchif");
2982
		return (-1);
2983
	}
2984
	if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
2985
		log_warn("sysctl");
2986
		free(buf);
2987
		return (-1);
2988
	}
2989
2990
	lim = buf + len;
2991
	for (next = buf; next < lim; next += ifm.ifm_msglen) {
2992
		memcpy(&ifm, next, sizeof(ifm));
2993
		if (ifm.ifm_version != RTM_VERSION)
2994
			continue;
2995
		if (ifm.ifm_type != RTM_IFINFO)
2996
			continue;
2997
2998
		sa = (struct sockaddr *)(next + sizeof(ifm));
2999
		get_rtaddrs(ifm.ifm_addrs, sa, rti_info);
3000
3001
		if ((kif = calloc(1, sizeof(struct kif_node))) == NULL) {
3002
			log_warn("fetchifs");
3003
			free(buf);
3004
			return (-1);
3005
		}
3006
3007
		kif->k.ifindex = ifm.ifm_index;
3008
		kif->k.flags = ifm.ifm_flags;
3009
		kif->k.link_state = ifm.ifm_data.ifi_link_state;
3010
		kif->k.if_type = ifm.ifm_data.ifi_type;
3011
		kif->k.baudrate = ifm.ifm_data.ifi_baudrate;
3012
		kif->k.nh_reachable = kif_validate(&kif->k);
3013
3014
		if ((sa = rti_info[RTAX_IFP]) != NULL)
3015
			if (sa->sa_family == AF_LINK) {
3016
				sdl = (struct sockaddr_dl *)sa;
3017
				if (sdl->sdl_nlen >= sizeof(kif->k.ifname))
3018
					memcpy(kif->k.ifname, sdl->sdl_data,
3019
					    sizeof(kif->k.ifname) - 1);
3020
				else if (sdl->sdl_nlen > 0)
3021
					memcpy(kif->k.ifname, sdl->sdl_data,
3022
					    sdl->sdl_nlen);
3023
				/* string already terminated via calloc() */
3024
			}
3025
3026
		kif_insert(kif);
3027
	}
3028
	free(buf);
3029
	return (0);
3030
}
3031
3032
int
3033
dispatch_rtmsg(void)
3034
{
3035
	char			 buf[RT_BUF_SIZE];
3036
	ssize_t			 n;
3037
	char			*next, *lim;
3038
	struct rt_msghdr	*rtm;
3039
	struct if_msghdr	 ifm;
3040
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
3041
	struct ktable		*kt;
3042
3043
	if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
3044
		if (errno == EAGAIN || errno == EINTR)
3045
			return (0);
3046
		log_warn("dispatch_rtmsg: read error");
3047
		return (-1);
3048
	}
3049
3050
	if (n == 0) {
3051
		log_warnx("routing socket closed");
3052
		return (-1);
3053
	}
3054
3055
	lim = buf + n;
3056
	for (next = buf; next < lim; next += rtm->rtm_msglen) {
3057
		rtm = (struct rt_msghdr *)next;
3058
		if (lim < next + sizeof(u_short) ||
3059
		    lim < next + rtm->rtm_msglen)
3060
			fatalx("dispatch_rtmsg: partial rtm in buffer");
3061
		if (rtm->rtm_version != RTM_VERSION)
3062
			continue;
3063
3064
		switch (rtm->rtm_type) {
3065
		case RTM_ADD:
3066
		case RTM_CHANGE:
3067
		case RTM_DELETE:
3068
			sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
3069
			get_rtaddrs(rtm->rtm_addrs, sa, rti_info);
3070
3071
			if (rtm->rtm_pid == kr_state.pid) /* caused by us */
3072
				continue;
3073
3074
			if (rtm->rtm_errno)		 /* failed attempts */
3075
				continue;
3076
3077
			if (rtm->rtm_flags & RTF_LLINFO) /* arp cache */
3078
				continue;
3079
3080
			if ((kt = ktable_get(rtm->rtm_tableid)) == NULL)
3081
				continue;
3082
3083
			if (dispatch_rtmsg_addr(rtm, rti_info, kt) == -1)
3084
				return (-1);
3085
			break;
3086
		case RTM_IFINFO:
3087
			memcpy(&ifm, next, sizeof(ifm));
3088
			if_change(ifm.ifm_index, ifm.ifm_flags,
3089
			    &ifm.ifm_data);
3090
			break;
3091
		case RTM_IFANNOUNCE:
3092
			if_announce(next);
3093
			break;
3094
		default:
3095
			/* ignore for now */
3096
			break;
3097
		}
3098
	}
3099
	return (0);
3100
}
3101
3102
int
3103
dispatch_rtmsg_addr(struct rt_msghdr *rtm, struct sockaddr *rti_info[RTAX_MAX],
3104
    struct ktable *kt)
3105
{
3106
	struct sockaddr		*sa;
3107
	struct sockaddr_in	*sa_in;
3108
	struct sockaddr_in6	*sa_in6;
3109
	struct kroute_node	*kr;
3110
	struct kroute6_node	*kr6;
3111
	struct bgpd_addr	 prefix;
3112
	int			 flags, oflags, mpath = 0, changed = 0;
3113
	u_int16_t		 ifindex;
3114
	u_int8_t		 prefixlen;
3115
	u_int8_t		 prio;
3116
3117
	flags = F_KERNEL;
3118
	ifindex = 0;
3119
	prefixlen = 0;
3120
	bzero(&prefix, sizeof(prefix));
3121
3122
	if ((sa = rti_info[RTAX_DST]) == NULL) {
3123
		log_warnx("empty route message");
3124
		return (0);
3125
	}
3126
3127
	if (rtm->rtm_flags & RTF_STATIC)
3128
		flags |= F_STATIC;
3129
	if (rtm->rtm_flags & RTF_BLACKHOLE)
3130
		flags |= F_BLACKHOLE;
3131
	if (rtm->rtm_flags & RTF_REJECT)
3132
		flags |= F_REJECT;
3133
	if (rtm->rtm_flags & RTF_DYNAMIC)
3134
		flags |= F_DYNAMIC;
3135
#ifdef RTF_MPATH
3136
	if (rtm->rtm_flags & RTF_MPATH)
3137
		mpath = 1;
3138
#endif
3139
3140
	prio = rtm->rtm_priority;
3141
	switch (sa->sa_family) {
3142
	case AF_INET:
3143
		prefix.aid = AID_INET;
3144
		prefix.v4.s_addr = ((struct sockaddr_in *)sa)->sin_addr.s_addr;
3145
		sa_in = (struct sockaddr_in *)rti_info[RTAX_NETMASK];
3146
		if (sa_in != NULL) {
3147
			if (sa_in->sin_len != 0)
3148
				prefixlen = mask2prefixlen(
3149
				    sa_in->sin_addr.s_addr);
3150
		} else if (rtm->rtm_flags & RTF_HOST)
3151
			prefixlen = 32;
3152
		else
3153
			prefixlen =
3154
			    prefixlen_classful(prefix.v4.s_addr);
3155
		break;
3156
	case AF_INET6:
3157
		prefix.aid = AID_INET6;
3158
		memcpy(&prefix.v6, &((struct sockaddr_in6 *)sa)->sin6_addr,
3159
		    sizeof(struct in6_addr));
3160
		sa_in6 = (struct sockaddr_in6 *)rti_info[RTAX_NETMASK];
3161
		if (sa_in6 != NULL) {
3162
			if (sa_in6->sin6_len != 0)
3163
				prefixlen = mask2prefixlen6(sa_in6);
3164
		} else if (rtm->rtm_flags & RTF_HOST)
3165
			prefixlen = 128;
3166
		else
3167
			fatalx("in6 net addr without netmask");
3168
		break;
3169
	default:
3170
		return (0);
3171
	}
3172
3173
	if ((sa = rti_info[RTAX_GATEWAY]) != NULL)
3174
		switch (sa->sa_family) {
3175
		case AF_LINK:
3176
			flags |= F_CONNECTED;
3177
			ifindex = rtm->rtm_index;
3178
			sa = NULL;
3179
			mpath = 0;	/* link local stuff can't be mpath */
3180
			break;
3181
		case AF_INET:
3182
		case AF_INET6:
3183
			if (rtm->rtm_flags & RTF_CONNECTED) {
3184
				flags |= F_CONNECTED;
3185
				ifindex = rtm->rtm_index;
3186
				sa = NULL;
3187
				mpath = 0; /* link local stuff can't be mpath */
3188
			}
3189
			break;
3190
		}
3191
3192
	if (rtm->rtm_type == RTM_DELETE) {
3193
		switch (prefix.aid) {
3194
		case AID_INET:
3195
			sa_in = (struct sockaddr_in *)sa;
3196
			if ((kr = kroute_find(kt, prefix.v4.s_addr,
3197
			    prefixlen, prio)) == NULL)
3198
				return (0);
3199
			if (!(kr->r.flags & F_KERNEL))
3200
				return (0);
3201
3202
			if (mpath)
3203
				/* get the correct route */
3204
				if ((kr = kroute_matchgw(kr, sa_in)) == NULL) {
3205
					log_warnx("dispatch_rtmsg_addr[delete] "
3206
					    "mpath route not found");
3207
					return (0);
3208
				}
3209
3210
			if (kroute_remove(kt, kr) == -1)
3211
				return (-1);
3212
			break;
3213
		case AID_INET6:
3214
			sa_in6 = (struct sockaddr_in6 *)sa;
3215
			if ((kr6 = kroute6_find(kt, &prefix.v6, prefixlen,
3216
			    prio)) == NULL)
3217
				return (0);
3218
			if (!(kr6->r.flags & F_KERNEL))
3219
				return (0);
3220
3221
			if (mpath)
3222
				/* get the correct route */
3223
				if ((kr6 = kroute6_matchgw(kr6, sa_in6)) ==
3224
				    NULL) {
3225
					log_warnx("dispatch_rtmsg_addr[delete] "
3226
					    "IPv6 mpath route not found");
3227
					return (0);
3228
				}
3229
3230
			if (kroute6_remove(kt, kr6) == -1)
3231
				return (-1);
3232
			break;
3233
		}
3234
		return (0);
3235
	}
3236
3237
	if (sa == NULL && !(flags & F_CONNECTED)) {
3238
		log_warnx("%s: no nexthop for %s/%u",
3239
		    __func__, log_addr(&prefix), prefixlen);
3240
		return (0);
3241
	}
3242
3243
	switch (prefix.aid) {
3244
	case AID_INET:
3245
		sa_in = (struct sockaddr_in *)sa;
3246
		if ((kr = kroute_find(kt, prefix.v4.s_addr, prefixlen,
3247
		    prio)) != NULL) {
3248
			if (kr->r.flags & F_KERNEL) {
3249
				/* get the correct route */
3250
				if (mpath && rtm->rtm_type == RTM_CHANGE &&
3251
				    (kr = kroute_matchgw(kr, sa_in)) == NULL) {
3252
					log_warnx("dispatch_rtmsg_addr[change] "
3253
					    "mpath route not found");
3254
					goto add4;
3255
				} else if (mpath && rtm->rtm_type == RTM_ADD)
3256
					goto add4;
3257
3258
				if (sa_in != NULL) {
3259
					if (kr->r.nexthop.s_addr !=
3260
					    sa_in->sin_addr.s_addr)
3261
						changed = 1;
3262
					kr->r.nexthop.s_addr =
3263
					    sa_in->sin_addr.s_addr;
3264
				} else {
3265
					if (kr->r.nexthop.s_addr != 0)
3266
						changed = 1;
3267
					kr->r.nexthop.s_addr = 0;
3268
				}
3269
3270
				if (kr->r.flags & F_NEXTHOP)
3271
					flags |= F_NEXTHOP;
3272
				oflags = kr->r.flags;
3273
				if (flags != oflags)
3274
					changed = 1;
3275
				kr->r.flags = flags;
3276
				if ((oflags & F_CONNECTED) &&
3277
				    !(flags & F_CONNECTED)) {
3278
					kif_kr_remove(kr);
3279
					kr_redistribute(IMSG_NETWORK_REMOVE,
3280
					    kt, &kr->r);
3281
				}
3282
				if ((flags & F_CONNECTED) &&
3283
				    !(oflags & F_CONNECTED)) {
3284
					kif_kr_insert(kr);
3285
					kr_redistribute(IMSG_NETWORK_ADD,
3286
					    kt, &kr->r);
3287
				}
3288
				if (kr->r.flags & F_NEXTHOP && changed)
3289
					knexthop_track(kt, kr);
3290
			}
3291
		} else if (rtm->rtm_type == RTM_CHANGE) {
3292
			log_warnx("change req for %s/%u: not in table",
3293
			    log_addr(&prefix), prefixlen);
3294
			return (0);
3295
		} else {
3296
add4:
3297
			if ((kr = calloc(1,
3298
			    sizeof(struct kroute_node))) == NULL) {
3299
				log_warn("dispatch_rtmsg");
3300
				return (-1);
3301
			}
3302
			kr->r.prefix.s_addr = prefix.v4.s_addr;
3303
			kr->r.prefixlen = prefixlen;
3304
			if (sa_in != NULL)
3305
				kr->r.nexthop.s_addr = sa_in->sin_addr.s_addr;
3306
			else
3307
				kr->r.nexthop.s_addr = 0;
3308
			kr->r.flags = flags;
3309
			kr->r.ifindex = ifindex;
3310
			kr->r.priority = prio;
3311
3312
			kroute_insert(kt, kr);
3313
		}
3314
		break;
3315
	case AID_INET6:
3316
		sa_in6 = (struct sockaddr_in6 *)sa;
3317
		if ((kr6 = kroute6_find(kt, &prefix.v6, prefixlen, prio)) !=
3318
		    NULL) {
3319
			if (kr6->r.flags & F_KERNEL) {
3320
				/* get the correct route */
3321
				if (mpath && rtm->rtm_type == RTM_CHANGE &&
3322
				    (kr6 = kroute6_matchgw(kr6, sa_in6)) ==
3323
				    NULL) {
3324
					log_warnx("dispatch_rtmsg[change] "
3325
					    "IPv6 mpath route not found");
3326
					goto add6;
3327
				} else if (mpath && rtm->rtm_type == RTM_ADD)
3328
					goto add6;
3329
3330
				if (sa_in6 != NULL) {
3331
					if (memcmp(&kr6->r.nexthop,
3332
					    &sa_in6->sin6_addr,
3333
					    sizeof(struct in6_addr)))
3334
						changed = 1;
3335
					memcpy(&kr6->r.nexthop,
3336
					    &sa_in6->sin6_addr,
3337
					    sizeof(struct in6_addr));
3338
				} else {
3339
					if (memcmp(&kr6->r.nexthop,
3340
					    &in6addr_any,
3341
					    sizeof(struct in6_addr)))
3342
						changed = 1;
3343
					memcpy(&kr6->r.nexthop,
3344
					    &in6addr_any,
3345
					    sizeof(struct in6_addr));
3346
				}
3347
3348
				if (kr6->r.flags & F_NEXTHOP)
3349
					flags |= F_NEXTHOP;
3350
				oflags = kr6->r.flags;
3351
				if (flags != oflags)
3352
					changed = 1;
3353
				kr6->r.flags = flags;
3354
				if ((oflags & F_CONNECTED) &&
3355
				    !(flags & F_CONNECTED)) {
3356
					kif_kr6_remove(kr6);
3357
					kr_redistribute6(IMSG_NETWORK_REMOVE,
3358
					    kt, &kr6->r);
3359
				}
3360
				if ((flags & F_CONNECTED) &&
3361
				    !(oflags & F_CONNECTED)) {
3362
					kif_kr6_insert(kr6);
3363
					kr_redistribute6(IMSG_NETWORK_ADD,
3364
					    kt, &kr6->r);
3365
				}
3366
				if (kr6->r.flags & F_NEXTHOP && changed)
3367
					knexthop_track(kt, kr6);
3368
			}
3369
		} else if (rtm->rtm_type == RTM_CHANGE) {
3370
			log_warnx("change req for %s/%u: not in table",
3371
			    log_addr(&prefix), prefixlen);
3372
			return (0);
3373
		} else {
3374
add6:
3375
			if ((kr6 = calloc(1,
3376
			    sizeof(struct kroute6_node))) == NULL) {
3377
				log_warn("dispatch_rtmsg_addr");
3378
				return (-1);
3379
			}
3380
			memcpy(&kr6->r.prefix, &prefix.v6,
3381
			    sizeof(struct in6_addr));
3382
			kr6->r.prefixlen = prefixlen;
3383
			if (sa_in6 != NULL)
3384
				memcpy(&kr6->r.nexthop, &sa_in6->sin6_addr,
3385
				    sizeof(struct in6_addr));
3386
			else
3387
				memcpy(&kr6->r.nexthop, &in6addr_any,
3388
				    sizeof(struct in6_addr));
3389
			kr6->r.flags = flags;
3390
			kr6->r.ifindex = ifindex;
3391
			kr6->r.priority = prio;
3392
3393
			kroute6_insert(kt, kr6);
3394
		}
3395
		break;
3396
	}
3397
3398
	return (0);
3399
}