GCC Code Coverage Report
File: usr.sbin/bgpd/rde.c
Directory: ./
Date: 2017-11-13
Lines executed: 0 of 1863 (0.0 %)
Branches executed: 0 of 1230 (0.0 %)
/*	$OpenBSD: rde.c,v 1.372 2017/09/14 18:16:28 phessler Exp $ */

/*
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2016 Job Snijders <job@instituut.net>
 * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>

#include <errno.h>
#include <ifaddrs.h>
#include <pwd.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <unistd.h>
#include <err.h>

#include "bgpd.h"
#include "mrt.h"
#include "rde.h"
#include "session.h"
#include "log.h"

#define PFD_PIPE_MAIN		0
#define PFD_PIPE_SESSION	1
#define PFD_PIPE_SESSION_CTL	2
#define PFD_PIPE_COUNT		3
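
/*
 * The first PFD_PIPE_COUNT pollfd slots are the fixed imsg pipes to the
 * parent and the session engine; file descriptors of active MRT dumps
 * are appended after them in the poll loop in rde_main().
 */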

void		 rde_sighdlr(int);
void		 rde_dispatch_imsg_session(struct imsgbuf *);
void		 rde_dispatch_imsg_parent(struct imsgbuf *);
int		 rde_update_dispatch(struct imsg *);
void		 rde_update_update(struct rde_peer *, struct rde_aspath *,
		     struct bgpd_addr *, u_int8_t);
void		 rde_update_withdraw(struct rde_peer *, struct bgpd_addr *,
		     u_int8_t);
int		 rde_attr_parse(u_char *, u_int16_t, struct rde_peer *,
		     struct rde_aspath *, struct mpattr *);
int		 rde_attr_add(struct rde_aspath *, u_char *, u_int16_t);
u_int8_t	 rde_attr_missing(struct rde_aspath *, int, u_int16_t);
int		 rde_get_mp_nexthop(u_char *, u_int16_t, u_int8_t,
		     struct rde_aspath *);
int		 rde_update_extract_prefix(u_char *, u_int16_t, void *,
		     u_int8_t, u_int8_t);
int		 rde_update_get_prefix(u_char *, u_int16_t, struct bgpd_addr *,
		     u_int8_t *);
int		 rde_update_get_prefix6(u_char *, u_int16_t, struct bgpd_addr *,
		     u_int8_t *);
int		 rde_update_get_vpn4(u_char *, u_int16_t, struct bgpd_addr *,
		     u_int8_t *);
void		 rde_update_err(struct rde_peer *, u_int8_t , u_int8_t,
		     void *, u_int16_t);
void		 rde_update_log(const char *, u_int16_t,
		     const struct rde_peer *, const struct bgpd_addr *,
		     const struct bgpd_addr *, u_int8_t);
void		 rde_as4byte_fixup(struct rde_peer *, struct rde_aspath *);
void		 rde_reflector(struct rde_peer *, struct rde_aspath *);

void		 rde_dump_rib_as(struct prefix *, struct rde_aspath *, pid_t,
		     int);
void		 rde_dump_filter(struct prefix *,
		     struct ctl_show_rib_request *);
void		 rde_dump_filterout(struct rde_peer *, struct prefix *,
		     struct ctl_show_rib_request *);
void		 rde_dump_upcall(struct rib_entry *, void *);
void		 rde_dump_prefix_upcall(struct rib_entry *, void *);
void		 rde_dump_ctx_new(struct ctl_show_rib_request *, pid_t,
		     enum imsg_type);
void		 rde_dump_ctx_throttle(pid_t pid, int throttle);
void		 rde_dump_runner(void);
int		 rde_dump_pending(void);
void		 rde_dump_done(void *);
void		 rde_dump_mrt_new(struct mrt *, pid_t, int);
void		 rde_dump_rib_free(struct rib *);
void		 rde_dump_mrt_free(struct rib *);
void		 rde_rib_free(struct rib_desc *);

int		 rde_rdomain_import(struct rde_aspath *, struct rdomain *);
void		 rde_reload_done(void);
void		 rde_softreconfig_out(struct rib_entry *, void *);
void		 rde_softreconfig_in(struct rib_entry *, void *);
void		 rde_softreconfig_unload_peer(struct rib_entry *, void *);
void		 rde_up_dump_upcall(struct rib_entry *, void *);
void		 rde_update_queue_runner(void);
void		 rde_update6_queue_runner(u_int8_t);

void		 peer_init(u_int32_t);
void		 peer_shutdown(void);
int		 peer_localaddrs(struct rde_peer *, struct bgpd_addr *);
struct rde_peer	*peer_add(u_int32_t, struct peer_config *);
struct rde_peer	*peer_get(u_int32_t);
void		 peer_up(u_int32_t, struct session_up *);
void		 peer_down(u_int32_t);
void		 peer_flush(struct rde_peer *, u_int8_t);
void		 peer_stale(u_int32_t, u_int8_t);
void		 peer_recv_eor(struct rde_peer *, u_int8_t);
void		 peer_dump(u_int32_t, u_int8_t);
void		 peer_send_eor(struct rde_peer *, u_int8_t);

void		 network_add(struct network_config *, int);
void		 network_delete(struct network_config *, int);
void		 network_dump_upcall(struct rib_entry *, void *);

void		 rde_shutdown(void);
int		 sa_cmp(struct bgpd_addr *, struct sockaddr *);

volatile sig_atomic_t	 rde_quit = 0;
struct bgpd_config	*conf, *nconf;
time_t			 reloadtime;
struct rde_peer_head	 peerlist;
struct rde_peer		*peerself;
struct filter_head	*out_rules, *out_rules_tmp;
struct rdomain_head	*rdomains_l, *newdomains;
struct imsgbuf		*ibuf_se;
struct imsgbuf		*ibuf_se_ctl;
struct imsgbuf		*ibuf_main;
struct rde_memstats	 rdemem;

struct rde_dump_ctx {
	LIST_ENTRY(rde_dump_ctx)	entry;
	struct rib_context		ribctx;
	struct ctl_show_rib_request	req;
	sa_family_t			af;
	u_int8_t			throttled;
};

LIST_HEAD(, rde_dump_ctx) rde_dump_h = LIST_HEAD_INITIALIZER(rde_dump_h);

struct rde_mrt_ctx {
	LIST_ENTRY(rde_mrt_ctx)	entry;
	struct rib_context	ribctx;
	struct mrt		mrt;
};

LIST_HEAD(, rde_mrt_ctx) rde_mrts = LIST_HEAD_INITIALIZER(rde_mrts);
u_int rde_mrt_cnt;

void
rde_sighdlr(int sig)
{
	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_quit = 1;
		break;
	}
}
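
/*
 * rde_sighdlr() only sets a volatile sig_atomic_t flag: that is the
 * async-signal-safe way to tell the poll loop in rde_main() to exit.
 */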

u_int32_t	peerhashsize = 64;
u_int32_t	pathhashsize = 1024;
u_int32_t	attrhashsize = 512;
u_int32_t	nexthophashsize = 64;

void
rde_main(int debug, int verbose)
{
	struct passwd		*pw;
	struct pollfd		*pfd = NULL;
	struct rde_mrt_ctx	*mctx, *xmctx;
	void			*newp;
	u_int			 pfd_elms = 0, i, j;
	int			 timeout;
	u_int8_t		 aid;

	log_init(debug, LOG_DAEMON);
	log_setverbose(verbose);

	bgpd_process = PROC_RDE;
	log_procinit(log_procnames[bgpd_process]);

	if ((pw = getpwnam(BGPD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio route recvfd flock rpath cpath wpath", NULL) == -1)
		fatal("pledge");

	signal(SIGTERM, rde_sighdlr);
	signal(SIGINT, rde_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGUSR1, SIG_IGN);

	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, 3);

	/* initialize the RIB structures */
	pt_init();
	path_init(pathhashsize);
	aspath_init(pathhashsize);
	attr_init(attrhashsize);
	nexthop_init(nexthophashsize);
	peer_init(peerhashsize);

	out_rules = calloc(1, sizeof(struct filter_head));
	if (out_rules == NULL)
		fatal(NULL);
	TAILQ_INIT(out_rules);
	rdomains_l = calloc(1, sizeof(struct rdomain_head));
	if (rdomains_l == NULL)
		fatal(NULL);
	SIMPLEQ_INIT(rdomains_l);
	if ((conf = calloc(1, sizeof(struct bgpd_config))) == NULL)
		fatal(NULL);
	log_info("route decision engine ready");

	while (rde_quit == 0) {
		if (pfd_elms < PFD_PIPE_COUNT + rde_mrt_cnt) {
			if ((newp = reallocarray(pfd,
			    PFD_PIPE_COUNT + rde_mrt_cnt,
			    sizeof(struct pollfd))) == NULL) {
				/* panic for now  */
				log_warn("could not resize pfd from %u -> %u"
				    " entries", pfd_elms, PFD_PIPE_COUNT +
				    rde_mrt_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = PFD_PIPE_COUNT + rde_mrt_cnt;
		}
		timeout = INFTIM;
		bzero(pfd, sizeof(struct pollfd) * pfd_elms);

		set_pollfd(&pfd[PFD_PIPE_MAIN], ibuf_main);
		set_pollfd(&pfd[PFD_PIPE_SESSION], ibuf_se);
		set_pollfd(&pfd[PFD_PIPE_SESSION_CTL], ibuf_se_ctl);

		if (rde_dump_pending() &&
		    ibuf_se_ctl && ibuf_se_ctl->w.queued == 0)
			timeout = 0;

		i = PFD_PIPE_COUNT;
		for (mctx = LIST_FIRST(&rde_mrts); mctx != 0; mctx = xmctx) {
			xmctx = LIST_NEXT(mctx, entry);
			if (mctx->mrt.wbuf.queued) {
				pfd[i].fd = mctx->mrt.wbuf.fd;
				pfd[i].events = POLLOUT;
				i++;
			} else if (mctx->mrt.state == MRT_STATE_REMOVE) {
				close(mctx->mrt.wbuf.fd);
				LIST_REMOVE(mctx, entry);
				free(mctx);
				rde_mrt_cnt--;
			}
		}

		if (poll(pfd, i, timeout) == -1) {
			if (errno != EINTR)
				fatal("poll error");
			continue;
		}

		if (handle_pollfd(&pfd[PFD_PIPE_MAIN], ibuf_main) == -1)
			fatalx("Lost connection to parent");
		else
			rde_dispatch_imsg_parent(ibuf_main);

		if (handle_pollfd(&pfd[PFD_PIPE_SESSION], ibuf_se) == -1) {
			log_warnx("RDE: Lost connection to SE");
			msgbuf_clear(&ibuf_se->w);
			free(ibuf_se);
			ibuf_se = NULL;
		} else
			rde_dispatch_imsg_session(ibuf_se);

		if (handle_pollfd(&pfd[PFD_PIPE_SESSION_CTL], ibuf_se_ctl) ==
		    -1) {
			log_warnx("RDE: Lost connection to SE control");
			msgbuf_clear(&ibuf_se_ctl->w);
			free(ibuf_se_ctl);
			ibuf_se_ctl = NULL;
		} else
			rde_dispatch_imsg_session(ibuf_se_ctl);

		for (j = PFD_PIPE_COUNT, mctx = LIST_FIRST(&rde_mrts);
		    j < i && mctx != 0; j++) {
			if (pfd[j].fd == mctx->mrt.wbuf.fd &&
			    pfd[j].revents & POLLOUT)
				mrt_write(&mctx->mrt);
			mctx = LIST_NEXT(mctx, entry);
		}

		rde_update_queue_runner();
		for (aid = AID_INET6; aid < AID_MAX; aid++)
			rde_update6_queue_runner(aid);
		if (rde_dump_pending() &&
		    ibuf_se_ctl && ibuf_se_ctl->w.queued <= 10)
			rde_dump_runner();
	}

	/* do not clean up on shutdown on production, it takes ages. */
	if (debug)
		rde_shutdown();

	/* close pipes */
	if (ibuf_se) {
		msgbuf_clear(&ibuf_se->w);
		close(ibuf_se->fd);
		free(ibuf_se);
	}
	if (ibuf_se_ctl) {
		msgbuf_clear(&ibuf_se_ctl->w);
		close(ibuf_se_ctl->fd);
		free(ibuf_se_ctl);
	}
	msgbuf_clear(&ibuf_main->w);
	close(ibuf_main->fd);
	free(ibuf_main);

	while ((mctx = LIST_FIRST(&rde_mrts)) != NULL) {
		msgbuf_clear(&mctx->mrt.wbuf);
		close(mctx->mrt.wbuf.fd);
		LIST_REMOVE(mctx, entry);
		free(mctx);
	}

	log_info("route decision engine exiting");
	exit(0);
}

struct network_config	 netconf_s, netconf_p;
struct filter_set_head	*session_set, *parent_set;

void
rde_dispatch_imsg_session(struct imsgbuf *ibuf)
{
	struct imsg		 imsg;
	struct peer		 p;
	struct peer_config	 pconf;
	struct session_up	 sup;
	struct ctl_show_rib	 csr;
	struct ctl_show_rib_request	req;
	struct rde_peer		*peer;
	struct rde_aspath	*asp;
	struct filter_set	*s;
	struct nexthop		*nh;
	u_int8_t		*asdata;
	ssize_t			 n;
	int			 verbose;
	u_int16_t		 len;
	u_int8_t		 aid;

	while (ibuf) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg_session: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_UPDATE:
			rde_update_dispatch(&imsg);
			break;
		case IMSG_SESSION_ADD:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(pconf))
				fatalx("incorrect size of session request");
			memcpy(&pconf, imsg.data, sizeof(pconf));
			peer_add(imsg.hdr.peerid, &pconf);
			break;
		case IMSG_SESSION_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(sup))
				fatalx("incorrect size of session request");
			memcpy(&sup, imsg.data, sizeof(sup));
			peer_up(imsg.hdr.peerid, &sup);
			break;
		case IMSG_SESSION_DOWN:
			peer_down(imsg.hdr.peerid);
			break;
		case IMSG_SESSION_STALE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(aid)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&aid, imsg.data, sizeof(aid));
			if (aid >= AID_MAX)
				fatalx("IMSG_SESSION_STALE: bad AID");
			peer_stale(imsg.hdr.peerid, aid);
			break;
		case IMSG_SESSION_FLUSH:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(aid)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&aid, imsg.data, sizeof(aid));
			if (aid >= AID_MAX)
				fatalx("IMSG_SESSION_FLUSH: bad AID");
			if ((peer = peer_get(imsg.hdr.peerid)) == NULL) {
				log_warnx("rde_dispatch: unknown peer id %d",
				    imsg.hdr.peerid);
				break;
			}
			peer_flush(peer, aid);
			break;
		case IMSG_SESSION_RESTARTED:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(aid)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&aid, imsg.data, sizeof(aid));
			if (aid >= AID_MAX)
				fatalx("IMSG_SESSION_RESTARTED: bad AID");
			if ((peer = peer_get(imsg.hdr.peerid)) == NULL) {
				log_warnx("rde_dispatch: unknown peer id %d",
				    imsg.hdr.peerid);
				break;
			}
			if (peer->staletime[aid])
				peer_flush(peer, aid);
			break;
		case IMSG_REFRESH:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(aid)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&aid, imsg.data, sizeof(aid));
			if (aid >= AID_MAX)
				fatalx("IMSG_REFRESH: bad AID");
			peer_dump(imsg.hdr.peerid, aid);
			break;
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct network_config)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&netconf_s, imsg.data, sizeof(netconf_s));
			TAILQ_INIT(&netconf_s.attrset);
			session_set = &netconf_s.attrset;
			break;
		case IMSG_NETWORK_ASPATH:
			if (imsg.hdr.len - IMSG_HEADER_SIZE <
			    sizeof(struct ctl_show_rib)) {
				log_warnx("rde_dispatch: wrong imsg len");
				bzero(&netconf_s, sizeof(netconf_s));
				break;
			}
			asdata = imsg.data;
			asdata += sizeof(struct ctl_show_rib);
			memcpy(&csr, imsg.data, sizeof(csr));
			if (csr.aspath_len + sizeof(csr) > imsg.hdr.len -
			    IMSG_HEADER_SIZE) {
				log_warnx("rde_dispatch: wrong aspath len");
				bzero(&netconf_s, sizeof(netconf_s));
				break;
			}
			asp = path_get();
			asp->lpref = csr.local_pref;
			asp->med = csr.med;
			asp->weight = csr.weight;
			asp->flags = csr.flags;
			asp->origin = csr.origin;
			asp->flags |= F_PREFIX_ANNOUNCED | F_ANN_DYNAMIC;
			asp->aspath = aspath_get(asdata, csr.aspath_len);
			netconf_s.asp = asp;
			break;
		case IMSG_NETWORK_ATTR:
			if (imsg.hdr.len <= IMSG_HEADER_SIZE) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			/* parse path attributes */
			len = imsg.hdr.len - IMSG_HEADER_SIZE;
			asp = netconf_s.asp;
			if (rde_attr_add(asp, imsg.data, len) == -1) {
				log_warnx("rde_dispatch: bad network "
				    "attribute");
				path_put(asp);
				bzero(&netconf_s, sizeof(netconf_s));
				break;
			}
			break;
		case IMSG_NETWORK_DONE:
			if (imsg.hdr.len != IMSG_HEADER_SIZE) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			session_set = NULL;
			switch (netconf_s.prefix.aid) {
			case AID_INET:
				if (netconf_s.prefixlen > 32)
					goto badnet;
				network_add(&netconf_s, 0);
				break;
			case AID_INET6:
				if (netconf_s.prefixlen > 128)
					goto badnet;
				network_add(&netconf_s, 0);
				break;
			case 0:
				/* something failed beforehand */
				break;
			default:
badnet:
				log_warnx("rde_dispatch: bad network");
				break;
			}
			break;
		case IMSG_NETWORK_REMOVE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct network_config)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&netconf_s, imsg.data, sizeof(netconf_s));
			TAILQ_INIT(&netconf_s.attrset);
			network_delete(&netconf_s, 0);
			break;
		case IMSG_NETWORK_FLUSH:
			if (imsg.hdr.len != IMSG_HEADER_SIZE) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			prefix_network_clean(peerself, time(NULL),
			    F_ANN_DYNAMIC);
			break;
		case IMSG_FILTER_SET:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct filter_set)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			if (session_set == NULL) {
				log_warnx("rde_dispatch: "
				    "IMSG_FILTER_SET unexpected");
				break;
			}
			if ((s = malloc(sizeof(struct filter_set))) == NULL)
				fatal(NULL);
			memcpy(s, imsg.data, sizeof(struct filter_set));
			TAILQ_INSERT_TAIL(session_set, s, entry);

			if (s->type == ACTION_SET_NEXTHOP) {
				nh = nexthop_get(&s->action.nexthop);
				nh->refcnt++;
			}
			break;
		case IMSG_CTL_SHOW_NETWORK:
		case IMSG_CTL_SHOW_RIB:
		case IMSG_CTL_SHOW_RIB_AS:
		case IMSG_CTL_SHOW_RIB_COMMUNITY:
		case IMSG_CTL_SHOW_RIB_EXTCOMMUNITY:
		case IMSG_CTL_SHOW_RIB_LARGECOMMUNITY:
		case IMSG_CTL_SHOW_RIB_PREFIX:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(req)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&req, imsg.data, sizeof(req));
			rde_dump_ctx_new(&req, imsg.hdr.pid, imsg.hdr.type);
			break;
		case IMSG_CTL_SHOW_NEIGHBOR:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct peer)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&p, imsg.data, sizeof(struct peer));
			peer = peer_get(p.conf.id);
			if (peer != NULL) {
				p.stats.prefix_cnt = peer->prefix_cnt;
				p.stats.prefix_rcvd_update =
				    peer->prefix_rcvd_update;
				p.stats.prefix_rcvd_withdraw =
				    peer->prefix_rcvd_withdraw;
				p.stats.prefix_rcvd_eor =
				    peer->prefix_rcvd_eor;
				p.stats.prefix_sent_update =
				    peer->prefix_sent_update;
				p.stats.prefix_sent_withdraw =
				    peer->prefix_sent_withdraw;
				p.stats.prefix_sent_eor =
				    peer->prefix_sent_eor;
			}
			imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_NEIGHBOR, 0,
			    imsg.hdr.pid, -1, &p, sizeof(struct peer));
			break;
		case IMSG_CTL_END:
			imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, imsg.hdr.pid,
			    -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB_MEM:
			imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_MEM, 0,
			    imsg.hdr.pid, -1, &rdemem, sizeof(rdemem));
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by SE */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_setverbose(verbose);
			break;
		case IMSG_XON:
			if (imsg.hdr.peerid) {
				peer = peer_get(imsg.hdr.peerid);
				if (peer)
					peer->throttled = 0;
				break;
			} else {
				rde_dump_ctx_throttle(imsg.hdr.pid, 0);
			}
			break;
		case IMSG_XOFF:
			if (imsg.hdr.peerid) {
				peer = peer_get(imsg.hdr.peerid);
				if (peer)
					peer->throttled = 1;
			} else {
				rde_dump_ctx_throttle(imsg.hdr.pid, 1);
			}
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
}

void
rde_dispatch_imsg_parent(struct imsgbuf *ibuf)
{
	static struct rdomain	*rd;
	struct imsg		 imsg;
	struct mrt		 xmrt;
	struct rde_rib		 rn;
	struct imsgbuf		*i;
	struct filter_head	*nr;
	struct filter_rule	*r;
	struct filter_set	*s;
	struct nexthop		*nh;
	struct rib		*rib;
	int			 n, fd;
	u_int16_t		 rid;

	while (ibuf) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_SOCKET_CONN:
		case IMSG_SOCKET_CONN_CTL:
			if ((fd = imsg.fd) == -1) {
				log_warnx("expected to receive imsg fd to "
				    "SE but didn't receive any");
				break;
			}
			if ((i = malloc(sizeof(struct imsgbuf))) == NULL)
				fatal(NULL);
			imsg_init(i, fd);
			if (imsg.hdr.type == IMSG_SOCKET_CONN) {
				if (ibuf_se) {
					log_warnx("Unexpected imsg connection "
					    "to SE received");
					msgbuf_clear(&ibuf_se->w);
					free(ibuf_se);
				}
				ibuf_se = i;
			} else {
				if (ibuf_se_ctl) {
					log_warnx("Unexpected imsg ctl "
					    "connection to SE received");
					msgbuf_clear(&ibuf_se_ctl->w);
					free(ibuf_se_ctl);
				}
				ibuf_se_ctl = i;
			}
			break;
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct network_config)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&netconf_p, imsg.data, sizeof(netconf_p));
			TAILQ_INIT(&netconf_p.attrset);
			parent_set = &netconf_p.attrset;
			break;
		case IMSG_NETWORK_DONE:
			parent_set = NULL;
			network_add(&netconf_p, 1);
			break;
		case IMSG_NETWORK_REMOVE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct network_config)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&netconf_p, imsg.data, sizeof(netconf_p));
			TAILQ_INIT(&netconf_p.attrset);
			network_delete(&netconf_p, 1);
			break;
		case IMSG_RECONF_CONF:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct bgpd_config))
				fatalx("IMSG_RECONF_CONF bad len");
			reloadtime = time(NULL);
			out_rules_tmp = calloc(1, sizeof(struct filter_head));
			if (out_rules_tmp == NULL)
				fatal(NULL);
			TAILQ_INIT(out_rules_tmp);
			newdomains = calloc(1, sizeof(struct rdomain_head));
			if (newdomains == NULL)
				fatal(NULL);
			SIMPLEQ_INIT(newdomains);
			if ((nconf = malloc(sizeof(struct bgpd_config))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct bgpd_config));
			for (rid = 0; rid < rib_size; rid++) {
				if (*ribs[rid].name == '\0')
					break;
				ribs[rid].state = RECONF_DELETE;
			}
			break;
		case IMSG_RECONF_RIB:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct rde_rib))
				fatalx("IMSG_RECONF_RIB bad len");
			memcpy(&rn, imsg.data, sizeof(rn));
			rib = rib_find(rn.name);
			if (rib == NULL)
				rib = rib_new(rn.name, rn.rtableid, rn.flags);
			else if (rib->rtableid != rn.rtableid ||
			    (rib->flags & F_RIB_HASNOFIB) !=
			    (rn.flags & F_RIB_HASNOFIB)) {
				struct filter_head	*in_rules;
				struct rib_desc		*ribd = rib_desc(rib);
				/*
				 * Big hammer in the F_RIB_HASNOFIB case but
				 * not often enough used to optimise it more.
				 * Need to save the filters so that they're not
				 * lost.
				 */
				in_rules = ribd->in_rules;
				ribd->in_rules = NULL;
				rde_rib_free(ribd);
				rib = rib_new(rn.name, rn.rtableid, rn.flags);
				ribd->in_rules = in_rules;
			} else
				rib_desc(rib)->state = RECONF_KEEP;
			break;
		case IMSG_RECONF_FILTER:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct filter_rule))
				fatalx("IMSG_RECONF_FILTER bad len");
			if ((r = malloc(sizeof(struct filter_rule))) == NULL)
				fatal(NULL);
			memcpy(r, imsg.data, sizeof(struct filter_rule));
			TAILQ_INIT(&r->set);
			if ((rib = rib_find(r->rib)) == NULL) {
				log_warnx("IMSG_RECONF_FILTER: filter rule "
				    "for nonexistent rib %s", r->rib);
				parent_set = NULL;
				free(r);
				break;
			}
			r->peer.ribid = rib->id;
			parent_set = &r->set;
			if (r->dir == DIR_IN) {
				nr = rib_desc(rib)->in_rules_tmp;
				if (nr == NULL) {
					nr = calloc(1,
					    sizeof(struct filter_head));
					if (nr == NULL)
						fatal(NULL);
					TAILQ_INIT(nr);
					rib_desc(rib)->in_rules_tmp = nr;
				}
				TAILQ_INSERT_TAIL(nr, r, entry);
			} else
				TAILQ_INSERT_TAIL(out_rules_tmp, r, entry);
			break;
		case IMSG_RECONF_RDOMAIN:
			if (imsg.hdr.len - IMSG_HEADER_SIZE !=
			    sizeof(struct rdomain))
				fatalx("IMSG_RECONF_RDOMAIN bad len");
			if ((rd = malloc(sizeof(struct rdomain))) == NULL)
				fatal(NULL);
			memcpy(rd, imsg.data, sizeof(struct rdomain));
			TAILQ_INIT(&rd->import);
			TAILQ_INIT(&rd->export);
			SIMPLEQ_INSERT_TAIL(newdomains, rd, entry);
			break;
		case IMSG_RECONF_RDOMAIN_EXPORT:
			if (rd == NULL) {
				log_warnx("rde_dispatch_imsg_parent: "
				    "IMSG_RECONF_RDOMAIN_EXPORT unexpected");
				break;
			}
			parent_set = &rd->export;
			break;
		case IMSG_RECONF_RDOMAIN_IMPORT:
			if (rd == NULL) {
				log_warnx("rde_dispatch_imsg_parent: "
				    "IMSG_RECONF_RDOMAIN_IMPORT unexpected");
				break;
			}
			parent_set = &rd->import;
			break;
		case IMSG_RECONF_RDOMAIN_DONE:
			parent_set = NULL;
			break;
		case IMSG_RECONF_DONE:
			if (nconf == NULL)
				fatalx("got IMSG_RECONF_DONE but no config");
			parent_set = NULL;

			rde_reload_done();
			break;
		case IMSG_NEXTHOP_UPDATE:
			nexthop_update(imsg.data);
			break;
		case IMSG_FILTER_SET:
			if (imsg.hdr.len > IMSG_HEADER_SIZE +
			    sizeof(struct filter_set))
				fatalx("IMSG_FILTER_SET bad len");
			if (parent_set == NULL) {
				log_warnx("rde_dispatch_imsg_parent: "
				    "IMSG_FILTER_SET unexpected");
				break;
			}
			if ((s = malloc(sizeof(struct filter_set))) == NULL)
				fatal(NULL);
			memcpy(s, imsg.data, sizeof(struct filter_set));
			TAILQ_INSERT_TAIL(parent_set, s, entry);

			if (s->type == ACTION_SET_NEXTHOP) {
				nh = nexthop_get(&s->action.nexthop);
				nh->refcnt++;
			}
			break;
		case IMSG_MRT_OPEN:
		case IMSG_MRT_REOPEN:
			if (imsg.hdr.len > IMSG_HEADER_SIZE +
			    sizeof(struct mrt)) {
				log_warnx("wrong imsg len");
				break;
			}
			memcpy(&xmrt, imsg.data, sizeof(xmrt));
			if ((fd = imsg.fd) == -1)
				log_warnx("expected to receive fd for mrt dump "
				    "but didn't receive any");
			else if (xmrt.type == MRT_TABLE_DUMP ||
			    xmrt.type == MRT_TABLE_DUMP_MP ||
			    xmrt.type == MRT_TABLE_DUMP_V2) {
				rde_dump_mrt_new(&xmrt, imsg.hdr.pid, fd);
			} else
				close(fd);
			break;
		case IMSG_MRT_CLOSE:
			/* ignore end message because a dump is atomic */
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
}

/* handle routing updates from the session engine. */
int
rde_update_dispatch(struct imsg *imsg)
{
	struct bgpd_addr	 prefix;
	struct mpattr		 mpa;
	struct rde_peer		*peer;
	struct rde_aspath	*asp = NULL;
	u_char			*p, *mpp = NULL;
	int			 error = -1, pos = 0;
	u_int16_t		 afi, len, mplen;
	u_int16_t		 withdrawn_len;
	u_int16_t		 attrpath_len;
	u_int16_t		 nlri_len;
	u_int8_t		 aid, prefixlen, safi, subtype;
	u_int32_t		 fas;

	peer = peer_get(imsg->hdr.peerid);
	if (peer == NULL)	/* unknown peer, cannot happen */
		return (-1);
	if (peer->state != PEER_UP)
		return (-1);	/* peer is not yet up, cannot happen */

	p = imsg->data;

	if (imsg->hdr.len < IMSG_HEADER_SIZE + 2) {
		rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL, 0);
		return (-1);
	}
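
	/*
	 * An UPDATE message body (RFC 4271) is laid out as:
	 *   2 bytes  withdrawn routes length
	 *   n bytes  withdrawn routes
	 *   2 bytes  total path attribute length
	 *   n bytes  path attributes
	 *   n bytes  NLRI
	 * The length checks below walk this layout step by step.
	 */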

	memcpy(&len, p, 2);
	withdrawn_len = ntohs(len);
	p += 2;
	if (imsg->hdr.len < IMSG_HEADER_SIZE + 2 + withdrawn_len + 2) {
		rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL, 0);
		return (-1);
	}

	p += withdrawn_len;
	memcpy(&len, p, 2);
	attrpath_len = len = ntohs(len);
	p += 2;
	if (imsg->hdr.len <
	    IMSG_HEADER_SIZE + 2 + withdrawn_len + 2 + attrpath_len) {
		rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL, 0);
		return (-1);
	}

	nlri_len =
	    imsg->hdr.len - IMSG_HEADER_SIZE - 4 - withdrawn_len - attrpath_len;
	bzero(&mpa, sizeof(mpa));

	if (attrpath_len != 0) { /* 0 = no NLRI information in this message */
		/* parse path attributes */
		asp = path_get();
		while (len > 0) {
			if ((pos = rde_attr_parse(p, len, peer, asp,
			    &mpa)) < 0)
				goto done;
			p += pos;
			len -= pos;
		}

		/* check for missing but necessary attributes */
		if ((subtype = rde_attr_missing(asp, peer->conf.ebgp,
		    nlri_len))) {
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_MISSNG_WK_ATTR,
			    &subtype, sizeof(u_int8_t));
			goto done;
		}

		rde_as4byte_fixup(peer, asp);

		/* enforce remote AS if requested */
		if (asp->flags & F_ATTR_ASPATH &&
		    peer->conf.enforce_as == ENFORCE_AS_ON) {
			fas = aspath_neighbor(asp->aspath);
			if (peer->conf.remote_as != fas) {
			    log_peer_warnx(&peer->conf, "bad path, "
				"starting with %s, "
				"enforce neighbor-as enabled", log_as(fas));
			    rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
				    NULL, 0);
			    goto done;
			}
		}

		rde_reflector(peer, asp);
	}

	p = imsg->data;
	len = withdrawn_len;
	p += 2;
	/* withdraw prefix */
	while (len > 0) {
		if ((pos = rde_update_get_prefix(p, len, &prefix,
		    &prefixlen)) == -1) {
			/*
			 * the RFC does not mention what we should do in
			 * this case. Let's do the same as in the NLRI case.
			 */
			log_peer_warnx(&peer->conf, "bad withdraw prefix");
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
			    NULL, 0);
			goto done;
		}
		if (prefixlen > 32) {
			log_peer_warnx(&peer->conf, "bad withdraw prefix");
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
			    NULL, 0);
			goto done;
		}

		p += pos;
		len -= pos;

		if (peer->capa.mp[AID_INET] == 0) {
			log_peer_warnx(&peer->conf,
			    "bad withdraw, %s disabled", aid2str(AID_INET));
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
			    NULL, 0);
			goto done;
		}

		rde_update_withdraw(peer, &prefix, prefixlen);
	}
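
	/*
	 * RFC 4724: an UPDATE with no withdrawn routes and an empty
	 * path attribute section is the End-of-RIB marker for IPv4
	 * unicast, handled below.
	 */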

	if (attrpath_len == 0) {
		/* 0 = no NLRI information in this message */
		if (nlri_len != 0) {
			/* crap at end of update which should not be there */
			rde_update_err(peer, ERR_UPDATE,
			    ERR_UPD_ATTRLIST, NULL, 0);
			return (-1);
		}
		if (withdrawn_len == 0) {
			/* EoR marker */
			peer_recv_eor(peer, AID_INET);
		}
		return (0);
	}
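
	/*
	 * MP_REACH_NLRI and MP_UNREACH_NLRI payloads start with a 2-byte
	 * AFI and a 1-byte SAFI (RFC 4760); afi2aid() maps the pair to an
	 * internal AID before the NLRI is parsed.
	 */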

	/* withdraw MP_UNREACH_NLRI if available */
	if (mpa.unreach_len != 0) {
		mpp = mpa.unreach;
		mplen = mpa.unreach_len;
		memcpy(&afi, mpp, 2);
		mpp += 2;
		mplen -= 2;
		afi = ntohs(afi);
		safi = *mpp++;
		mplen--;

		if (afi2aid(afi, safi, &aid) == -1) {
			log_peer_warnx(&peer->conf,
			    "bad AFI/SAFI pair in withdraw");
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
			    NULL, 0);
			goto done;
		}

		if (peer->capa.mp[aid] == 0) {
			log_peer_warnx(&peer->conf,
			    "bad withdraw, %s disabled", aid2str(aid));
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
			    NULL, 0);
			goto done;
		}

		if ((asp->flags & ~F_ATTR_MP_UNREACH) == 0 && mplen == 0) {
			/* EoR marker */
			peer_recv_eor(peer, aid);
		}

		switch (aid) {
		case AID_INET6:
			while (mplen > 0) {
				if ((pos = rde_update_get_prefix6(mpp, mplen,
				    &prefix, &prefixlen)) == -1) {
					log_peer_warnx(&peer->conf,
					    "bad IPv6 withdraw prefix");
					rde_update_err(peer, ERR_UPDATE,
					    ERR_UPD_OPTATTR,
					    mpa.unreach, mpa.unreach_len);
					goto done;
				}
				if (prefixlen > 128) {
					log_peer_warnx(&peer->conf,
					    "bad IPv6 withdraw prefix");
					rde_update_err(peer, ERR_UPDATE,
					    ERR_UPD_OPTATTR,
					    mpa.unreach, mpa.unreach_len);
					goto done;
				}

				mpp += pos;
				mplen -= pos;

				rde_update_withdraw(peer, &prefix, prefixlen);
			}
			break;
		case AID_VPN_IPv4:
			while (mplen > 0) {
				if ((pos = rde_update_get_vpn4(mpp, mplen,
				    &prefix, &prefixlen)) == -1) {
					log_peer_warnx(&peer->conf,
					    "bad VPNv4 withdraw prefix");
					rde_update_err(peer, ERR_UPDATE,
					    ERR_UPD_OPTATTR,
					    mpa.unreach, mpa.unreach_len);
					goto done;
				}
				if (prefixlen > 32) {
					log_peer_warnx(&peer->conf,
					    "bad VPNv4 withdraw prefix");
					rde_update_err(peer, ERR_UPDATE,
					    ERR_UPD_OPTATTR,
					    mpa.unreach, mpa.unreach_len);
					goto done;
				}

				mpp += pos;
				mplen -= pos;

				rde_update_withdraw(peer, &prefix, prefixlen);
			}
			break;
		default:
			/* silently ignore unsupported multiprotocol AF */
			break;
		}

		if ((asp->flags & ~F_ATTR_MP_UNREACH) == 0) {
			error = 0;
			goto done;
		}
	}

	/* shift to NLRI information */
	p += 2 + attrpath_len;

	/* aspath needs to be loop free; note that this is not a hard error */
	if (peer->conf.ebgp &&
	    peer->conf.enforce_local_as == ENFORCE_AS_ON &&
	    !aspath_loopfree(asp->aspath, peer->conf.local_as))
		asp->flags |= F_ATTR_LOOP;

	/* parse nlri prefix */
	while (nlri_len > 0) {
		if ((pos = rde_update_get_prefix(p, nlri_len, &prefix,
		    &prefixlen)) == -1) {
			log_peer_warnx(&peer->conf, "bad nlri prefix");
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
			    NULL, 0);
			goto done;
		}
		if (prefixlen > 32) {
			log_peer_warnx(&peer->conf, "bad nlri prefix");
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
			    NULL, 0);
			goto done;
		}

		p += pos;
		nlri_len -= pos;

		if (peer->capa.mp[AID_INET] == 0) {
			log_peer_warnx(&peer->conf,
			    "bad update, %s disabled", aid2str(AID_INET));
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
			    NULL, 0);
			goto done;
		}

		rde_update_update(peer, asp, &prefix, prefixlen);

		/* max prefix checker */
		if (peer->conf.max_prefix &&
		    peer->prefix_cnt > peer->conf.max_prefix) {
			log_peer_warnx(&peer->conf, "prefix limit reached"
			    " (>%u/%u)", peer->prefix_cnt,
			    peer->conf.max_prefix);
			rde_update_err(peer, ERR_CEASE, ERR_CEASE_MAX_PREFIX,
			    NULL, 0);
			goto done;
		}

	}

	/* add MP_REACH_NLRI if available */
	if (mpa.reach_len != 0) {
		mpp = mpa.reach;
		mplen = mpa.reach_len;
		memcpy(&afi, mpp, 2);
		mpp += 2;
		mplen -= 2;
		afi = ntohs(afi);
		safi = *mpp++;
		mplen--;

		if (afi2aid(afi, safi, &aid) == -1) {
			log_peer_warnx(&peer->conf,
			    "bad AFI/SAFI pair in update");
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
			    NULL, 0);
			goto done;
		}

		if (peer->capa.mp[aid] == 0) {
			log_peer_warnx(&peer->conf,
			    "bad update, %s disabled", aid2str(aid));
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
			    NULL, 0);
			goto done;
		}

		/*
		 * this works because asp is not linked.
		 * But first unlock the previously locked nexthop.
		 */
		if (asp->nexthop) {
			asp->nexthop->refcnt--;
			(void)nexthop_delete(asp->nexthop);
			asp->nexthop = NULL;
		}
		if ((pos = rde_get_mp_nexthop(mpp, mplen, aid, asp)) == -1) {
			log_peer_warnx(&peer->conf, "bad nlri prefix");
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
			    mpa.reach, mpa.reach_len);
			goto done;
		}
		mpp += pos;
		mplen -= pos;

		switch (aid) {
		case AID_INET6:
			while (mplen > 0) {
				if ((pos = rde_update_get_prefix6(mpp, mplen,
				    &prefix, &prefixlen)) == -1) {
					log_peer_warnx(&peer->conf,
					    "bad IPv6 nlri prefix");
					rde_update_err(peer, ERR_UPDATE,
					    ERR_UPD_OPTATTR,
					    mpa.reach, mpa.reach_len);
					goto done;
				}
				if (prefixlen > 128) {
					rde_update_err(peer, ERR_UPDATE,
					    ERR_UPD_OPTATTR,
					    mpa.reach, mpa.reach_len);
					goto done;
				}

				mpp += pos;
				mplen -= pos;

				rde_update_update(peer, asp, &prefix,
				    prefixlen);

				/* max prefix checker */
				if (peer->conf.max_prefix &&
				    peer->prefix_cnt > peer->conf.max_prefix) {
					log_peer_warnx(&peer->conf,
					    "prefix limit reached"
					    " (>%u/%u)", peer->prefix_cnt,
					    peer->conf.max_prefix);
					rde_update_err(peer, ERR_CEASE,
					    ERR_CEASE_MAX_PREFIX, NULL, 0);
					goto done;
				}

			}
			break;
		case AID_VPN_IPv4:
			while (mplen > 0) {
				if ((pos = rde_update_get_vpn4(mpp, mplen,
				    &prefix, &prefixlen)) == -1) {
					log_peer_warnx(&peer->conf,
					    "bad VPNv4 nlri prefix");
					rde_update_err(peer, ERR_UPDATE,
					    ERR_UPD_OPTATTR,
					    mpa.reach, mpa.reach_len);
					goto done;
				}
				if (prefixlen > 32) {
					rde_update_err(peer, ERR_UPDATE,
					    ERR_UPD_OPTATTR,
					    mpa.reach, mpa.reach_len);
					goto done;
				}

				mpp += pos;
				mplen -= pos;

				rde_update_update(peer, asp, &prefix,
				    prefixlen);

				/* max prefix checker */
				if (peer->conf.max_prefix &&
				    peer->prefix_cnt > peer->conf.max_prefix) {
					log_peer_warnx(&peer->conf,
					    "prefix limit reached"
					    " (>%u/%u)", peer->prefix_cnt,
					    peer->conf.max_prefix);
					rde_update_err(peer, ERR_CEASE,
					    ERR_CEASE_MAX_PREFIX, NULL, 0);
					goto done;
				}

			}
			break;
		default:
			/* silently ignore unsupported multiprotocol AF */
			break;
		}
	}

done:
	if (attrpath_len != 0) {
		/* unlock the previously locked entry */
		if (asp->nexthop) {
			asp->nexthop->refcnt--;
			(void)nexthop_delete(asp->nexthop);
		}
		/* free allocated attribute memory that is no longer used */
		path_put(asp);
	}

	return (error);
}
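
/*
 * ribs[0] is the unfiltered Adj-RIB-In; every further RIB receives a
 * copy of the path only after its input filter set has accepted it,
 * and a filtered withdraw otherwise.
 */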

void
rde_update_update(struct rde_peer *peer, struct rde_aspath *asp,
    struct bgpd_addr *prefix, u_int8_t prefixlen)
{
	struct rde_aspath	*fasp;
	enum filter_actions	 action;
	u_int16_t		 i;

	peer->prefix_rcvd_update++;
	/* add original path to the Adj-RIB-In */
	if (path_update(&ribs[0].rib, peer, asp, prefix, prefixlen))
		peer->prefix_cnt++;

	for (i = 1; i < rib_size; i++) {
		if (*ribs[i].name == '\0')
			break;
		/* input filter */
		action = rde_filter(ribs[i].in_rules, &fasp, peer, asp, prefix,
		    prefixlen, peer);

		if (fasp == NULL)
			fasp = asp;

		if (action == ACTION_ALLOW) {
			rde_update_log("update", i, peer,
			    &fasp->nexthop->exit_nexthop, prefix, prefixlen);
			path_update(&ribs[i].rib, peer, fasp, prefix,
			    prefixlen);
		} else if (prefix_remove(&ribs[i].rib, peer, prefix, prefixlen,
		    0)) {
			rde_update_log("filtered withdraw", i, peer,
			    NULL, prefix, prefixlen);
		}

		/* free modified aspath */
		if (fasp != asp)
			path_put(fasp);
	}
}

void
rde_update_withdraw(struct rde_peer *peer, struct bgpd_addr *prefix,
    u_int8_t prefixlen)
{
	u_int16_t i;

	for (i = 1; i < rib_size; i++) {
		if (*ribs[i].name == '\0')
			break;
		if (prefix_remove(&ribs[i].rib, peer, prefix, prefixlen, 0)) {
			rde_update_log("withdraw", i, peer, NULL, prefix,
			    prefixlen);
		}
	}

	/* remove original path from the Adj-RIB-In */
	if (prefix_remove(&ribs[0].rib, peer, prefix, prefixlen, 0))
		peer->prefix_cnt--;

	peer->prefix_rcvd_withdraw++;
}

/*
 * BGP UPDATE parser functions
 */

/* attribute parser specific macros */
#define UPD_READ(t, p, plen, n) \
	do { \
		memcpy(t, p, n); \
		p += n; \
		plen += n; \
	} while (0)

#define CHECK_FLAGS(s, t, m)	\
	(((s) & ~(ATTR_DEFMASK | (m))) == (t))
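
/*
 * A path attribute starts with a 1-byte flags field and a 1-byte type
 * code, followed by a 1-byte length, or a 2-byte length if ATTR_EXTLEN
 * is set (RFC 4271). CHECK_FLAGS() compares all flag bits outside
 * ATTR_DEFMASK and the allowed mask m against the expected value t.
 */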

int
rde_attr_parse(u_char *p, u_int16_t len, struct rde_peer *peer,
    struct rde_aspath *a, struct mpattr *mpa)
{
	struct bgpd_addr nexthop;
	u_char		*op = p, *npath;
	u_int32_t	 tmp32;
	int		 error;
	u_int16_t	 attr_len, nlen;
	u_int16_t	 plen = 0;
	u_int8_t	 flags;
	u_int8_t	 type;
	u_int8_t	 tmp8;

	if (len < 3) {
bad_len:
		rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLEN, op, len);
		return (-1);
	}

	UPD_READ(&flags, p, plen, 1);
	UPD_READ(&type, p, plen, 1);

	if (flags & ATTR_EXTLEN) {
		if (len - plen < 2)
			goto bad_len;
		UPD_READ(&attr_len, p, plen, 2);
		attr_len = ntohs(attr_len);
	} else {
		UPD_READ(&tmp8, p, plen, 1);
		attr_len = tmp8;
	}

	if (len - plen < attr_len)
		goto bad_len;

	/* adjust len to the actual attribute size including header */
	len = plen + attr_len;

	switch (type) {
	case ATTR_UNDEF:
		/* ignore and drop path attributes with a type code of 0 */
		plen += attr_len;
		break;
	case ATTR_ORIGIN:
		if (attr_len != 1)
			goto bad_len;

		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0)) {
bad_flags:
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRFLAGS,
			    op, len);
			return (-1);
		}

		UPD_READ(&a->origin, p, plen, 1);
		if (a->origin > ORIGIN_INCOMPLETE) {
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_ORIGIN,
			    op, len);
			return (-1);
		}
		if (a->flags & F_ATTR_ORIGIN)
			goto bad_list;
		a->flags |= F_ATTR_ORIGIN;
		break;
	case ATTR_ASPATH:
		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
			goto bad_flags;
		error = aspath_verify(p, attr_len, rde_as4byte(peer));
		if (error == AS_ERR_SOFT) {
			/*
			 * soft errors like unexpected segment types are
			 * not considered fatal and the path is just
			 * marked invalid.
			 */
			a->flags |= F_ATTR_PARSE_ERR;
			log_peer_warnx(&peer->conf, "bad ASPATH, "
			    "path invalidated and prefix withdrawn");
		} else if (error != 0) {
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
			    NULL, 0);
			return (-1);
		}
		if (a->flags & F_ATTR_ASPATH)
			goto bad_list;
		if (rde_as4byte(peer)) {
			npath = p;
			nlen = attr_len;
		} else
			npath = aspath_inflate(p, attr_len, &nlen);
		a->flags |= F_ATTR_ASPATH;
		a->aspath = aspath_get(npath, nlen);
		if (npath != p)
			free(npath);
		plen += attr_len;
		break;
	case ATTR_NEXTHOP:
		if (attr_len != 4)
			goto bad_len;
		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
			goto bad_flags;
		if (a->flags & F_ATTR_NEXTHOP)
			goto bad_list;
		a->flags |= F_ATTR_NEXTHOP;

		bzero(&nexthop, sizeof(nexthop));
		nexthop.aid = AID_INET;
		UPD_READ(&nexthop.v4.s_addr, p, plen, 4);
		/*
		 * Check if the nexthop is a valid IP address. We consider
		 * multicast and experimental addresses as invalid.
		 */
		tmp32 = ntohl(nexthop.v4.s_addr);
		if (IN_MULTICAST(tmp32) || IN_BADCLASS(tmp32)) {
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
			    op, len);
			return (-1);
		}
		a->nexthop = nexthop_get(&nexthop);
		/*
		 * lock the nexthop because it is not yet linked else
		 * withdraws may remove this nexthop which in turn would
		 * cause a use after free error.
		 */
		a->nexthop->refcnt++;
		break;
	case ATTR_MED:
		if (attr_len != 4)
			goto bad_len;
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
			goto bad_flags;
		if (a->flags & F_ATTR_MED)
			goto bad_list;
		a->flags |= F_ATTR_MED;

		UPD_READ(&tmp32, p, plen, 4);
		a->med = ntohl(tmp32);
		break;
	case ATTR_LOCALPREF:
		if (attr_len != 4)
			goto bad_len;
		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
			goto bad_flags;
		if (peer->conf.ebgp) {
			/* ignore local-pref attr on non ibgp peers */
			plen += 4;
			break;
		}
		if (a->flags & F_ATTR_LOCALPREF)
			goto bad_list;
		a->flags |= F_ATTR_LOCALPREF;

		UPD_READ(&tmp32, p, plen, 4);
		a->lpref = ntohl(tmp32);
		break;
	case ATTR_ATOMIC_AGGREGATE:
		if (attr_len != 0)
			goto bad_len;
		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
			goto bad_flags;
		goto optattr;
	case ATTR_AGGREGATOR:
		if ((!rde_as4byte(peer) && attr_len != 6) ||
		    (rde_as4byte(peer) && attr_len != 8)) {
			/*
			 * ignore attribute in case of error as per
			 * RFC 7606
			 */
			log_peer_warnx(&peer->conf, "bad AGGREGATOR, "
			    "partial attribute ignored");
			plen += attr_len;
			break;
		}
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
		    ATTR_PARTIAL))
			goto bad_flags;
		if (!rde_as4byte(peer)) {
			/* need to inflate aggregator AS to 4-byte */
			u_char	t[8];
			t[0] = t[1] = 0;
			UPD_READ(&t[2], p, plen, 2);
			UPD_READ(&t[4], p, plen, 4);
			if (attr_optadd(a, flags, type, t,
			    sizeof(t)) == -1)
				goto bad_list;
			break;
		}
		/* 4-byte AS capable peers take the default (optattr) path */
1588
		goto optattr;
1589
	case ATTR_COMMUNITIES:
1590
		if (attr_len == 0 || attr_len % 4 != 0) {
1591
			/*
1592
			 * mark update as bad and withdraw all routes as per
1593
			 * RFC 7606
1594
			 */
1595
			a->flags |= F_ATTR_PARSE_ERR;
1596
			log_peer_warnx(&peer->conf, "bad COMMUNITIES, "
1597
			    "path invalidated and prefix withdrawn");
1598
		}
1599
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
1600
		    ATTR_PARTIAL))
1601
			goto bad_flags;
1602
		goto optattr;
1603
	case ATTR_LARGE_COMMUNITIES:
1604
		if (attr_len == 0 || attr_len % 12 != 0) {
1605
			/*
1606
			 * mark update as bad and withdraw all routes as per
1607
			 * RFC 7606
1608
			 */
1609
			a->flags |= F_ATTR_PARSE_ERR;
1610
			log_peer_warnx(&peer->conf, "bad LARGE COMMUNITIES, "
1611
			    "path invalidated and prefix withdrawn");
1612
		}
1613
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
1614
		    ATTR_PARTIAL))
1615
			goto bad_flags;
1616
		goto optattr;
1617
	case ATTR_EXT_COMMUNITIES:
1618
		if (attr_len == 0 || attr_len % 8 != 0) {
1619
			/*
1620
			 * mark update as bad and withdraw all routes as per
1621
			 * RFC 7606
1622
			 */
1623
			a->flags |= F_ATTR_PARSE_ERR;
1624
			log_peer_warnx(&peer->conf, "bad EXT_COMMUNITIES, "
1625
			    "path invalidated and prefix withdrawn");
1626
		}
1627
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
1628
		    ATTR_PARTIAL))
1629
			goto bad_flags;
1630
		goto optattr;
1631
	case ATTR_ORIGINATOR_ID:
1632
		if (attr_len != 4)
1633
			goto bad_len;
1634
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
1635
			goto bad_flags;
1636
		goto optattr;
1637
	case ATTR_CLUSTER_LIST:
1638
		if (attr_len % 4 != 0)
1639
			goto bad_len;
1640
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
1641
			goto bad_flags;
1642
		goto optattr;
1643
	case ATTR_MP_REACH_NLRI:
1644
		if (attr_len < 4)
1645
			goto bad_len;
1646
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
1647
			goto bad_flags;
1648
		/* the validity is checked in rde_update_dispatch() */
1649
		if (a->flags & F_ATTR_MP_REACH)
1650
			goto bad_list;
1651
		a->flags |= F_ATTR_MP_REACH;
1652
1653
		mpa->reach = p;
1654
		mpa->reach_len = attr_len;
1655
		plen += attr_len;
1656
		break;
1657
	case ATTR_MP_UNREACH_NLRI:
1658
		if (attr_len < 3)
1659
			goto bad_len;
1660
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
1661
			goto bad_flags;
1662
		/* the validity is checked in rde_update_dispatch() */
1663
		if (a->flags & F_ATTR_MP_UNREACH)
1664
			goto bad_list;
1665
		a->flags |= F_ATTR_MP_UNREACH;
1666
1667
		mpa->unreach = p;
1668
		mpa->unreach_len = attr_len;
1669
		plen += attr_len;
1670
		break;
1671
	case ATTR_AS4_AGGREGATOR:
1672
		if (attr_len != 8) {
1673
			/* see ATTR_AGGREGATOR ... */
1674
			if ((flags & ATTR_PARTIAL) == 0)
1675
				goto bad_len;
1676
			log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, "
1677
			    "partial attribute ignored");
1678
			plen += attr_len;
1679
			break;
1680
		}
1681
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
1682
		    ATTR_PARTIAL))
1683
			goto bad_flags;
1684
		a->flags |= F_ATTR_AS4BYTE_NEW;
1685
		goto optattr;
1686
	case ATTR_AS4_PATH:
1687
		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
1688
		    ATTR_PARTIAL))
1689
			goto bad_flags;
1690
		if ((error = aspath_verify(p, attr_len, 1)) != 0) {
1691
			/*
1692
			 * XXX RFC does not specify how to handle errors.
1693
			 * XXX Instead of dropping the session because of a
1694
			 * XXX bad path just mark the full update as having
1695
			 * XXX a parse error which makes the update no longer
1696
			 * XXX eligible and will not be considered for routing
1697
			 * XXX or redistribution.
1698
			 * XXX We follow draft-ietf-idr-optional-transitive
1699
			 * XXX by looking at the partial bit.
1700
			 * XXX Consider soft errors similar to a partial attr.
1701
			 */
1702
			if (flags & ATTR_PARTIAL || error == AS_ERR_SOFT) {
1703
				a->flags |= F_ATTR_PARSE_ERR;
1704
				log_peer_warnx(&peer->conf, "bad AS4_PATH, "
1705
				    "path invalidated and prefix withdrawn");
1706
				goto optattr;
1707
			} else {
1708
				rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
1709
				    NULL, 0);
1710
				return (-1);
1711
			}
1712
		}
1713
		a->flags |= F_ATTR_AS4BYTE_NEW;
1714
		goto optattr;
1715
	default:
1716
		if ((flags & ATTR_OPTIONAL) == 0) {
1717
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_UNKNWN_WK_ATTR,
1718
			    op, len);
1719
			return (-1);
1720
		}
1721
optattr:
1722
		if (attr_optadd(a, flags, type, p, attr_len) == -1) {
1723
bad_list:
1724
			rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST,
1725
			    NULL, 0);
1726
			return (-1);
1727
		}
1728
1729
		plen += attr_len;
1730
		break;
1731
	}
1732
1733
	return (plen);
1734
}
1735
1736
int
1737
rde_attr_add(struct rde_aspath *a, u_char *p, u_int16_t len)
1738
{
1739
	u_int16_t	 attr_len;
1740
	u_int16_t	 plen = 0;
1741
	u_int8_t	 flags;
1742
	u_int8_t	 type;
1743
	u_int8_t	 tmp8;
1744
1745
	if (a == NULL)		/* no aspath, nothing to do */
1746
		return (0);
1747
	if (len < 3)
1748
		return (-1);
1749
1750
	UPD_READ(&flags, p, plen, 1);
1751
	UPD_READ(&type, p, plen, 1);
1752
1753
	if (flags & ATTR_EXTLEN) {
1754
		if (len - plen < 2)
1755
			return (-1);
1756
		UPD_READ(&attr_len, p, plen, 2);
1757
		attr_len = ntohs(attr_len);
1758
	} else {
1759
		UPD_READ(&tmp8, p, plen, 1);
1760
		attr_len = tmp8;
1761
	}
1762
1763
	if (len - plen < attr_len)
1764
		return (-1);
1765
1766
	if (attr_optadd(a, flags, type, p, attr_len) == -1)
1767
		return (-1);
1768
	return (0);
1769
}
1770
1771
#undef UPD_READ
1772
#undef CHECK_FLAGS
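
/*
 * RFC 4271, section 5: ORIGIN, AS_PATH and NEXT_HOP are mandatory
 * whenever NLRI is present, and LOCAL_PREF is mandatory on iBGP
 * sessions; rde_attr_missing() returns the type code of the first
 * one that is absent.
 */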
1773
1774
u_int8_t
1775
rde_attr_missing(struct rde_aspath *a, int ebgp, u_int16_t nlrilen)
1776
{
1777
	/* ATTR_MP_UNREACH_NLRI may be sent alone */
1778
	if (nlrilen == 0 && a->flags & F_ATTR_MP_UNREACH &&
1779
	    (a->flags & F_ATTR_MP_REACH) == 0)
1780
		return (0);
1781
1782
	if ((a->flags & F_ATTR_ORIGIN) == 0)
1783
		return (ATTR_ORIGIN);
1784
	if ((a->flags & F_ATTR_ASPATH) == 0)
1785
		return (ATTR_ASPATH);
1786
	if ((a->flags & F_ATTR_MP_REACH) == 0 &&
1787
	    (a->flags & F_ATTR_NEXTHOP) == 0)
1788
		return (ATTR_NEXTHOP);
1789
	if (!ebgp)
1790
		if ((a->flags & F_ATTR_LOCALPREF) == 0)
1791
			return (ATTR_LOCALPREF);
1792
	return (0);
1793
}
1794
1795
int
rde_get_mp_nexthop(u_char *data, u_int16_t len, u_int8_t aid,
    struct rde_aspath *asp)
{
	struct bgpd_addr	nexthop;
	u_int8_t		totlen, nhlen;

	if (len == 0)
		return (-1);

	nhlen = *data++;
	totlen = 1;
	len--;

	if (nhlen > len)
		return (-1);

	bzero(&nexthop, sizeof(nexthop));
	nexthop.aid = aid;
	switch (aid) {
	case AID_INET6:
		/*
		 * RFC2545 describes that there may be a link-local
		 * address carried in the nexthop. Yikes!
		 * This is not only silly, it is wrong and we just ignore
		 * this link-local nexthop. The bgpd session doesn't run
		 * over the link-local address, so why should all other
		 * traffic?
		 */
		if (nhlen != 16 && nhlen != 32) {
			log_warnx("bad multiprotocol nexthop, bad size");
			return (-1);
		}
		memcpy(&nexthop.v6.s6_addr, data, 16);
		break;
	case AID_VPN_IPv4:
		/*
		 * Neither RFC4364 nor RFC3107 specify the format of the
		 * nexthop in an explicit way. The quality of the RFCs went
		 * down the toilet the larger the numbers got.
		 * RFC4364 is very confusing about the VPN-IPv4 address and
		 * the VPN-IPv4 prefix, which also carries an MPLS label.
		 * So the nexthop is a 12-byte field: a 64-bit RD followed
		 * by an IPv4 address. In the nexthop case the RD can
		 * be ignored.
		 * Since the nexthop has to be in the main IPv4 table just
		 * create an AID_INET nexthop, so we don't need to handle
		 * AID_VPN_IPv4 in nexthop and kroute.
		 */
		if (nhlen != 12) {
			log_warnx("bad multiprotocol nexthop, bad size");
			return (-1);
		}
		data += sizeof(u_int64_t);
		nexthop.aid = AID_INET;
		memcpy(&nexthop.v4, data, sizeof(nexthop.v4));
		break;
	default:
		log_warnx("bad multiprotocol nexthop, bad AID");
		return (-1);
	}

	asp->nexthop = nexthop_get(&nexthop);
	/*
	 * Lock the nexthop because it is not yet linked; otherwise
	 * withdraws may remove this nexthop, which in turn would
	 * cause a use-after-free error.
	 */
	asp->nexthop->refcnt++;

	/* ignore reserved (old SNPA) field as per RFC4760 */
	totlen += nhlen + 1;
	data += nhlen + 1;

	return (totlen);
}

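/*
 * Copy the minimum number of octets that can hold pfxlen bits and
 * mask off trailing host bits in the last octet: NLRI prefixes are
 * encoded in (pfxlen + 7) / 8 octets (RFC 4271).
 */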
int
rde_update_extract_prefix(u_char *p, u_int16_t len, void *va,
    u_int8_t pfxlen, u_int8_t max)
{
	static u_char addrmask[] = {
	    0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff };
	u_char		*a = va;
	int		 i;
	u_int16_t	 plen = 0;

	for (i = 0; pfxlen && i < max; i++) {
		if (len <= plen)
			return (-1);
		if (pfxlen < 8) {
			a[i] = *p++ & addrmask[pfxlen];
			plen++;
			break;
		} else {
			a[i] = *p++;
			plen++;
			pfxlen -= 8;
		}
	}
	return (plen);
}

int
rde_update_get_prefix(u_char *p, u_int16_t len, struct bgpd_addr *prefix,
    u_int8_t *prefixlen)
{
	u_int8_t	 pfxlen;
	int		 plen;

	if (len < 1)
		return (-1);

	pfxlen = *p++;
	len--;

	bzero(prefix, sizeof(struct bgpd_addr));
	prefix->aid = AID_INET;
	*prefixlen = pfxlen;

	if ((plen = rde_update_extract_prefix(p, len, &prefix->v4, pfxlen,
	    sizeof(prefix->v4))) == -1)
		return (-1);

	return (plen + 1);	/* one more octet for the pfxlen field */
}

int
rde_update_get_prefix6(u_char *p, u_int16_t len, struct bgpd_addr *prefix,
    u_int8_t *prefixlen)
{
	int		plen;
	u_int8_t	pfxlen;

	if (len < 1)
		return (-1);

	pfxlen = *p++;
	len--;

	bzero(prefix, sizeof(struct bgpd_addr));
	prefix->aid = AID_INET6;
	*prefixlen = pfxlen;

	if ((plen = rde_update_extract_prefix(p, len, &prefix->v6, pfxlen,
	    sizeof(prefix->v6))) == -1)
		return (-1);

	return (plen + 1);	/* one more octet for the pfxlen field */
}

int
rde_update_get_vpn4(u_char *p, u_int16_t len, struct bgpd_addr *prefix,
    u_int8_t *prefixlen)
{
	int		 rv, done = 0;
	u_int8_t	 pfxlen;
	u_int16_t	 plen;

	if (len < 1)
		return (-1);

	memcpy(&pfxlen, p, 1);
	p += 1;
	plen = 1;

	bzero(prefix, sizeof(struct bgpd_addr));

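	/*
	 * Each label stack entry occupies three octets in the NLRI:
	 * 20 bits of label, three EXP/TC bits and the bottom-of-stack
	 * bit (RFC 3032); entries are consumed until BoS is seen.
	 */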
	/* label stack */
	do {
		if (len - plen < 3 || pfxlen < 3 * 8)
			return (-1);
		if (prefix->vpn4.labellen + 3U >
		    sizeof(prefix->vpn4.labelstack))
			return (-1);
		prefix->vpn4.labelstack[prefix->vpn4.labellen++] = *p++;
		prefix->vpn4.labelstack[prefix->vpn4.labellen++] = *p++;
		prefix->vpn4.labelstack[prefix->vpn4.labellen] = *p++;
		if (prefix->vpn4.labelstack[prefix->vpn4.labellen] &
		    BGP_MPLS_BOS)
			done = 1;
		prefix->vpn4.labellen++;
		plen += 3;
		pfxlen -= 3 * 8;
	} while (!done);

	/* RD */
	if (len - plen < (int)sizeof(u_int64_t) ||
	    pfxlen < sizeof(u_int64_t) * 8)
		return (-1);
	memcpy(&prefix->vpn4.rd, p, sizeof(u_int64_t));
	pfxlen -= sizeof(u_int64_t) * 8;
	p += sizeof(u_int64_t);
	plen += sizeof(u_int64_t);

	/* prefix */
	prefix->aid = AID_VPN_IPv4;
	*prefixlen = pfxlen;

	if ((rv = rde_update_extract_prefix(p, len, &prefix->vpn4.addr,
	    pfxlen, sizeof(prefix->vpn4.addr))) == -1)
		return (-1);

	return (plen + rv);
}

void
rde_update_err(struct rde_peer *peer, u_int8_t error, u_int8_t suberr,
    void *data, u_int16_t size)
{
	struct ibuf	*wbuf;

	if ((wbuf = imsg_create(ibuf_se, IMSG_UPDATE_ERR, peer->conf.id, 0,
	    size + sizeof(error) + sizeof(suberr))) == NULL)
		fatal("%s %d imsg_create error", __func__, __LINE__);
	if (imsg_add(wbuf, &error, sizeof(error)) == -1 ||
	    imsg_add(wbuf, &suberr, sizeof(suberr)) == -1 ||
	    imsg_add(wbuf, data, size) == -1)
		fatal("%s %d imsg_add error", __func__, __LINE__);
	imsg_close(ibuf_se, wbuf);
	peer->state = PEER_ERR;
}

void
rde_update_log(const char *message, u_int16_t rid,
    const struct rde_peer *peer, const struct bgpd_addr *next,
    const struct bgpd_addr *prefix, u_int8_t prefixlen)
{
	char		*l = NULL;
	char		*n = NULL;
	char		*p = NULL;

	if (!((conf->log & BGPD_LOG_UPDATES) ||
	    (peer->conf.flags & PEERFLAG_LOG_UPDATES)))
		return;

	if (next != NULL)
		if (asprintf(&n, " via %s", log_addr(next)) == -1)
			n = NULL;
	if (asprintf(&p, "%s/%u", log_addr(prefix), prefixlen) == -1)
		p = NULL;
	l = log_fmt_peer(&peer->conf);
	log_info("Rib %s: %s AS%s: %s %s%s", ribs[rid].name,
	    l, log_as(peer->conf.remote_as), message,
	    p ? p : "out of memory", n ? n : "");

	free(l);
	free(n);
	free(p);
}

/*
 * 4-byte ASN helper function.
 * Two scenarios need to be considered:
 * - NEW session with NEW attributes present -> just remove the attributes
 * - OLD session with NEW attributes present -> try to merge them
 */
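/*
 * AS_TRANS (AS 23456, RFC 4893) is the 2-byte placeholder ASN; an
 * AGGREGATOR carrying anything else means the aggregating router
 * speaks 4-byte ASNs natively and AS4_AGGREGATOR must be ignored.
 */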
void
rde_as4byte_fixup(struct rde_peer *peer, struct rde_aspath *a)
{
	struct attr	*nasp, *naggr, *oaggr;
	u_int32_t	 as;

	/*
	 * if either ATTR_AS4_AGGREGATOR or ATTR_AS4_PATH is present
	 * try to fixup the attributes.
	 * Do not fixup if F_ATTR_PARSE_ERR is set.
	 */
	if (!(a->flags & F_ATTR_AS4BYTE_NEW) || a->flags & F_ATTR_PARSE_ERR)
		return;

	/* first get the attributes */
	nasp = attr_optget(a, ATTR_AS4_PATH);
	naggr = attr_optget(a, ATTR_AS4_AGGREGATOR);

	if (rde_as4byte(peer)) {
		/* NEW session using 4-byte ASNs */
		if (nasp) {
			log_peer_warnx(&peer->conf, "uses 4-byte ASN "
			    "but sent AS4_PATH attribute.");
			attr_free(a, nasp);
		}
		if (naggr) {
			log_peer_warnx(&peer->conf, "uses 4-byte ASN "
			    "but sent AS4_AGGREGATOR attribute.");
			attr_free(a, naggr);
		}
		return;
	}
	/* OLD session using 2-byte ASNs */
	/* try to merge the new attributes into the old ones */
	if ((oaggr = attr_optget(a, ATTR_AGGREGATOR))) {
		memcpy(&as, oaggr->data, sizeof(as));
		if (ntohl(as) != AS_TRANS) {
			/* per RFC ignore AS4_PATH and AS4_AGGREGATOR */
			if (nasp)
				attr_free(a, nasp);
			if (naggr)
				attr_free(a, naggr);
			return;
		}
		if (naggr) {
			/* switch over to new AGGREGATOR */
			attr_free(a, oaggr);
			if (attr_optadd(a, ATTR_OPTIONAL | ATTR_TRANSITIVE,
			    ATTR_AGGREGATOR, naggr->data, naggr->len))
				fatalx("attr_optadd failed but impossible");
		}
	}
	/* there is no need for AS4_AGGREGATOR any more */
	if (naggr)
		attr_free(a, naggr);

	/* merge AS4_PATH with ASPATH */
	if (nasp)
		aspath_merge(a, nasp);
}


/*
 * route reflector helper function
 */
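/*
 * Implements the RFC 4456 loop-prevention checks: an update whose
 * ORIGINATOR_ID equals our router id or whose CLUSTER_LIST already
 * contains our cluster id is marked with F_ATTR_LOOP; when acting
 * as reflector, missing attributes are added and our cluster id is
 * prepended to the CLUSTER_LIST.
 */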
void
rde_reflector(struct rde_peer *peer, struct rde_aspath *asp)
{
	struct attr	*a;
	u_int8_t	*p;
	u_int16_t	 len;
	u_int32_t	 id;

	/* do not consider updates with parse errors */
	if (asp->flags & F_ATTR_PARSE_ERR)
		return;

	/* check the originator id; if it equals our router id, drop */
	if ((a = attr_optget(asp, ATTR_ORIGINATOR_ID)) != NULL) {
		if (memcmp(&conf->bgpid, a->data, sizeof(conf->bgpid)) == 0) {
			/* this is coming from myself */
			asp->flags |= F_ATTR_LOOP;
			return;
		}
	} else if (conf->flags & BGPD_FLAG_REFLECTOR) {
		if (peer->conf.ebgp)
			id = conf->bgpid;
		else
			id = htonl(peer->remote_bgpid);
		if (attr_optadd(asp, ATTR_OPTIONAL, ATTR_ORIGINATOR_ID,
		    &id, sizeof(u_int32_t)) == -1)
			fatalx("attr_optadd failed but impossible");
	}

	/* check for own id in the cluster list */
	if (conf->flags & BGPD_FLAG_REFLECTOR) {
		if ((a = attr_optget(asp, ATTR_CLUSTER_LIST)) != NULL) {
			for (len = 0; len < a->len;
			    len += sizeof(conf->clusterid))
				/* check if coming from my cluster */
				if (memcmp(&conf->clusterid, a->data + len,
				    sizeof(conf->clusterid)) == 0) {
					asp->flags |= F_ATTR_LOOP;
					return;
				}

			/* prepend own clusterid by replacing attribute */
			len = a->len + sizeof(conf->clusterid);
			if (len < a->len)
				fatalx("rde_reflector: cluster-list overflow");
			if ((p = malloc(len)) == NULL)
				fatal("rde_reflector");
			memcpy(p, &conf->clusterid, sizeof(conf->clusterid));
			memcpy(p + sizeof(conf->clusterid), a->data, a->len);
			attr_free(asp, a);
			if (attr_optadd(asp, ATTR_OPTIONAL, ATTR_CLUSTER_LIST,
			    p, len) == -1)
				fatalx("attr_optadd failed but impossible");
			free(p);
		} else if (attr_optadd(asp, ATTR_OPTIONAL, ATTR_CLUSTER_LIST,
		    &conf->clusterid, sizeof(conf->clusterid)) == -1)
			fatalx("attr_optadd failed but impossible");
	}
}

/*
 * control specific functions
 */
void
rde_dump_rib_as(struct prefix *p, struct rde_aspath *asp, pid_t pid, int flags)
{
	struct ctl_show_rib	 rib;
	struct ibuf		*wbuf;
	struct attr		*a;
	void			*bp;
	time_t			 staletime;
	u_int8_t		 l;

	bzero(&rib, sizeof(rib));
	rib.lastchange = p->lastchange;
	rib.local_pref = asp->lpref;
	rib.med = asp->med;
	rib.weight = asp->weight;
	strlcpy(rib.descr, asp->peer->conf.descr, sizeof(rib.descr));
	memcpy(&rib.remote_addr, &asp->peer->remote_addr,
	    sizeof(rib.remote_addr));
	rib.remote_id = asp->peer->remote_bgpid;
	if (asp->nexthop != NULL) {
		memcpy(&rib.true_nexthop, &asp->nexthop->true_nexthop,
		    sizeof(rib.true_nexthop));
		memcpy(&rib.exit_nexthop, &asp->nexthop->exit_nexthop,
		    sizeof(rib.exit_nexthop));
	} else {
		/* announced network may have a NULL nexthop */
		bzero(&rib.true_nexthop, sizeof(rib.true_nexthop));
		bzero(&rib.exit_nexthop, sizeof(rib.exit_nexthop));
		rib.true_nexthop.aid = p->prefix->aid;
		rib.exit_nexthop.aid = p->prefix->aid;
	}
	pt_getaddr(p->prefix, &rib.prefix);
	rib.prefixlen = p->prefix->prefixlen;
	rib.origin = asp->origin;
	rib.flags = 0;
	if (p->re->active == p)
		rib.flags |= F_PREF_ACTIVE;
	if (!asp->peer->conf.ebgp)
		rib.flags |= F_PREF_INTERNAL;
	if (asp->flags & F_PREFIX_ANNOUNCED)
		rib.flags |= F_PREF_ANNOUNCE;
	if (asp->nexthop == NULL || asp->nexthop->state == NEXTHOP_REACH)
		rib.flags |= F_PREF_ELIGIBLE;
	if (asp->flags & F_ATTR_LOOP)
		rib.flags &= ~F_PREF_ELIGIBLE;
	staletime = asp->peer->staletime[p->prefix->aid];
	if (staletime && p->lastchange <= staletime)
		rib.flags |= F_PREF_STALE;
	rib.aspath_len = aspath_length(asp->aspath);

	if ((wbuf = imsg_create(ibuf_se_ctl, IMSG_CTL_SHOW_RIB, 0, pid,
	    sizeof(rib) + rib.aspath_len)) == NULL)
		return;
	if (imsg_add(wbuf, &rib, sizeof(rib)) == -1 ||
	    imsg_add(wbuf, aspath_dump(asp->aspath),
	    rib.aspath_len) == -1)
		return;
	imsg_close(ibuf_se_ctl, wbuf);

	if (flags & F_CTL_DETAIL)
		for (l = 0; l < asp->others_len; l++) {
			if ((a = asp->others[l]) == NULL)
				break;
			if ((wbuf = imsg_create(ibuf_se_ctl,
			    IMSG_CTL_SHOW_RIB_ATTR, 0, pid,
			    attr_optlen(a))) == NULL)
				return;
			if ((bp = ibuf_reserve(wbuf, attr_optlen(a))) == NULL) {
				ibuf_free(wbuf);
				return;
			}
			if (attr_write(bp, attr_optlen(a), a->flags,
			    a->type, a->data, a->len) == -1) {
				ibuf_free(wbuf);
				return;
			}
			imsg_close(ibuf_se_ctl, wbuf);
		}
}

void
rde_dump_filterout(struct rde_peer *peer, struct prefix *p,
    struct ctl_show_rib_request *req)
{
	struct bgpd_addr	 addr;
	struct rde_aspath	*asp;
	enum filter_actions	 a;

	if (up_test_update(peer, p) != 1)
		return;

	pt_getaddr(p->prefix, &addr);
	a = rde_filter(out_rules, &asp, peer, p->aspath, &addr,
	    p->prefix->prefixlen, p->aspath->peer);
	if (asp)
		asp->peer = p->aspath->peer;
	else
		asp = p->aspath;

	if (a == ACTION_ALLOW)
		rde_dump_rib_as(p, asp, req->pid, req->flags);

	if (asp != p->aspath)
		path_put(asp);
}

void
rde_dump_filter(struct prefix *p, struct ctl_show_rib_request *req)
{
	struct rde_peer		*peer;

	if (req->flags & F_CTL_ADJ_IN ||
	    !(req->flags & (F_CTL_ADJ_IN|F_CTL_ADJ_OUT))) {
		if (req->peerid && req->peerid != p->aspath->peer->conf.id)
			return;
		if (req->type == IMSG_CTL_SHOW_RIB_AS &&
		    !aspath_match(p->aspath->aspath->data,
		    p->aspath->aspath->len, &req->as, req->as.as))
			return;
		if (req->type == IMSG_CTL_SHOW_RIB_COMMUNITY &&
		    !community_match(p->aspath, req->community.as,
		    req->community.type))
			return;
		if (req->type == IMSG_CTL_SHOW_RIB_EXTCOMMUNITY &&
		    !community_ext_match(p->aspath, &req->extcommunity, 0))
			return;
		if (req->type == IMSG_CTL_SHOW_RIB_LARGECOMMUNITY &&
		    !community_large_match(p->aspath, req->large_community.as,
		    req->large_community.ld1, req->large_community.ld2))
			return;
		if ((req->flags & F_CTL_ACTIVE) && p->re->active != p)
			return;
		rde_dump_rib_as(p, p->aspath, req->pid, req->flags);
	} else if (req->flags & F_CTL_ADJ_OUT) {
		if (p->re->active != p)
			/* only consider active prefix */
			return;
		if (req->peerid) {
			if ((peer = peer_get(req->peerid)) != NULL)
				rde_dump_filterout(peer, p, req);
			return;
		}
	}
}

void
rde_dump_upcall(struct rib_entry *re, void *ptr)
{
	struct prefix		*p;
	struct rde_dump_ctx	*ctx = ptr;

	LIST_FOREACH(p, &re->prefix_h, rib_l)
		rde_dump_filter(p, &ctx->req);
}

void
rde_dump_prefix_upcall(struct rib_entry *re, void *ptr)
{
	struct rde_dump_ctx	*ctx = ptr;
	struct prefix		*p;
	struct pt_entry		*pt;
	struct bgpd_addr	 addr;

	pt = re->prefix;
	pt_getaddr(pt, &addr);
	if (addr.aid != ctx->req.prefix.aid)
		return;
	if (ctx->req.prefixlen > pt->prefixlen)
		return;
	if (!prefix_compare(&ctx->req.prefix, &addr, ctx->req.prefixlen))
		LIST_FOREACH(p, &re->prefix_h, rib_l)
			rde_dump_filter(p, &ctx->req);
}

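/*
 * Dump contexts make RIB dumps incremental: rib_dump_r() processes at
 * most CTL_MSG_HIGH_MARK entries per invocation and rde_dump_runner()
 * resumes every context the session engine has not throttled, so a
 * large dump never monopolizes the RDE event loop.
 */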
void
rde_dump_ctx_new(struct ctl_show_rib_request *req, pid_t pid,
    enum imsg_type type)
{
	struct rde_dump_ctx	*ctx;
	struct rib		*rib;
	struct rib_entry	*re;
	u_int			 error;
	u_int8_t		 hostplen;

	if ((ctx = calloc(1, sizeof(*ctx))) == NULL) {
		log_warn("rde_dump_ctx_new");
		error = CTL_RES_NOMEM;
		imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, &error,
		    sizeof(error));
		return;
	}
	if ((rib = rib_find(req->rib)) == NULL) {
		log_warnx("rde_dump_ctx_new: no such rib %s", req->rib);
		error = CTL_RES_NOSUCHPEER;
		imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, &error,
		    sizeof(error));
		free(ctx);
		return;
	}

	memcpy(&ctx->req, req, sizeof(struct ctl_show_rib_request));
	ctx->req.pid = pid;
	ctx->req.type = type;
	ctx->ribctx.ctx_count = CTL_MSG_HIGH_MARK;
	ctx->ribctx.ctx_rib = rib;
	switch (ctx->req.type) {
	case IMSG_CTL_SHOW_NETWORK:
		ctx->ribctx.ctx_upcall = network_dump_upcall;
		break;
	case IMSG_CTL_SHOW_RIB:
	case IMSG_CTL_SHOW_RIB_AS:
	case IMSG_CTL_SHOW_RIB_COMMUNITY:
	case IMSG_CTL_SHOW_RIB_EXTCOMMUNITY:
	case IMSG_CTL_SHOW_RIB_LARGECOMMUNITY:
		ctx->ribctx.ctx_upcall = rde_dump_upcall;
		break;
	case IMSG_CTL_SHOW_RIB_PREFIX:
		if (req->flags & F_LONGER) {
			ctx->ribctx.ctx_upcall = rde_dump_prefix_upcall;
			break;
		}
		switch (req->prefix.aid) {
		case AID_INET:
		case AID_VPN_IPv4:
			hostplen = 32;
			break;
		case AID_INET6:
			hostplen = 128;
			break;
		default:
			fatalx("rde_dump_ctx_new: unknown af");
		}
		if (req->prefixlen == hostplen)
			re = rib_lookup(rib, &req->prefix);
		else
			re = rib_get(rib, &req->prefix, req->prefixlen);
		if (re)
			rde_dump_upcall(re, ctx);
		imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid,
		    -1, NULL, 0);
		free(ctx);
		return;
	default:
		fatalx("rde_dump_ctx_new: unsupported imsg type");
	}
	ctx->ribctx.ctx_done = rde_dump_done;
	ctx->ribctx.ctx_arg = ctx;
	ctx->ribctx.ctx_aid = ctx->req.aid;
	LIST_INSERT_HEAD(&rde_dump_h, ctx, entry);
	rib_dump_r(&ctx->ribctx);
}

void
rde_dump_ctx_throttle(pid_t pid, int throttle)
{
	struct rde_dump_ctx	*ctx;

	LIST_FOREACH(ctx, &rde_dump_h, entry) {
		if (ctx->req.pid == pid) {
			ctx->throttled = throttle;
			return;
		}
	}
}

void
rde_dump_runner(void)
{
	struct rde_dump_ctx	*ctx, *next;

	for (ctx = LIST_FIRST(&rde_dump_h); ctx != NULL; ctx = next) {
		next = LIST_NEXT(ctx, entry);
		if (!ctx->throttled)
			rib_dump_r(&ctx->ribctx);
	}
}

int
rde_dump_pending(void)
{
	struct rde_dump_ctx	*ctx;

	/* return true if there is at least one unthrottled context */
	LIST_FOREACH(ctx, &rde_dump_h, entry)
		if (!ctx->throttled)
			return (1);

	return (0);
}

void
rde_dump_done(void *arg)
{
	struct rde_dump_ctx	*ctx = arg;

	imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid,
	    -1, NULL, 0);
	LIST_REMOVE(ctx, entry);
	free(ctx);
}

void
rde_dump_rib_free(struct rib *rib)
{
	struct rde_dump_ctx	*ctx, *next;

	for (ctx = LIST_FIRST(&rde_dump_h); ctx != NULL; ctx = next) {
		next = LIST_NEXT(ctx, entry);
		if (ctx->ribctx.ctx_rib == rib)
			rde_dump_done(ctx);
	}
}

void
rde_dump_mrt_new(struct mrt *mrt, pid_t pid, int fd)
{
	struct rde_mrt_ctx	*ctx;
	struct rib		*rib;

	if ((ctx = calloc(1, sizeof(*ctx))) == NULL) {
		log_warn("rde_dump_mrt_new");
		return;
	}
	memcpy(&ctx->mrt, mrt, sizeof(struct mrt));
	TAILQ_INIT(&ctx->mrt.wbuf.bufs);
	ctx->mrt.wbuf.fd = fd;
	ctx->mrt.state = MRT_STATE_RUNNING;
	rib = rib_find(ctx->mrt.rib);
	if (rib == NULL) {
		log_warnx("non-existing RIB %s for mrt dump", ctx->mrt.rib);
		free(ctx);
		return;
	}

	if (ctx->mrt.type == MRT_TABLE_DUMP_V2)
		mrt_dump_v2_hdr(&ctx->mrt, conf, &peerlist);

	ctx->ribctx.ctx_count = CTL_MSG_HIGH_MARK;
	ctx->ribctx.ctx_rib = rib;
	ctx->ribctx.ctx_upcall = mrt_dump_upcall;
	ctx->ribctx.ctx_done = mrt_done;
	ctx->ribctx.ctx_arg = &ctx->mrt;
	ctx->ribctx.ctx_aid = AID_UNSPEC;
	LIST_INSERT_HEAD(&rde_mrts, ctx, entry);
	rde_mrt_cnt++;
	rib_dump_r(&ctx->ribctx);
}

void
rde_dump_mrt_free(struct rib *rib)
{
	struct rde_mrt_ctx	*ctx, *next;

	for (ctx = LIST_FIRST(&rde_mrts); ctx != NULL; ctx = next) {
		next = LIST_NEXT(ctx, entry);
		if (ctx->ribctx.ctx_rib == rib)
			mrt_done(&ctx->mrt);
	}
}

void
rde_rib_free(struct rib_desc *rd)
{
	/* abort pending rib_dumps */
	rde_dump_rib_free(&rd->rib);
	rde_dump_mrt_free(&rd->rib);

	rib_free(&rd->rib);
}

/*
 * kroute specific functions
 */
int
rde_rdomain_import(struct rde_aspath *asp, struct rdomain *rd)
{
	struct filter_set	*s;

	TAILQ_FOREACH(s, &rd->import, entry) {
		if (community_ext_match(asp, &s->action.ext_community, 0))
			return (1);
	}
	return (0);
}

void
rde_send_kroute(struct rib *rib, struct prefix *new, struct prefix *old)
{
	struct kroute_full	 kr;
	struct bgpd_addr	 addr;
	struct prefix		*p;
	struct rdomain		*rd;
	enum imsg_type		 type;

	/*
	 * Make sure that self-announced prefixes are not committed to the
	 * FIB. If both prefixes are unreachable no update is needed.
	 */
	if ((old == NULL || old->aspath->flags & F_PREFIX_ANNOUNCED) &&
	    (new == NULL || new->aspath->flags & F_PREFIX_ANNOUNCED))
		return;

	if (new == NULL || new->aspath->flags & F_PREFIX_ANNOUNCED) {
		type = IMSG_KROUTE_DELETE;
		p = old;
	} else {
		type = IMSG_KROUTE_CHANGE;
		p = new;
	}

	pt_getaddr(p->prefix, &addr);
	bzero(&kr, sizeof(kr));
	memcpy(&kr.prefix, &addr, sizeof(kr.prefix));
	kr.prefixlen = p->prefix->prefixlen;
	if (p->aspath->flags & F_NEXTHOP_REJECT)
		kr.flags |= F_REJECT;
	if (p->aspath->flags & F_NEXTHOP_BLACKHOLE)
		kr.flags |= F_BLACKHOLE;
	if (type == IMSG_KROUTE_CHANGE)
		memcpy(&kr.nexthop, &p->aspath->nexthop->true_nexthop,
		    sizeof(kr.nexthop));
	strlcpy(kr.label, rtlabel_id2name(p->aspath->rtlabelid),
	    sizeof(kr.label));

	switch (addr.aid) {
	case AID_VPN_IPv4:
		if (rib->flags & F_RIB_LOCAL)
			/* not Loc-RIB, no update for VPNs */
			break;

		SIMPLEQ_FOREACH(rd, rdomains_l, entry) {
			if (!rde_rdomain_import(p->aspath, rd))
				continue;
			/*
			 * must send exit_nexthop so that the correct MPLS
			 * tunnel is chosen
			 */
			if (type == IMSG_KROUTE_CHANGE)
				memcpy(&kr.nexthop,
				    &p->aspath->nexthop->exit_nexthop,
				    sizeof(kr.nexthop));
			if (imsg_compose(ibuf_main, type, rd->rtableid, 0, -1,
			    &kr, sizeof(kr)) == -1)
				fatal("%s %d imsg_compose error", __func__,
				    __LINE__);
		}
		break;
	default:
		if (imsg_compose(ibuf_main, type, rib->rtableid, 0, -1,
		    &kr, sizeof(kr)) == -1)
			fatal("%s %d imsg_compose error", __func__, __LINE__);
		break;
	}
}

/*
 * update specific functions
 */
void
rde_generate_updates(struct rib *rib, struct prefix *new, struct prefix *old)
{
	struct rde_peer			*peer;

	/*
	 * If old != NULL we know it was active and should be removed.
	 * If new != NULL we know it is reachable and we should therefore
	 * generate an update.
	 */
	if (old == NULL && new == NULL)
		return;

	LIST_FOREACH(peer, &peerlist, peer_l) {
		if (peer->conf.id == 0)
			continue;
		if (peer->rib != rib)
			continue;
		if (peer->state != PEER_UP)
			continue;
		up_generate_updates(out_rules, peer, new, old);
	}
}

u_char	queue_buf[4096];

void
rde_up_dump_upcall(struct rib_entry *re, void *ptr)
{
	struct rde_peer		*peer = ptr;

	if (re_rib(re) != peer->rib)
		fatalx("King Bula: monstrous evil horror.");
	if (re->active == NULL)
		return;
	up_generate_updates(out_rules, peer, re->active, NULL);
}

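/*
 * queue_buf holds the body of one BGP UPDATE (RFC 4271): a two-octet
 * withdrawn-routes length, the withdrawn prefixes, then the path
 * attributes and NLRI.  MSGSIZE_HEADER octets are left unused so the
 * complete message, header included, still fits the 4096-byte buffer.
 */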
void
rde_update_queue_runner(void)
{
	struct rde_peer		*peer;
	int			 r, sent, max = RDE_RUNNER_ROUNDS, eor = 0;
	u_int16_t		 len, wd_len, wpos;

	len = sizeof(queue_buf) - MSGSIZE_HEADER;
	do {
		sent = 0;
		LIST_FOREACH(peer, &peerlist, peer_l) {
			if (peer->conf.id == 0)
				continue;
			if (peer->state != PEER_UP)
				continue;
			/* first withdraws */
			wpos = 2; /* reserve space for the length field */
			r = up_dump_prefix(queue_buf + wpos, len - wpos - 2,
			    &peer->withdraws[AID_INET], peer);
			wd_len = r;
			/* write the withdraws length field */
			wd_len = htons(wd_len);
			memcpy(queue_buf, &wd_len, 2);
			wpos += r;

			/* now bgp path attributes */
			r = up_dump_attrnlri(queue_buf + wpos, len - wpos,
			    peer);
			switch (r) {
			case -1:
				eor = 1;
				if (wd_len == 0) {
					/* no withdraws queued, just send EoR */
					peer_send_eor(peer, AID_INET);
					continue;
				}
				break;
			case 2:
				if (wd_len == 0) {
					/*
					 * No packet to send. No withdraws and
					 * no path attributes. Skip.
					 */
					continue;
				}
				/* FALLTHROUGH */
			default:
				wpos += r;
				break;
			}

			/* finally send message to SE */
			if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
			    0, -1, queue_buf, wpos) == -1)
				fatal("%s %d imsg_compose error", __func__,
				    __LINE__);
			sent++;
			if (eor) {
				eor = 0;
				peer_send_eor(peer, AID_INET);
			}
		}
		max -= sent;
	} while (sent != 0 && max > 0);
}

void
rde_update6_queue_runner(u_int8_t aid)
{
	struct rde_peer		*peer;
	u_char			*b;
	int			 r, sent, max = RDE_RUNNER_ROUNDS / 2;
	u_int16_t		 len;

	/* first withdraws ... */
	do {
		sent = 0;
		LIST_FOREACH(peer, &peerlist, peer_l) {
			if (peer->conf.id == 0)
				continue;
			if (peer->state != PEER_UP)
				continue;
			len = sizeof(queue_buf) - MSGSIZE_HEADER;
			b = up_dump_mp_unreach(queue_buf, &len, peer, aid);

			if (b == NULL)
				continue;
			/* finally send message to SE */
			if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
			    0, -1, b, len) == -1)
				fatal("%s %d imsg_compose error", __func__,
				    __LINE__);
			sent++;
		}
		max -= sent;
	} while (sent != 0 && max > 0);

	/* ... then updates */
	max = RDE_RUNNER_ROUNDS / 2;
	do {
		sent = 0;
		LIST_FOREACH(peer, &peerlist, peer_l) {
			if (peer->conf.id == 0)
				continue;
			if (peer->state != PEER_UP)
				continue;
			len = sizeof(queue_buf) - MSGSIZE_HEADER;
			r = up_dump_mp_reach(queue_buf, &len, peer, aid);
			switch (r) {
			case -2:
				continue;
			case -1:
				peer_send_eor(peer, aid);
				continue;
			default:
				b = queue_buf + r;
				break;
			}

			/* finally send message to SE */
			if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
			    0, -1, b, len) == -1)
				fatal("%s %d imsg_compose error", __func__,
				    __LINE__);
			sent++;
		}
		max -= sent;
	} while (sent != 0 && max > 0);
}

/*
 * pf table specific functions
 */
void
rde_send_pftable(u_int16_t id, struct bgpd_addr *addr,
    u_int8_t len, int del)
{
	struct pftable_msg pfm;

	if (id == 0)
		return;

	/* do not run while cleaning up */
	if (rde_quit)
		return;

	bzero(&pfm, sizeof(pfm));
	strlcpy(pfm.pftable, pftable_id2name(id), sizeof(pfm.pftable));
	memcpy(&pfm.addr, addr, sizeof(pfm.addr));
	pfm.len = len;

	if (imsg_compose(ibuf_main,
	    del ? IMSG_PFTABLE_REMOVE : IMSG_PFTABLE_ADD,
	    0, 0, -1, &pfm, sizeof(pfm)) == -1)
		fatal("%s %d imsg_compose error", __func__, __LINE__);
}

void
rde_send_pftable_commit(void)
{
	/* do not run while cleaning up */
	if (rde_quit)
		return;

	if (imsg_compose(ibuf_main, IMSG_PFTABLE_COMMIT, 0, 0, -1, NULL, 0) ==
	    -1)
		fatal("%s %d imsg_compose error", __func__, __LINE__);
}

/*
 * nexthop specific functions
 */
void
rde_send_nexthop(struct bgpd_addr *next, int valid)
{
	int			 type;

	if (valid)
		type = IMSG_NEXTHOP_ADD;
	else
		type = IMSG_NEXTHOP_REMOVE;

	if (imsg_compose(ibuf_main, type, 0, 0, -1, next,
	    sizeof(struct bgpd_addr)) == -1)
		fatal("%s %d imsg_compose error", __func__, __LINE__);
}

/*
 * soft reconfig specific functions
 */
void
rde_reload_done(void)
{
	struct rdomain		*rd;
	struct rde_peer		*peer;
	struct filter_head	*fh;
	u_int16_t		 rid;

	/* first merge the main config */
	if ((nconf->flags & BGPD_FLAG_NO_EVALUATE)
	    != (conf->flags & BGPD_FLAG_NO_EVALUATE)) {
		log_warnx("change to/from route-collector "
		    "mode ignored");
		if (conf->flags & BGPD_FLAG_NO_EVALUATE)
			nconf->flags |= BGPD_FLAG_NO_EVALUATE;
		else
			nconf->flags &= ~BGPD_FLAG_NO_EVALUATE;
	}
	memcpy(conf, nconf, sizeof(struct bgpd_config));
	conf->listen_addrs = NULL;
	conf->csock = NULL;
	conf->rcsock = NULL;
	free(nconf);
	nconf = NULL;

	/* sync peerself with conf */
	peerself->remote_bgpid = ntohl(conf->bgpid);
	peerself->conf.local_as = conf->as;
	peerself->conf.remote_as = conf->as;
	peerself->short_as = conf->short_as;

	/* apply new set of rdomain, sync will be done later */
	while ((rd = SIMPLEQ_FIRST(rdomains_l)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(rdomains_l, entry);
		filterset_free(&rd->import);
		filterset_free(&rd->export);
		free(rd);
	}
	free(rdomains_l);
	rdomains_l = newdomains;
	/* XXX WHERE IS THE SYNC ??? */

	rde_filter_calc_skip_steps(out_rules_tmp);

	/*
	 * Make the new filter rules the active ones but keep the old for
	 * softreconfig. This is needed so that changes arriving during
	 * the reload use the right filters.
	 */
	fh = out_rules;
	out_rules = out_rules_tmp;
	out_rules_tmp = fh;

	/* check if filter changed */
	LIST_FOREACH(peer, &peerlist, peer_l) {
		if (peer->conf.id == 0)
			continue;
		peer->reconf_out = 0;
		peer->reconf_rib = 0;
		if (peer->rib != rib_find(peer->conf.rib)) {
			rib_dump(peer->rib, rde_softreconfig_unload_peer, peer,
			    AID_UNSPEC);
			peer->rib = rib_find(peer->conf.rib);
			if (peer->rib == NULL)
				fatalx("King Bula's peer met an unknown RIB");
			peer->reconf_rib = 1;
			continue;
		}
		if (!rde_filter_equal(out_rules, out_rules_tmp, peer)) {
			peer->reconf_out = 1;
		}
	}
	/* bring ribs in sync */
	for (rid = 0; rid < rib_size; rid++) {
		if (*ribs[rid].name == '\0')
			continue;
		rde_filter_calc_skip_steps(ribs[rid].in_rules_tmp);

		/* flip rules, make new active */
		fh = ribs[rid].in_rules;
		ribs[rid].in_rules = ribs[rid].in_rules_tmp;
		ribs[rid].in_rules_tmp = fh;

		switch (ribs[rid].state) {
		case RECONF_DELETE:
			rde_rib_free(&ribs[rid]);
			break;
		case RECONF_KEEP:
			if (rde_filter_equal(ribs[rid].in_rules,
			    ribs[rid].in_rules_tmp, NULL))
				/* rib is in sync */
				break;
			ribs[rid].state = RECONF_RELOAD;
			/* FALLTHROUGH */
		case RECONF_REINIT:
			rib_dump(&ribs[0].rib, rde_softreconfig_in, &ribs[rid],
			    AID_UNSPEC);
			break;
		case RECONF_RELOAD:
			log_warnx("Bad rib reload state");
			/* FALLTHROUGH */
		case RECONF_NONE:
			break;
		}
	}
	LIST_FOREACH(peer, &peerlist, peer_l) {
		if (peer->reconf_out)
			rib_dump(peer->rib, rde_softreconfig_out,
			    peer, AID_UNSPEC);
		else if (peer->reconf_rib)
			/* dump the full table to neighbors that changed rib */
			peer_dump(peer->conf.id, AID_UNSPEC);
	}
	filterlist_free(out_rules_tmp);
	out_rules_tmp = NULL;
	for (rid = 0; rid < rib_size; rid++) {
		if (*ribs[rid].name == '\0')
			continue;
		filterlist_free(ribs[rid].in_rules_tmp);
		ribs[rid].in_rules_tmp = NULL;
		ribs[rid].state = RECONF_NONE;
	}

	log_info("RDE reconfigured");
	imsg_compose(ibuf_main, IMSG_RECONF_DONE, 0, 0,
	    -1, NULL, 0);
}

void
rde_softreconfig_in(struct rib_entry *re, void *ptr)
{
	struct rib_desc		*rib = ptr;
	struct prefix		*p, *np;
	struct pt_entry		*pt;
	struct rde_peer		*peer;
	struct rde_aspath	*asp, *oasp, *nasp;
	enum filter_actions	 oa, na;
	struct bgpd_addr	 addr;

	pt = re->prefix;
	pt_getaddr(pt, &addr);
	for (p = LIST_FIRST(&re->prefix_h); p != NULL; p = np) {
		/*
		 * prefix_remove() and path_update() may change the object
		 * so cache the values.
		 */
		np = LIST_NEXT(p, rib_l);
		asp = p->aspath;
		peer = asp->peer;

		/* check if prefix changed */
		if (rib->state == RECONF_RELOAD) {
			oa = rde_filter(rib->in_rules_tmp, &oasp, peer,
			    asp, &addr, pt->prefixlen, peer);
			oasp = oasp != NULL ? oasp : asp;
		} else {
			/* make sure we update everything for RECONF_REINIT */
			oa = ACTION_DENY;
			oasp = asp;
		}
		na = rde_filter(rib->in_rules, &nasp, peer, asp,
		    &addr, pt->prefixlen, peer);
		nasp = nasp != NULL ? nasp : asp;

		/* go through all 4 possible combinations */
		/* if (oa == ACTION_DENY && na == ACTION_DENY) */
			/* nothing to do */
		if (oa == ACTION_DENY && na == ACTION_ALLOW) {
			/* update Local-RIB */
			path_update(&rib->rib, peer, nasp, &addr,
			    pt->prefixlen);
		} else if (oa == ACTION_ALLOW && na == ACTION_DENY) {
			/* remove from Local-RIB */
			prefix_remove(&rib->rib, peer, &addr, pt->prefixlen, 0);
		} else if (oa == ACTION_ALLOW && na == ACTION_ALLOW) {
			if (path_compare(nasp, oasp) != 0)
				/* send update */
				path_update(&rib->rib, peer, nasp, &addr,
				    pt->prefixlen);
		}

		if (oasp != asp)
			path_put(oasp);
		if (nasp != asp)
			path_put(nasp);
	}
}

void
rde_softreconfig_out(struct rib_entry *re, void *ptr)
{
	struct prefix		*p = re->active;
	struct pt_entry		*pt;
	struct rde_peer		*peer = ptr;
	struct rde_aspath	*oasp, *nasp;
	enum filter_actions	 oa, na;
	struct bgpd_addr	 addr;

	if (peer->conf.id == 0)
		fatalx("King Bula troubled by bad peer");

	if (p == NULL)
		return;

	pt = re->prefix;
	pt_getaddr(pt, &addr);

	if (up_test_update(peer, p) != 1)
		return;

	oa = rde_filter(out_rules_tmp, &oasp, peer, p->aspath,
	    &addr, pt->prefixlen, p->aspath->peer);
	na = rde_filter(out_rules, &nasp, peer, p->aspath,
	    &addr, pt->prefixlen, p->aspath->peer);
	oasp = oasp != NULL ? oasp : p->aspath;
	nasp = nasp != NULL ? nasp : p->aspath;

	/* go through all 4 possible combinations */
	/* if (oa == ACTION_DENY && na == ACTION_DENY) */
		/* nothing to do */
	if (oa == ACTION_DENY && na == ACTION_ALLOW) {
		/* send update */
		up_generate(peer, nasp, &addr, pt->prefixlen);
	} else if (oa == ACTION_ALLOW && na == ACTION_DENY) {
		/* send withdraw */
		up_generate(peer, NULL, &addr, pt->prefixlen);
	} else if (oa == ACTION_ALLOW && na == ACTION_ALLOW) {
		/* send update if path attributes changed */
		if (path_compare(nasp, oasp) != 0)
			up_generate(peer, nasp, &addr, pt->prefixlen);
	}

	if (oasp != p->aspath)
		path_put(oasp);
	if (nasp != p->aspath)
		path_put(nasp);
}

void
rde_softreconfig_unload_peer(struct rib_entry *re, void *ptr)
{
	struct rde_peer		*peer = ptr;
	struct prefix		*p = re->active;
	struct pt_entry		*pt;
	struct rde_aspath	*oasp;
	enum filter_actions	 oa;
	struct bgpd_addr	 addr;

	pt = re->prefix;
	pt_getaddr(pt, &addr);

	/* check if prefix was announced */
	if (up_test_update(peer, p) != 1)
		return;

	oa = rde_filter(out_rules_tmp, &oasp, peer, p->aspath,
	    &addr, pt->prefixlen, p->aspath->peer);
	oasp = oasp != NULL ? oasp : p->aspath;

	if (oa == ACTION_DENY)
		/* nothing to do */
		goto done;

	/* send withdraw */
	up_generate(peer, NULL, &addr, pt->prefixlen);
done:
	if (oasp != p->aspath)
		path_put(oasp);
}

/*
 * generic helper function
 */
u_int32_t
rde_local_as(void)
{
	return (conf->as);
}

int
rde_noevaluate(void)
{
	/* do not run while cleaning up */
	if (rde_quit)
		return (1);

	return (conf->flags & BGPD_FLAG_NO_EVALUATE);
}

int
rde_decisionflags(void)
{
	return (conf->flags & BGPD_FLAG_DECISION_MASK);
}

int
rde_as4byte(struct rde_peer *peer)
{
	return (peer->capa.as4byte);
}

/*
 * peer functions
 */
struct peer_table {
	struct rde_peer_head	*peer_hashtbl;
	u_int32_t		 peer_hashmask;
} peertable;

#define PEER_HASH(x)		\
	&peertable.peer_hashtbl[(x) & peertable.peer_hashmask]

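/*
 * peer_init() rounds the table size up to a power of two so that
 * PEER_HASH() can reduce an id with a simple mask instead of a
 * modulo.
 */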
void
peer_init(u_int32_t hashsize)
{
	struct peer_config pc;
	u_int32_t	 hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	peertable.peer_hashtbl = calloc(hs, sizeof(struct rde_peer_head));
	if (peertable.peer_hashtbl == NULL)
		fatal("peer_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&peertable.peer_hashtbl[i]);
	LIST_INIT(&peerlist);

	peertable.peer_hashmask = hs - 1;

	bzero(&pc, sizeof(pc));
	snprintf(pc.descr, sizeof(pc.descr), "LOCAL");

	peerself = peer_add(0, &pc);
	if (peerself == NULL)
		fatalx("peer_init add self");

	peerself->state = PEER_UP;
}

void
peer_shutdown(void)
{
	u_int32_t	i;

	for (i = 0; i <= peertable.peer_hashmask; i++)
		if (!LIST_EMPTY(&peertable.peer_hashtbl[i]))
			log_warnx("peer_free: free non-free table");

	free(peertable.peer_hashtbl);
}

struct rde_peer *
peer_get(u_int32_t id)
{
	struct rde_peer_head	*head;
	struct rde_peer		*peer;

	head = PEER_HASH(id);

	LIST_FOREACH(peer, head, hash_l) {
		if (peer->conf.id == id)
			return (peer);
	}
	return (NULL);
}

struct rde_peer *
peer_add(u_int32_t id, struct peer_config *p_conf)
{
	struct rde_peer_head	*head;
	struct rde_peer		*peer;

	if ((peer = peer_get(id))) {
		memcpy(&peer->conf, p_conf, sizeof(struct peer_config));
		return (NULL);
	}

	peer = calloc(1, sizeof(struct rde_peer));
	if (peer == NULL)
		fatal("peer_add");

	LIST_INIT(&peer->path_h);
	memcpy(&peer->conf, p_conf, sizeof(struct peer_config));
	peer->remote_bgpid = 0;
	peer->rib = rib_find(peer->conf.rib);
	if (peer->rib == NULL)
		fatalx("King Bula's new peer met an unknown RIB");
	peer->state = PEER_NONE;
	up_init(peer);

	head = PEER_HASH(id);

	LIST_INSERT_HEAD(head, peer, hash_l);
	LIST_INSERT_HEAD(&peerlist, peer, peer_l);

	return (peer);
}

int
peer_localaddrs(struct rde_peer *peer, struct bgpd_addr *laddr)
{
	struct ifaddrs	*ifap, *ifa, *match;

	if (getifaddrs(&ifap) == -1)
		fatal("getifaddrs");

	for (match = ifap; match != NULL; match = match->ifa_next)
		if (sa_cmp(laddr, match->ifa_addr) == 0)
			break;

	if (match == NULL) {
		log_warnx("peer_localaddrs: local address not found");
		freeifaddrs(ifap);
		return (-1);
	}

	for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) {
		if (ifa->ifa_addr->sa_family == AF_INET &&
		    strcmp(ifa->ifa_name, match->ifa_name) == 0) {
			if (ifa->ifa_addr->sa_family ==
			    match->ifa_addr->sa_family)
				ifa = match;
			sa2addr(ifa->ifa_addr, &peer->local_v4_addr);
			break;
		}
	}
	for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) {
		if (ifa->ifa_addr->sa_family == AF_INET6 &&
		    strcmp(ifa->ifa_name, match->ifa_name) == 0) {
			/*
			 * only accept global scope addresses unless
			 * explicitly specified.
			 */
			if (ifa->ifa_addr->sa_family ==
			    match->ifa_addr->sa_family)
				ifa = match;
			else if (IN6_IS_ADDR_LINKLOCAL(
			    &((struct sockaddr_in6 *)ifa->
			    ifa_addr)->sin6_addr) ||
			    IN6_IS_ADDR_SITELOCAL(
			    &((struct sockaddr_in6 *)ifa->
			    ifa_addr)->sin6_addr))
				continue;
			sa2addr(ifa->ifa_addr, &peer->local_v6_addr);
			break;
		}
	}

	freeifaddrs(ifap);
	return (0);
}

void
peer_up(u_int32_t id, struct session_up *sup)
{
	struct rde_peer	*peer;
	u_int8_t	 i;

	peer = peer_get(id);
	if (peer == NULL) {
		log_warnx("peer_up: unknown peer id %d", id);
		return;
	}

	if (peer->state != PEER_DOWN && peer->state != PEER_NONE &&
	    peer->state != PEER_UP) {
		/*
		 * There is a race condition when doing PEER_ERR -> PEER_DOWN.
		 * So just do a full reset of the peer here.
		 */
		for (i = 0; i < AID_MAX; i++) {
			peer->staletime[i] = 0;
			peer_flush(peer, i);
		}
		up_down(peer);
		peer->prefix_cnt = 0;
		peer->state = PEER_DOWN;
	}
	peer->remote_bgpid = ntohl(sup->remote_bgpid);
	peer->short_as = sup->short_as;
	memcpy(&peer->remote_addr, &sup->remote_addr,
	    sizeof(peer->remote_addr));
	memcpy(&peer->capa, &sup->capa, sizeof(peer->capa));

	if (peer_localaddrs(peer, &sup->local_addr)) {
		peer->state = PEER_DOWN;
		imsg_compose(ibuf_se, IMSG_SESSION_DOWN, id, 0, -1, NULL, 0);
		return;
	}

	peer->state = PEER_UP;
	up_init(peer);

	if (rde_noevaluate())
		/*
		 * No need to dump the table to the peer, there are no active
		 * prefixes anyway. This is a speed-up hack.
		 */
		return;

	for (i = 0; i < AID_MAX; i++) {
		if (peer->capa.mp[i])
			peer_dump(id, i);
	}
}

void
peer_down(u_int32_t id)
{
	struct rde_peer		*peer;
	struct rde_aspath	*asp, *nasp;

	peer = peer_get(id);
	if (peer == NULL) {
		log_warnx("peer_down: unknown peer id %d", id);
		return;
	}
	peer->remote_bgpid = 0;
	peer->state = PEER_DOWN;
	up_down(peer);

	/* walk through per peer RIB list and remove all prefixes. */
	for (asp = LIST_FIRST(&peer->path_h); asp != NULL; asp = nasp) {
		nasp = LIST_NEXT(asp, peer_l);
		path_remove(asp);
	}
	LIST_INIT(&peer->path_h);
	peer->prefix_cnt = 0;

	/* Deletions are performed in path_remove() */
	rde_send_pftable_commit();

	LIST_REMOVE(peer, hash_l);
	LIST_REMOVE(peer, peer_l);
	free(peer);
}

/*
 * Flush all routes older than staletime. If staletime is 0 all routes will
 * be flushed.
 */
void
peer_flush(struct rde_peer *peer, u_int8_t aid)
{
	struct rde_aspath	*asp, *nasp;
	u_int32_t		 rprefixes;

	rprefixes = 0;
	/* walk through per peer RIB list and remove all stale prefixes. */
	for (asp = LIST_FIRST(&peer->path_h); asp != NULL; asp = nasp) {
		nasp = LIST_NEXT(asp, peer_l);
		rprefixes += path_remove_stale(asp, aid);
	}

	/* Deletions are performed in path_remove() */
	rde_send_pftable_commit();

	/* flushed, no need to keep the staletime */
	peer->staletime[aid] = 0;

	if (peer->prefix_cnt > rprefixes)
		peer->prefix_cnt -= rprefixes;
	else
		peer->prefix_cnt = 0;
}

void
peer_stale(u_int32_t id, u_int8_t aid)
{
	struct rde_peer		*peer;
	time_t			 now;

	peer = peer_get(id);
	if (peer == NULL) {
		log_warnx("peer_stale: unknown peer id %d", id);
		return;
	}

	/* flush the now even staler routes out */
	if (peer->staletime[aid])
		peer_flush(peer, aid);
	peer->staletime[aid] = now = time(NULL);

	/* make sure new prefixes start on a higher timestamp */
	do {
		sleep(1);
	} while (now >= time(NULL));
}

void
peer_dump(u_int32_t id, u_int8_t aid)
{
	struct rde_peer		*peer;

	peer = peer_get(id);
	if (peer == NULL) {
		log_warnx("peer_dump: unknown peer id %d", id);
		return;
	}

	if (peer->conf.announce_type == ANNOUNCE_DEFAULT_ROUTE)
		up_generate_default(out_rules, peer, aid);
	else
		rib_dump(peer->rib, rde_up_dump_upcall, peer, aid);
	if (peer->capa.grestart.restart)
		up_generate_marker(peer, aid);
}

/* End-of-RIB marker, RFC 4724 */
void
peer_recv_eor(struct rde_peer *peer, u_int8_t aid)
{
	peer->prefix_rcvd_eor++;

	/*
	 * First notify SE to avert a possible race with the restart timeout.
	 * If the timeout fires before this imsg is processed by the SE it will
	 * result in the same operation since the timeout issues a FLUSH which
	 * does the same as the RESTARTED action (flushing stale routes).
	 * The logic in the SE is so that only one of FLUSH or RESTARTED will
	 * be sent back to the RDE and so peer_flush is only called once.
	 */
	if (imsg_compose(ibuf_se, IMSG_SESSION_RESTARTED, peer->conf.id,
	    0, -1, &aid, sizeof(aid)) == -1)
		fatal("%s %d imsg_compose error", __func__, __LINE__);

	log_peer_info(&peer->conf, "received %s EOR marker",
	    aid2str(aid));
}

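/*
 * Per RFC 4724 an End-of-RIB marker for IPv4 unicast is the minimal
 * UPDATE (four zero octets: empty withdrawn-routes and path-attribute
 * lengths); for any other AFI/SAFI it is an UPDATE whose only path
 * attribute is an empty MP_UNREACH_NLRI, which is what the ten-octet
 * buffer below encodes.
 */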
void
peer_send_eor(struct rde_peer *peer, u_int8_t aid)
{
	u_int16_t	afi;
	u_int8_t	safi;

	peer->prefix_sent_eor++;

	if (aid == AID_INET) {
		u_char null[4];

		bzero(&null, 4);
		if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
		    0, -1, &null, 4) == -1)
			fatal("%s %d imsg_compose error in peer_send_eor",
			    __func__, __LINE__);
	} else {
		u_int16_t	i;
		u_char		buf[10];

		if (aid2afi(aid, &afi, &safi) == -1)
			fatalx("peer_send_eor: bad AID");

		i = 0;	/* v4 withdrawn len */
		bcopy(&i, &buf[0], sizeof(i));
		i = htons(6);	/* path attr len */
		bcopy(&i, &buf[2], sizeof(i));
		buf[4] = ATTR_OPTIONAL;
		buf[5] = ATTR_MP_UNREACH_NLRI;
		buf[6] = 3;	/* withdrawn len */
		i = htons(afi);
		bcopy(&i, &buf[7], sizeof(i));
		buf[9] = safi;

		if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
		    0, -1, &buf, 10) == -1)
			fatal("%s %d imsg_compose error in peer_send_eor",
			    __func__, __LINE__);
	}

	log_peer_info(&peer->conf, "sending %s EOR marker",
	    aid2str(aid));
}

/*
 * network announcement stuff
 */
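/*
 * When a network is announced into an rdomain it is rewritten as a
 * VPN-IPv4 prefix. The 20-bit MPLS label is packed into the three
 * label-stack octets (RFC 3032): shifts of 12 and 4 place the label
 * in the top 20 bits and the low nibble of the last octet holds the
 * EXP bits plus the bottom-of-stack flag.
 */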
void
network_add(struct network_config *nc, int flagstatic)
{
	struct rdomain		*rd;
	struct rde_aspath	*asp;
	struct filter_set_head	*vpnset = NULL;
	in_addr_t		 prefix4;
	u_int16_t		 i;

	if (nc->rtableid) {
		SIMPLEQ_FOREACH(rd, rdomains_l, entry) {
			if (rd->rtableid != nc->rtableid)
				continue;
			switch (nc->prefix.aid) {
			case AID_INET:
				prefix4 = nc->prefix.v4.s_addr;
				bzero(&nc->prefix, sizeof(nc->prefix));
				nc->prefix.aid = AID_VPN_IPv4;
				nc->prefix.vpn4.rd = rd->rd;
				nc->prefix.vpn4.addr.s_addr = prefix4;
				nc->prefix.vpn4.labellen = 3;
				nc->prefix.vpn4.labelstack[0] =
				    (rd->label >> 12) & 0xff;
				nc->prefix.vpn4.labelstack[1] =
				    (rd->label >> 4) & 0xff;
				nc->prefix.vpn4.labelstack[2] =
				    (rd->label << 4) & 0xf0;
				nc->prefix.vpn4.labelstack[2] |= BGP_MPLS_BOS;
				vpnset = &rd->export;
				break;
			default:
				log_warnx("unable to VPNize prefix");
				filterset_free(&nc->attrset);
				return;
			}
			break;
		}
		if (rd == NULL) {
			log_warnx("network_add: "
			    "prefix %s/%u in non-existing rdomain %u",
			    log_addr(&nc->prefix), nc->prefixlen, nc->rtableid);
			return;
		}
	}

	if (nc->type == NETWORK_MRTCLONE) {
		asp = nc->asp;
	} else {
		asp = path_get();
		asp->aspath = aspath_get(NULL, 0);
		asp->origin = ORIGIN_IGP;
		asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH |
		    F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED;
		/* the nexthop is unset unless a default set overrides it */
	}
	if (!flagstatic)
		asp->flags |= F_ANN_DYNAMIC;
	rde_apply_set(asp, &nc->attrset, nc->prefix.aid, peerself, peerself);
	if (vpnset)
		rde_apply_set(asp, vpnset, nc->prefix.aid, peerself, peerself);
	for (i = 1; i < rib_size; i++) {
		if (*ribs[i].name == '\0')
			break;
		path_update(&ribs[i].rib, peerself, asp, &nc->prefix,
		    nc->prefixlen);
	}
	path_put(asp);
	filterset_free(&nc->attrset);
}

void
network_delete(struct network_config *nc, int flagstatic)
{
	struct rdomain	*rd;
	in_addr_t	 prefix4;
	u_int32_t	 flags = F_PREFIX_ANNOUNCED;
	u_int32_t	 i;

	if (!flagstatic)
		flags |= F_ANN_DYNAMIC;

	if (nc->rtableid) {
		SIMPLEQ_FOREACH(rd, rdomains_l, entry) {
			if (rd->rtableid != nc->rtableid)
				continue;
			switch (nc->prefix.aid) {
			case AID_INET:
				prefix4 = nc->prefix.v4.s_addr;
				bzero(&nc->prefix, sizeof(nc->prefix));
				nc->prefix.aid = AID_VPN_IPv4;
				nc->prefix.vpn4.rd = rd->rd;
				nc->prefix.vpn4.addr.s_addr = prefix4;
				nc->prefix.vpn4.labellen = 3;
				nc->prefix.vpn4.labelstack[0] =
				    (rd->label >> 12) & 0xff;
				nc->prefix.vpn4.labelstack[1] =
				    (rd->label >> 4) & 0xff;
				nc->prefix.vpn4.labelstack[2] =
				    (rd->label << 4) & 0xf0;
				nc->prefix.vpn4.labelstack[2] |= BGP_MPLS_BOS;
				break;
			default:
				log_warnx("unable to VPNize prefix");
				return;
			}
		}
	}

	for (i = 1; i < rib_size; i++) {
		if (*ribs[i].name == '\0')
			break;
		prefix_remove(&ribs[i].rib, peerself, &nc->prefix,
		    nc->prefixlen, flags);
	}
}

void
network_dump_upcall(struct rib_entry *re, void *ptr)
{
	struct prefix		*p;
	struct kroute_full	 k;
	struct bgpd_addr	 addr;
	struct rde_dump_ctx	*ctx = ptr;

	LIST_FOREACH(p, &re->prefix_h, rib_l) {
		if (!(p->aspath->flags & F_PREFIX_ANNOUNCED))
			continue;
		pt_getaddr(p->prefix, &addr);

		bzero(&k, sizeof(k));
		memcpy(&k.prefix, &addr, sizeof(k.prefix));
		if (p->aspath->nexthop == NULL ||
		    p->aspath->nexthop->state != NEXTHOP_REACH)
			k.nexthop.aid = k.prefix.aid;
		else
			memcpy(&k.nexthop, &p->aspath->nexthop->true_nexthop,
			    sizeof(k.nexthop));
		k.prefixlen = p->prefix->prefixlen;
		k.flags = F_KERNEL;
		if ((p->aspath->flags & F_ANN_DYNAMIC) == 0)
			k.flags = F_STATIC;
		if (imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_NETWORK, 0,
		    ctx->req.pid, -1, &k, sizeof(k)) == -1)
			log_warnx("network_dump_upcall: "
			    "imsg_compose error");
	}
}

/* clean up */
void
rde_shutdown(void)
{
	struct rde_peer		*p;
	u_int32_t		 i;

	/*
	 * The decision process is turned off while rde_quit == 1;
	 * rde_shutdown depends on this.
	 */

	/*
	 * All peers go down
	 */
	for (i = 0; i <= peertable.peer_hashmask; i++)
		while ((p = LIST_FIRST(&peertable.peer_hashtbl[i])) != NULL)
			peer_down(p->conf.id);

	/* free filters */
	filterlist_free(out_rules);
	for (i = 0; i < rib_size; i++) {
		if (*ribs[i].name == '\0')
			break;
		filterlist_free(ribs[i].in_rules);
	}

	nexthop_shutdown();
	path_shutdown();
	aspath_shutdown();
	attr_shutdown();
	pt_shutdown();
	peer_shutdown();
}

int
sa_cmp(struct bgpd_addr *a, struct sockaddr *b)
{
	struct sockaddr_in	*in_b;
	struct sockaddr_in6	*in6_b;

	if (aid2af(a->aid) != b->sa_family)
		return (1);

	switch (b->sa_family) {
	case AF_INET:
		in_b = (struct sockaddr_in *)b;
		if (a->v4.s_addr != in_b->sin_addr.s_addr)
			return (1);
		break;
	case AF_INET6:
		in6_b = (struct sockaddr_in6 *)b;
#ifdef __KAME__
		/* directly stolen from sbin/ifconfig/ifconfig.c */
		if (IN6_IS_ADDR_LINKLOCAL(&in6_b->sin6_addr)) {
			in6_b->sin6_scope_id =
			    ntohs(*(u_int16_t *)&in6_b->sin6_addr.s6_addr[2]);
			in6_b->sin6_addr.s6_addr[2] =
			    in6_b->sin6_addr.s6_addr[3] = 0;
		}
#endif
		if (bcmp(&a->v6, &in6_b->sin6_addr,
		    sizeof(struct in6_addr)))
			return (1);
		break;
	default:
		fatal("king bula sez: unknown address family");
		/* NOTREACHED */
	}

	return (0);
}