GCC Code Coverage Report
Directory: ./ Exec Total Coverage
File: usr.sbin/bgpd/rde_update.c Lines: 0 595 0.0 %
Date: 2017-11-07 Branches: 0 769 0.0 %

Line Branch Exec Source
1
/*	$OpenBSD: rde_update.c,v 1.86 2017/05/30 18:08:15 benno Exp $ */
2
3
/*
4
 * Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
5
 *
6
 * Permission to use, copy, modify, and distribute this software for any
7
 * purpose with or without fee is hereby granted, provided that the above
8
 * copyright notice and this permission notice appear in all copies.
9
 *
10
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
 */
18
#include <sys/types.h>
19
#include <sys/queue.h>
20
21
#include <limits.h>
22
#include <stdlib.h>
23
#include <string.h>
24
#include <siphash.h>
25
26
#include "bgpd.h"
27
#include "rde.h"
28
#include "log.h"
29
30
in_addr_t	up_get_nexthop(struct rde_peer *, struct rde_aspath *);
31
int		up_generate_mp_reach(struct rde_peer *, struct update_attr *,
32
		    struct rde_aspath *, u_int8_t);
33
int		up_generate_attr(struct rde_peer *, struct update_attr *,
34
		    struct rde_aspath *, u_int8_t);
35
36
/* update stuff. */
37
struct update_prefix {
38
	TAILQ_ENTRY(update_prefix)	 prefix_l;
39
	RB_ENTRY(update_prefix)		 entry;
40
	struct uplist_prefix		*prefix_h;
41
	struct bgpd_addr		 prefix;
42
	int				 prefixlen;
43
};
44
45
struct update_attr {
46
	TAILQ_ENTRY(update_attr)	 attr_l;
47
	RB_ENTRY(update_attr)		 entry;
48
	struct uplist_prefix		 prefix_h;
49
	u_char				*attr;
50
	u_char				*mpattr;
51
	u_int32_t			 attr_hash;
52
	u_int16_t			 attr_len;
53
	u_int16_t			 mpattr_len;
54
};
55
56
void	up_clear(struct uplist_attr *, struct uplist_prefix *);
57
int	up_prefix_cmp(struct update_prefix *, struct update_prefix *);
58
int	up_attr_cmp(struct update_attr *, struct update_attr *);
59
int	up_add(struct rde_peer *, struct update_prefix *, struct update_attr *);
60
61
RB_PROTOTYPE(uptree_prefix, update_prefix, entry, up_prefix_cmp)
62
RB_GENERATE(uptree_prefix, update_prefix, entry, up_prefix_cmp)
63
64
RB_PROTOTYPE(uptree_attr, update_attr, entry, up_attr_cmp)
65
RB_GENERATE(uptree_attr, update_attr, entry, up_attr_cmp)
66
67
SIPHASH_KEY uptree_key;
68
69
void
70
up_init(struct rde_peer *peer)
71
{
72
	u_int8_t	i;
73
74
	for (i = 0; i < AID_MAX; i++) {
75
		TAILQ_INIT(&peer->updates[i]);
76
		TAILQ_INIT(&peer->withdraws[i]);
77
	}
78
	RB_INIT(&peer->up_prefix);
79
	RB_INIT(&peer->up_attrs);
80
	peer->up_pcnt = 0;
81
	peer->up_acnt = 0;
82
	peer->up_nlricnt = 0;
83
	peer->up_wcnt = 0;
84
	arc4random_buf(&uptree_key, sizeof(uptree_key));
85
}
86
87
void
88
up_clear(struct uplist_attr *updates, struct uplist_prefix *withdraws)
89
{
90
	struct update_attr	*ua;
91
	struct update_prefix	*up;
92
93
	while ((ua = TAILQ_FIRST(updates)) != NULL) {
94
		TAILQ_REMOVE(updates, ua, attr_l);
95
		while ((up = TAILQ_FIRST(&ua->prefix_h)) != NULL) {
96
			TAILQ_REMOVE(&ua->prefix_h, up, prefix_l);
97
			free(up);
98
		}
99
		free(ua->attr);
100
		free(ua->mpattr);
101
		free(ua);
102
	}
103
104
	while ((up = TAILQ_FIRST(withdraws)) != NULL) {
105
		TAILQ_REMOVE(withdraws, up, prefix_l);
106
		free(up);
107
	}
108
}
109
110
void
111
up_down(struct rde_peer *peer)
112
{
113
	u_int8_t	i;
114
115
	for (i = 0; i < AID_MAX; i++)
116
		up_clear(&peer->updates[i], &peer->withdraws[i]);
117
118
	RB_INIT(&peer->up_prefix);
119
	RB_INIT(&peer->up_attrs);
120
121
	peer->up_pcnt = 0;
122
	peer->up_acnt = 0;
123
	peer->up_nlricnt = 0;
124
	peer->up_wcnt = 0;
125
}
126
127
int
128
up_prefix_cmp(struct update_prefix *a, struct update_prefix *b)
129
{
130
	int	i;
131
132
	if (a->prefix.aid < b->prefix.aid)
133
		return (-1);
134
	if (a->prefix.aid > b->prefix.aid)
135
		return (1);
136
137
	switch (a->prefix.aid) {
138
	case AID_INET:
139
		if (ntohl(a->prefix.v4.s_addr) < ntohl(b->prefix.v4.s_addr))
140
			return (-1);
141
		if (ntohl(a->prefix.v4.s_addr) > ntohl(b->prefix.v4.s_addr))
142
			return (1);
143
		break;
144
	case AID_INET6:
145
		i = memcmp(&a->prefix.v6, &b->prefix.v6,
146
		    sizeof(struct in6_addr));
147
		if (i > 0)
148
			return (1);
149
		if (i < 0)
150
			return (-1);
151
		break;
152
	case AID_VPN_IPv4:
153
		if (betoh64(a->prefix.vpn4.rd) < betoh64(b->prefix.vpn4.rd))
154
			return (-1);
155
		if (betoh64(a->prefix.vpn4.rd) > betoh64(b->prefix.vpn4.rd))
156
			return (1);
157
		if (ntohl(a->prefix.v4.s_addr) < ntohl(b->prefix.v4.s_addr))
158
			return (-1);
159
		if (ntohl(a->prefix.v4.s_addr) > ntohl(b->prefix.v4.s_addr))
160
			return (1);
161
		if (a->prefixlen < b->prefixlen)
162
			return (-1);
163
		if (a->prefixlen > b->prefixlen)
164
			return (1);
165
		if (a->prefix.vpn4.labellen < b->prefix.vpn4.labellen)
166
			return (-1);
167
		if (a->prefix.vpn4.labellen > b->prefix.vpn4.labellen)
168
			return (1);
169
		return (memcmp(a->prefix.vpn4.labelstack,
170
		    b->prefix.vpn4.labelstack, a->prefix.vpn4.labellen));
171
	default:
172
		fatalx("pt_prefix_cmp: unknown af");
173
	}
174
	if (a->prefixlen < b->prefixlen)
175
		return (-1);
176
	if (a->prefixlen > b->prefixlen)
177
		return (1);
178
	return (0);
179
}
180
181
int
182
up_attr_cmp(struct update_attr *a, struct update_attr *b)
183
{
184
	int	r;
185
186
	if ((r = a->attr_hash - b->attr_hash) != 0)
187
		return (r);
188
	if ((r = a->attr_len - b->attr_len) != 0)
189
		return (r);
190
	if ((r = a->mpattr_len - b->mpattr_len) != 0)
191
		return (r);
192
	if ((r = memcmp(a->mpattr, b->mpattr, a->mpattr_len)) != 0)
193
		return (r);
194
	return (memcmp(a->attr, b->attr, a->attr_len));
195
}
196
197
int
198
up_add(struct rde_peer *peer, struct update_prefix *p, struct update_attr *a)
199
{
200
	struct update_attr	*na = NULL;
201
	struct update_prefix	*np;
202
	struct uplist_attr	*upl = NULL;
203
	struct uplist_prefix	*wdl = NULL;
204
205
	upl = &peer->updates[p->prefix.aid];
206
	wdl = &peer->withdraws[p->prefix.aid];
207
208
	/* 1. search for attr */
209
	if (a != NULL && (na = RB_FIND(uptree_attr, &peer->up_attrs, a)) ==
210
	    NULL) {
211
		/* 1.1 if not found -> add */
212
		TAILQ_INIT(&a->prefix_h);
213
		if (RB_INSERT(uptree_attr, &peer->up_attrs, a) != NULL) {
214
			log_warnx("uptree_attr insert failed");
215
			/* cleanup */
216
			free(a->attr);
217
			free(a->mpattr);
218
			free(a);
219
			free(p);
220
			return (-1);
221
		}
222
		TAILQ_INSERT_TAIL(upl, a, attr_l);
223
		peer->up_acnt++;
224
	} else {
225
		/* 1.2 if found -> use that, free a */
226
		if (a != NULL) {
227
			free(a->attr);
228
			free(a->mpattr);
229
			free(a);
230
			a = na;
231
			/* move to end of update queue */
232
			TAILQ_REMOVE(upl, a, attr_l);
233
			TAILQ_INSERT_TAIL(upl, a, attr_l);
234
		}
235
	}
236
237
	/* 2. search for prefix */
238
	if ((np = RB_FIND(uptree_prefix, &peer->up_prefix, p)) == NULL) {
239
		/* 2.1 if not found -> add */
240
		if (RB_INSERT(uptree_prefix, &peer->up_prefix, p) != NULL) {
241
			log_warnx("uptree_prefix insert failed");
242
			/*
243
			 * cleanup. But do not free a because it is already
244
			 * linked or NULL. up_dump_attrnlri() will remove and
245
			 * free the empty attribute later.
246
			 */
247
			free(p);
248
			return (-1);
249
		}
250
		peer->up_pcnt++;
251
	} else {
252
		/* 2.2 if found -> use that and free p */
253
		TAILQ_REMOVE(np->prefix_h, np, prefix_l);
254
		free(p);
255
		p = np;
256
		if (p->prefix_h == wdl)
257
			peer->up_wcnt--;
258
		else
259
			peer->up_nlricnt--;
260
	}
261
	/* 3. link prefix to attr */
262
	if (a == NULL) {
263
		TAILQ_INSERT_TAIL(wdl, p, prefix_l);
264
		p->prefix_h = wdl;
265
		peer->up_wcnt++;
266
	} else {
267
		TAILQ_INSERT_TAIL(&a->prefix_h, p, prefix_l);
268
		p->prefix_h = &a->prefix_h;
269
		peer->up_nlricnt++;
270
	}
271
	return (0);
272
}
273
274
int
275
up_test_update(struct rde_peer *peer, struct prefix *p)
276
{
277
	struct bgpd_addr	 addr;
278
	struct attr		*attr;
279
280
	if (peer->state != PEER_UP)
281
		return (-1);
282
283
	if (p == NULL)
284
		/* no prefix available */
285
		return (0);
286
287
	if (peer == p->aspath->peer)
288
		/* Do not send routes back to sender */
289
		return (0);
290
291
	if (p->aspath->flags & F_ATTR_PARSE_ERR)
292
		fatalx("try to send out a botched path");
293
	if (p->aspath->flags & F_ATTR_LOOP)
294
		fatalx("try to send out a looped path");
295
296
	pt_getaddr(p->prefix, &addr);
297
	if (peer->capa.mp[addr.aid] == 0)
298
		return (-1);
299
300
	if (!p->aspath->peer->conf.ebgp && !peer->conf.ebgp) {
301
		/*
302
		 * route reflector redistribution rules:
303
		 * 1. if announce is set                -> announce
304
		 * 2. old non-client, new non-client    -> no
305
		 * 3. old client, new non-client        -> yes
306
		 * 4. old non-client, new client        -> yes
307
		 * 5. old client, new client            -> yes
308
		 */
309
		if (p->aspath->peer->conf.reflector_client == 0 &&
310
		    peer->conf.reflector_client == 0 &&
311
		    (p->aspath->flags & F_PREFIX_ANNOUNCED) == 0)
312
			/* Do not redistribute updates to ibgp peers */
313
			return (0);
314
	}
315
316
	/* announce type handling */
317
	switch (peer->conf.announce_type) {
318
	case ANNOUNCE_UNDEF:
319
	case ANNOUNCE_NONE:
320
	case ANNOUNCE_DEFAULT_ROUTE:
321
		/*
322
		 * no need to withdraw old prefix as this will be
323
		 * filtered out as well.
324
		 */
325
		return (-1);
326
	case ANNOUNCE_ALL:
327
		break;
328
	case ANNOUNCE_SELF:
329
		/*
330
		 * pass only prefix that have an aspath count
331
		 * of zero this is equal to the ^$ regex.
332
		 */
333
		if (p->aspath->aspath->ascnt != 0)
334
			return (0);
335
		break;
336
	}
337
338
	/* well known communities */
339
	if (community_match(p->aspath,
340
	    COMMUNITY_WELLKNOWN, COMMUNITY_NO_ADVERTISE))
341
		return (0);
342
	if (peer->conf.ebgp && community_match(p->aspath,
343
	    COMMUNITY_WELLKNOWN, COMMUNITY_NO_EXPORT))
344
		return (0);
345
	if (peer->conf.ebgp && community_match(p->aspath,
346
	    COMMUNITY_WELLKNOWN, COMMUNITY_NO_EXPSUBCONFED))
347
		return (0);
348
349
	/*
350
	 * Don't send messages back to originator
351
	 * this is not specified in the RFC but seems logical.
352
	 */
353
	if ((attr = attr_optget(p->aspath, ATTR_ORIGINATOR_ID)) != NULL) {
354
		if (memcmp(attr->data, &peer->remote_bgpid,
355
		    sizeof(peer->remote_bgpid)) == 0) {
356
			/* would cause loop don't send */
357
			return (-1);
358
		}
359
	}
360
361
	return (1);
362
}
363
364
int
365
up_generate(struct rde_peer *peer, struct rde_aspath *asp,
366
    struct bgpd_addr *addr, u_int8_t prefixlen)
367
{
368
	struct update_attr		*ua = NULL;
369
	struct update_prefix		*up;
370
	SIPHASH_CTX			ctx;
371
372
	if (asp) {
373
		ua = calloc(1, sizeof(struct update_attr));
374
		if (ua == NULL)
375
			fatal("up_generate");
376
377
		if (up_generate_attr(peer, ua, asp, addr->aid) == -1) {
378
			log_warnx("generation of bgp path attributes failed");
379
			free(ua);
380
			return (-1);
381
		}
382
		/*
383
		 * use aspath_hash as attr_hash, this may be unoptimal
384
		 * but currently I don't care.
385
		 */
386
		SipHash24_Init(&ctx, &uptree_key);
387
		SipHash24_Update(&ctx, ua->attr, ua->attr_len);
388
		if (ua->mpattr)
389
			SipHash24_Update(&ctx, ua->mpattr, ua->mpattr_len);
390
		ua->attr_hash = SipHash24_End(&ctx);
391
	}
392
393
	up = calloc(1, sizeof(struct update_prefix));
394
	if (up == NULL)
395
		fatal("up_generate");
396
	up->prefix = *addr;
397
	up->prefixlen = prefixlen;
398
399
	if (up_add(peer, up, ua) == -1)
400
		return (-1);
401
402
	return (0);
403
}
404
405
void
406
up_generate_updates(struct filter_head *rules, struct rde_peer *peer,
407
    struct prefix *new, struct prefix *old)
408
{
409
	struct rde_aspath		*asp;
410
	struct bgpd_addr		 addr;
411
412
	if (peer->state != PEER_UP)
413
		return;
414
415
	if (new == NULL) {
416
withdraw:
417
		if (up_test_update(peer, old) != 1)
418
			return;
419
420
		pt_getaddr(old->prefix, &addr);
421
		if (rde_filter(rules, NULL, peer, old->aspath, &addr,
422
		    old->prefix->prefixlen, old->aspath->peer) == ACTION_DENY)
423
			return;
424
425
		/* withdraw prefix */
426
		up_generate(peer, NULL, &addr, old->prefix->prefixlen);
427
	} else {
428
		switch (up_test_update(peer, new)) {
429
		case 1:
430
			break;
431
		case 0:
432
			goto withdraw;
433
		case -1:
434
			return;
435
		}
436
437
		pt_getaddr(new->prefix, &addr);
438
		if (rde_filter(rules, &asp, peer, new->aspath, &addr,
439
		    new->prefix->prefixlen, new->aspath->peer) == ACTION_DENY) {
440
			path_put(asp);
441
			goto withdraw;
442
		}
443
		if (asp == NULL)
444
			asp = new->aspath;
445
446
		up_generate(peer, asp, &addr, new->prefix->prefixlen);
447
448
		/* free modified aspath */
449
		if (asp != new->aspath)
450
			path_put(asp);
451
	}
452
}
453
454
/* send a default route to the specified peer */
455
void
456
up_generate_default(struct filter_head *rules, struct rde_peer *peer,
457
    u_int8_t aid)
458
{
459
	struct rde_aspath	*asp, *fasp;
460
	struct bgpd_addr	 addr;
461
462
	if (peer->capa.mp[aid] == 0)
463
		return;
464
465
	asp = path_get();
466
	asp->aspath = aspath_get(NULL, 0);
467
	asp->origin = ORIGIN_IGP;
468
	/* the other default values are OK, nexthop is once again NULL */
469
470
	/*
471
	 * XXX apply default overrides. Not yet possible, mainly a parse.y
472
	 * problem.
473
	 */
474
	/* rde_apply_set(asp, set, af, NULL ???, DIR_IN); */
475
476
	/* filter as usual */
477
	bzero(&addr, sizeof(addr));
478
	addr.aid = aid;
479
480
	if (rde_filter(rules, &fasp, peer, asp, &addr, 0, NULL) ==
481
	    ACTION_DENY) {
482
		path_put(fasp);
483
		path_put(asp);
484
		return;
485
	}
486
487
	/* generate update */
488
	if (fasp != NULL)
489
		up_generate(peer, fasp, &addr, 0);
490
	else
491
		up_generate(peer, asp, &addr, 0);
492
493
	/* no longer needed */
494
	path_put(fasp);
495
	path_put(asp);
496
}
497
498
/* generate a EoR marker in the update list. This is a horrible hack. */
499
int
500
up_generate_marker(struct rde_peer *peer, u_int8_t aid)
501
{
502
	struct update_attr	*ua;
503
	struct update_attr	*na = NULL;
504
	struct uplist_attr	*upl = NULL;
505
506
	ua = calloc(1, sizeof(struct update_attr));
507
	if (ua == NULL)
508
		fatal("up_generate_marker");
509
510
	upl = &peer->updates[aid];
511
512
	/* 1. search for attr */
513
	if ((na = RB_FIND(uptree_attr, &peer->up_attrs, ua)) == NULL) {
514
		/* 1.1 if not found -> add */
515
		TAILQ_INIT(&ua->prefix_h);
516
		if (RB_INSERT(uptree_attr, &peer->up_attrs, ua) != NULL) {
517
			log_warnx("uptree_attr insert failed");
518
			/* cleanup */
519
			free(ua);
520
			return (-1);
521
		}
522
		TAILQ_INSERT_TAIL(upl, ua, attr_l);
523
		peer->up_acnt++;
524
	} else {
525
		/* 1.2 if found -> use that, free ua */
526
		free(ua);
527
		ua = na;
528
		/* move to end of update queue */
529
		TAILQ_REMOVE(upl, ua, attr_l);
530
		TAILQ_INSERT_TAIL(upl, ua, attr_l);
531
	}
532
	return (0);
533
}
534
535
u_char	up_attr_buf[4096];
536
537
/* only for IPv4 */
538
in_addr_t
539
up_get_nexthop(struct rde_peer *peer, struct rde_aspath *a)
540
{
541
	in_addr_t	mask;
542
543
	/* nexthop, already network byte order */
544
	if (a->flags & F_NEXTHOP_NOMODIFY) {
545
		/* no modify flag set */
546
		if (a->nexthop == NULL)
547
			return (peer->local_v4_addr.v4.s_addr);
548
		else
549
			return (a->nexthop->exit_nexthop.v4.s_addr);
550
	} else if (a->flags & F_NEXTHOP_SELF)
551
		return (peer->local_v4_addr.v4.s_addr);
552
	else if (!peer->conf.ebgp) {
553
		/*
554
		 * If directly connected use peer->local_v4_addr
555
		 * this is only true for announced networks.
556
		 */
557
		if (a->nexthop == NULL)
558
			return (peer->local_v4_addr.v4.s_addr);
559
		else if (a->nexthop->exit_nexthop.v4.s_addr ==
560
		    peer->remote_addr.v4.s_addr)
561
			/*
562
			 * per RFC: if remote peer address is equal to
563
			 * the nexthop set the nexthop to our local address.
564
			 * This reduces the risk of routing loops.
565
			 */
566
			return (peer->local_v4_addr.v4.s_addr);
567
		else
568
			return (a->nexthop->exit_nexthop.v4.s_addr);
569
	} else if (peer->conf.distance == 1) {
570
		/* ebgp directly connected */
571
		if (a->nexthop != NULL &&
572
		    a->nexthop->flags & NEXTHOP_CONNECTED) {
573
			mask = htonl(
574
			    prefixlen2mask(a->nexthop->nexthop_netlen));
575
			if ((peer->remote_addr.v4.s_addr & mask) ==
576
			    (a->nexthop->nexthop_net.v4.s_addr & mask))
577
				/* nexthop and peer are in the same net */
578
				return (a->nexthop->exit_nexthop.v4.s_addr);
579
			else
580
				return (peer->local_v4_addr.v4.s_addr);
581
		} else
582
			return (peer->local_v4_addr.v4.s_addr);
583
	} else
584
		/* ebgp multihop */
585
		/*
586
		 * For ebgp multihop nh->flags should never have
587
		 * NEXTHOP_CONNECTED set so it should be possible to unify the
588
		 * two ebgp cases. But this is safe and RFC compliant.
589
		 */
590
		return (peer->local_v4_addr.v4.s_addr);
591
}
592
593
int
594
up_generate_mp_reach(struct rde_peer *peer, struct update_attr *upa,
595
    struct rde_aspath *a, u_int8_t aid)
596
{
597
	u_int16_t	tmp;
598
599
	switch (aid) {
600
	case AID_INET6:
601
		upa->mpattr_len = 21; /* AFI + SAFI + NH LEN + NH + Reserved */
602
		upa->mpattr = malloc(upa->mpattr_len);
603
		if (upa->mpattr == NULL)
604
			fatal("up_generate_mp_reach");
605
		if (aid2afi(aid, &tmp, &upa->mpattr[2]))
606
			fatalx("up_generate_mp_reachi: bad AID");
607
		tmp = htons(tmp);
608
		memcpy(upa->mpattr, &tmp, sizeof(tmp));
609
		upa->mpattr[3] = sizeof(struct in6_addr);
610
		upa->mpattr[20] = 0; /* Reserved must be 0 */
611
612
		/* nexthop dance see also up_get_nexthop() */
613
		if (a->flags & F_NEXTHOP_NOMODIFY) {
614
			/* no modify flag set */
615
			if (a->nexthop == NULL)
616
				memcpy(&upa->mpattr[4], &peer->local_v6_addr.v6,
617
				    sizeof(struct in6_addr));
618
			else
619
				memcpy(&upa->mpattr[4],
620
				    &a->nexthop->exit_nexthop.v6,
621
				    sizeof(struct in6_addr));
622
		} else if (a->flags & F_NEXTHOP_SELF)
623
			memcpy(&upa->mpattr[4], &peer->local_v6_addr.v6,
624
			    sizeof(struct in6_addr));
625
		else if (!peer->conf.ebgp) {
626
			/* ibgp */
627
			if (a->nexthop == NULL ||
628
			    (a->nexthop->exit_nexthop.aid == AID_INET6 &&
629
			    !memcmp(&a->nexthop->exit_nexthop.v6,
630
			    &peer->remote_addr.v6, sizeof(struct in6_addr))))
631
				memcpy(&upa->mpattr[4], &peer->local_v6_addr.v6,
632
				    sizeof(struct in6_addr));
633
			else
634
				memcpy(&upa->mpattr[4],
635
				    &a->nexthop->exit_nexthop.v6,
636
				    sizeof(struct in6_addr));
637
		} else if (peer->conf.distance == 1) {
638
			/* ebgp directly connected */
639
			if (a->nexthop != NULL &&
640
			    a->nexthop->flags & NEXTHOP_CONNECTED)
641
				if (prefix_compare(&peer->remote_addr,
642
				    &a->nexthop->nexthop_net,
643
				    a->nexthop->nexthop_netlen) == 0) {
644
					/*
645
					 * nexthop and peer are in the same
646
					 * subnet
647
					 */
648
					memcpy(&upa->mpattr[4],
649
					    &a->nexthop->exit_nexthop.v6,
650
					    sizeof(struct in6_addr));
651
					return (0);
652
				}
653
			memcpy(&upa->mpattr[4], &peer->local_v6_addr.v6,
654
			    sizeof(struct in6_addr));
655
		} else
656
			/* ebgp multihop */
657
			memcpy(&upa->mpattr[4], &peer->local_v6_addr.v6,
658
			    sizeof(struct in6_addr));
659
		return (0);
660
	case AID_VPN_IPv4:
661
		upa->mpattr_len = 17; /* AFI + SAFI + NH LEN + NH + Reserved */
662
		upa->mpattr = calloc(upa->mpattr_len, 1);
663
		if (upa->mpattr == NULL)
664
			fatal("up_generate_mp_reach");
665
		if (aid2afi(aid, &tmp, &upa->mpattr[2]))
666
			fatalx("up_generate_mp_reachi: bad AID");
667
		tmp = htons(tmp);
668
		memcpy(upa->mpattr, &tmp, sizeof(tmp));
669
		upa->mpattr[3] = sizeof(u_int64_t) + sizeof(struct in_addr);
670
671
		/* nexthop dance see also up_get_nexthop() */
672
		if (a->flags & F_NEXTHOP_NOMODIFY) {
673
			/* no modify flag set */
674
			if (a->nexthop == NULL)
675
				memcpy(&upa->mpattr[12],
676
				    &peer->local_v4_addr.v4,
677
				    sizeof(struct in_addr));
678
			else
679
				/* nexthops are stored as IPv4 addrs */
680
				memcpy(&upa->mpattr[12],
681
				    &a->nexthop->exit_nexthop.v4,
682
				    sizeof(struct in_addr));
683
		} else if (a->flags & F_NEXTHOP_SELF)
684
			memcpy(&upa->mpattr[12], &peer->local_v4_addr.v4,
685
			    sizeof(struct in_addr));
686
		else if (!peer->conf.ebgp) {
687
			/* ibgp */
688
			if (a->nexthop == NULL ||
689
			    (a->nexthop->exit_nexthop.aid == AID_INET &&
690
			    !memcmp(&a->nexthop->exit_nexthop.v4,
691
			    &peer->remote_addr.v4, sizeof(struct in_addr))))
692
				memcpy(&upa->mpattr[12],
693
				    &peer->local_v4_addr.v4,
694
				    sizeof(struct in_addr));
695
			else
696
				memcpy(&upa->mpattr[12],
697
				    &a->nexthop->exit_nexthop.v4,
698
				    sizeof(struct in_addr));
699
		} else if (peer->conf.distance == 1) {
700
			/* ebgp directly connected */
701
			if (a->nexthop != NULL &&
702
			    a->nexthop->flags & NEXTHOP_CONNECTED)
703
				if (prefix_compare(&peer->remote_addr,
704
				    &a->nexthop->nexthop_net,
705
				    a->nexthop->nexthop_netlen) == 0) {
706
					/*
707
					 * nexthop and peer are in the same
708
					 * subnet
709
					 */
710
					memcpy(&upa->mpattr[12],
711
					    &a->nexthop->exit_nexthop.v4,
712
					    sizeof(struct in_addr));
713
					return (0);
714
				}
715
			memcpy(&upa->mpattr[12], &peer->local_v4_addr.v4,
716
			    sizeof(struct in_addr));
717
		} else
718
			/* ebgp multihop */
719
			memcpy(&upa->mpattr[12], &peer->local_v4_addr.v4,
720
			    sizeof(struct in_addr));
721
		return (0);
722
	default:
723
		break;
724
	}
725
	return (-1);
726
}
727
728
int
729
up_generate_attr(struct rde_peer *peer, struct update_attr *upa,
730
    struct rde_aspath *a, u_int8_t aid)
731
{
732
	struct attr	*oa, *newaggr = NULL;
733
	u_char		*pdata;
734
	u_int32_t	 tmp32;
735
	in_addr_t	 nexthop;
736
	int		 flags, r, ismp = 0, neednewpath = 0;
737
	u_int16_t	 len = sizeof(up_attr_buf), wlen = 0, plen;
738
	u_int8_t	 l;
739
	u_int16_t	 nlen = 0;
740
	u_char		*ndata = NULL;
741
742
	/* origin */
743
	if ((r = attr_write(up_attr_buf + wlen, len, ATTR_WELL_KNOWN,
744
	    ATTR_ORIGIN, &a->origin, 1)) == -1)
745
		return (-1);
746
	wlen += r; len -= r;
747
748
	/* aspath */
749
	if (!peer->conf.ebgp ||
750
	    peer->conf.flags & PEERFLAG_TRANS_AS)
751
		pdata = aspath_prepend(a->aspath, peer->conf.local_as, 0,
752
		    &plen);
753
	else
754
		pdata = aspath_prepend(a->aspath, peer->conf.local_as, 1,
755
		    &plen);
756
757
	if (!rde_as4byte(peer))
758
		pdata = aspath_deflate(pdata, &plen, &neednewpath);
759
760
	if ((r = attr_write(up_attr_buf + wlen, len, ATTR_WELL_KNOWN,
761
	    ATTR_ASPATH, pdata, plen)) == -1)
762
		return (-1);
763
	wlen += r; len -= r;
764
	free(pdata);
765
766
	switch (aid) {
767
	case AID_INET:
768
		nexthop = up_get_nexthop(peer, a);
769
		if ((r = attr_write(up_attr_buf + wlen, len, ATTR_WELL_KNOWN,
770
		    ATTR_NEXTHOP, &nexthop, 4)) == -1)
771
			return (-1);
772
		wlen += r; len -= r;
773
		break;
774
	default:
775
		ismp = 1;
776
		break;
777
	}
778
779
	/*
780
	 * The old MED from other peers MUST not be announced to others
781
	 * unless the MED is originating from us or the peer is an IBGP one.
782
	 * Only exception are routers with "transparent-as yes" set.
783
	 */
784
	if (a->flags & F_ATTR_MED && (!peer->conf.ebgp ||
785
	    a->flags & F_ATTR_MED_ANNOUNCE ||
786
	    peer->conf.flags & PEERFLAG_TRANS_AS)) {
787
		tmp32 = htonl(a->med);
788
		if ((r = attr_write(up_attr_buf + wlen, len, ATTR_OPTIONAL,
789
		    ATTR_MED, &tmp32, 4)) == -1)
790
			return (-1);
791
		wlen += r; len -= r;
792
	}
793
794
	if (!peer->conf.ebgp) {
795
		/* local preference, only valid for ibgp */
796
		tmp32 = htonl(a->lpref);
797
		if ((r = attr_write(up_attr_buf + wlen, len, ATTR_WELL_KNOWN,
798
		    ATTR_LOCALPREF, &tmp32, 4)) == -1)
799
			return (-1);
800
		wlen += r; len -= r;
801
	}
802
803
	/*
804
	 * dump all other path attributes. Following rules apply:
805
	 *  1. well-known attrs: ATTR_ATOMIC_AGGREGATE and ATTR_AGGREGATOR
806
	 *     pass unmodified (enforce flags to correct values)
807
	 *     Actually ATTR_AGGREGATOR may be deflated for OLD 2-byte peers.
808
	 *  2. non-transitive attrs: don't re-announce to ebgp peers
809
	 *  3. transitive known attrs: announce unmodified
810
	 *  4. transitive unknown attrs: set partial bit and re-announce
811
	 */
812
	for (l = 0; l < a->others_len; l++) {
813
		if ((oa = a->others[l]) == NULL)
814
			break;
815
		switch (oa->type) {
816
		case ATTR_ATOMIC_AGGREGATE:
817
			if ((r = attr_write(up_attr_buf + wlen, len,
818
			    ATTR_WELL_KNOWN, ATTR_ATOMIC_AGGREGATE,
819
			    NULL, 0)) == -1)
820
				return (-1);
821
			break;
822
		case ATTR_AGGREGATOR:
823
			if (!rde_as4byte(peer)) {
824
				/* need to deflate the aggregator */
825
				u_int8_t	t[6];
826
				u_int16_t	tas;
827
828
				if ((!(oa->flags & ATTR_TRANSITIVE)) &&
829
				    peer->conf.ebgp) {
830
					r = 0;
831
					break;
832
				}
833
834
				memcpy(&tmp32, oa->data, sizeof(tmp32));
835
				if (ntohl(tmp32) > USHRT_MAX) {
836
					tas = htons(AS_TRANS);
837
					newaggr = oa;
838
				} else
839
					tas = htons(ntohl(tmp32));
840
841
				memcpy(t, &tas, sizeof(tas));
842
				memcpy(t + sizeof(tas),
843
				    oa->data + sizeof(tmp32),
844
				    oa->len - sizeof(tmp32));
845
				if ((r = attr_write(up_attr_buf + wlen, len,
846
				    oa->flags, oa->type, &t, sizeof(t))) == -1)
847
					return (-1);
848
				break;
849
			}
850
			/* FALLTHROUGH */
851
		case ATTR_COMMUNITIES:
852
		case ATTR_ORIGINATOR_ID:
853
		case ATTR_CLUSTER_LIST:
854
		case ATTR_LARGE_COMMUNITIES:
855
			if ((!(oa->flags & ATTR_TRANSITIVE)) &&
856
			    peer->conf.ebgp) {
857
				r = 0;
858
				break;
859
			}
860
			if ((r = attr_write(up_attr_buf + wlen, len,
861
			    oa->flags, oa->type, oa->data, oa->len)) == -1)
862
				return (-1);
863
			break;
864
		case ATTR_EXT_COMMUNITIES:
865
			/* handle (non-)transitive extended communities */
866
			if (peer->conf.ebgp) {
867
				ndata = community_ext_delete_non_trans(oa->data,
868
				    oa->len, &nlen);
869
870
				if (nlen > 0) {
871
					if ((r = attr_write(up_attr_buf + wlen,
872
					    len, oa->flags, oa->type, ndata,
873
					    nlen)) == -1) {
874
						free(ndata);
875
						return (-1);
876
					}
877
				} else
878
					r = 0;
879
				break;
880
			}
881
			if ((r = attr_write(up_attr_buf + wlen, len,
882
			    oa->flags, oa->type, oa->data, oa->len)) == -1)
883
				return (-1);
884
			break;
885
		default:
886
			/* unknown attribute */
887
			if (!(oa->flags & ATTR_TRANSITIVE)) {
888
				/*
889
				 * RFC 1771:
890
				 * Unrecognized non-transitive optional
891
				 * attributes must be quietly ignored and
892
				 * not passed along to other BGP peers.
893
				 */
894
				r = 0;
895
				break;
896
			}
897
			if ((r = attr_write(up_attr_buf + wlen, len,
898
			    oa->flags | ATTR_PARTIAL, oa->type,
899
			    oa->data, oa->len)) == -1)
900
				return (-1);
901
			break;
902
		}
903
		wlen += r; len -= r;
904
	}
905
906
	/* NEW to OLD conversion when going sending stuff to a 2byte AS peer */
907
	if (neednewpath) {
908
		if (!peer->conf.ebgp ||
909
		    peer->conf.flags & PEERFLAG_TRANS_AS)
910
			pdata = aspath_prepend(a->aspath, peer->conf.local_as,
911
			    0, &plen);
912
		else
913
			pdata = aspath_prepend(a->aspath, peer->conf.local_as,
914
			    1, &plen);
915
		flags = ATTR_OPTIONAL|ATTR_TRANSITIVE;
916
		if (!(a->flags & F_PREFIX_ANNOUNCED))
917
			flags |= ATTR_PARTIAL;
918
		if (plen == 0)
919
			r = 0;
920
		else if ((r = attr_write(up_attr_buf + wlen, len, flags,
921
		    ATTR_AS4_PATH, pdata, plen)) == -1)
922
			return (-1);
923
		wlen += r; len -= r;
924
		free(pdata);
925
	}
926
	if (newaggr) {
927
		flags = ATTR_OPTIONAL|ATTR_TRANSITIVE;
928
		if (!(a->flags & F_PREFIX_ANNOUNCED))
929
			flags |= ATTR_PARTIAL;
930
		if ((r = attr_write(up_attr_buf + wlen, len, flags,
931
		    ATTR_AS4_AGGREGATOR, newaggr->data, newaggr->len)) == -1)
932
			return (-1);
933
		wlen += r; len -= r;
934
	}
935
936
	/* write mp attribute to different buffer */
937
	if (ismp)
938
		if (up_generate_mp_reach(peer, upa, a, aid) == -1)
939
			return (-1);
940
941
	/* the bgp path attributes are now stored in the global buf */
942
	upa->attr = malloc(wlen);
943
	if (upa->attr == NULL)
944
		fatal("up_generate_attr");
945
	memcpy(upa->attr, up_attr_buf, wlen);
946
	upa->attr_len = wlen;
947
	return (wlen);
948
}
949
950
#define MIN_PREFIX_LEN	5	/* 1 byte prefix length + 4 bytes addr */
951
int
952
up_dump_prefix(u_char *buf, int len, struct uplist_prefix *prefix_head,
953
    struct rde_peer *peer)
954
{
955
	struct update_prefix	*upp;
956
	int			 r, wpos = 0;
957
	u_int8_t		 i;
958
959
	while ((upp = TAILQ_FIRST(prefix_head)) != NULL) {
960
		if ((r = prefix_write(buf + wpos, len - wpos,
961
		    &upp->prefix, upp->prefixlen)) == -1)
962
			break;
963
		wpos += r;
964
		if (RB_REMOVE(uptree_prefix, &peer->up_prefix, upp) == NULL)
965
			log_warnx("dequeuing update failed.");
966
		TAILQ_REMOVE(upp->prefix_h, upp, prefix_l);
967
		peer->up_pcnt--;
968
		for (i = 0; i < AID_MAX; i++) {
969
			if (upp->prefix_h == &peer->withdraws[i]) {
970
				peer->up_wcnt--;
971
				peer->prefix_sent_withdraw++;
972
			} else {
973
				peer->up_nlricnt--;
974
				peer->prefix_sent_update++;
975
			}
976
		}
977
		free(upp);
978
	}
979
	return (wpos);
980
}
981
982
int
983
up_dump_attrnlri(u_char *buf, int len, struct rde_peer *peer)
984
{
985
	struct update_attr	*upa;
986
	int			 r, wpos;
987
	u_int16_t		 attr_len;
988
989
	/*
990
	 * It is possible that a queued path attribute has no nlri prefix.
991
	 * Ignore and remove those path attributes.
992
	 */
993
	while ((upa = TAILQ_FIRST(&peer->updates[AID_INET])) != NULL)
994
		if (TAILQ_EMPTY(&upa->prefix_h)) {
995
			attr_len = upa->attr_len;
996
			if (RB_REMOVE(uptree_attr, &peer->up_attrs,
997
			    upa) == NULL)
998
				log_warnx("dequeuing update failed.");
999
			TAILQ_REMOVE(&peer->updates[AID_INET], upa, attr_l);
1000
			free(upa->attr);
1001
			free(upa->mpattr);
1002
			free(upa);
1003
			peer->up_acnt--;
1004
			/* XXX horrible hack,
1005
			 * if attr_len is 0, it is a EoR marker */
1006
			if (attr_len == 0)
1007
				return (-1);
1008
		} else
1009
			break;
1010
1011
	if (upa == NULL || upa->attr_len + MIN_PREFIX_LEN > len) {
1012
		/*
1013
		 * either no packet or not enough space.
1014
		 * The length field needs to be set to zero else it would be
1015
		 * an invalid bgp update.
1016
		 */
1017
		bzero(buf, 2);
1018
		return (2);
1019
	}
1020
1021
	/* first dump the 2-byte path attribute length */
1022
	attr_len = htons(upa->attr_len);
1023
	memcpy(buf, &attr_len, 2);
1024
	wpos = 2;
1025
1026
	/* then the path attributes themselves */
1027
	memcpy(buf + wpos, upa->attr, upa->attr_len);
1028
	wpos += upa->attr_len;
1029
1030
	/* last but not least dump the nlri */
1031
	r = up_dump_prefix(buf + wpos, len - wpos, &upa->prefix_h, peer);
1032
	wpos += r;
1033
1034
	/* now check if all prefixes were written */
1035
	if (TAILQ_EMPTY(&upa->prefix_h)) {
1036
		if (RB_REMOVE(uptree_attr, &peer->up_attrs, upa) == NULL)
1037
			log_warnx("dequeuing update failed.");
1038
		TAILQ_REMOVE(&peer->updates[AID_INET], upa, attr_l);
1039
		free(upa->attr);
1040
		free(upa->mpattr);
1041
		free(upa);
1042
		peer->up_acnt--;
1043
	}
1044
1045
	return (wpos);
1046
}
1047
1048
u_char *
up_dump_mp_unreach(u_char *buf, u_int16_t *len, struct rde_peer *peer,
    u_int8_t aid)
{
	int		pos;
	u_int16_t	plen, word;
	u_int16_t	alen = 2;	/* attribute header (without len) */
	u_int8_t	safi, flags = ATTR_OPTIONAL;

	/*
	 * Leave room up front for the withdraw length, the attribute
	 * length, the attribute header and the MP attribute header; the
	 * attribute is then assembled back to front around the prefixes.
	 */
	pos = 2 + 2 + 4 + 3;

	if (*len < pos)
		return (NULL);

	plen = up_dump_prefix(buf + pos, *len - pos,
	    &peer->withdraws[aid], peer);
	if (plen == 0)
		return (NULL);

	plen += 3;	/* afi + safi */

	/* AFI and SAFI, prepended in reverse order */
	if (aid2afi(aid, &word, &safi))
		fatalx("up_dump_mp_unreach: bad AID");
	buf[--pos] = safi;
	pos -= sizeof(u_int16_t);
	word = htons(word);
	memcpy(buf + pos, &word, sizeof(u_int16_t));

	/* attribute length, extended form when it exceeds one byte */
	if (plen > 255) {
		alen += 2 + plen;
		flags |= ATTR_EXTLEN;
		pos -= sizeof(u_int16_t);
		word = htons(plen);
		memcpy(buf + pos, &word, sizeof(u_int16_t));
	} else {
		alen += 1 + plen;
		buf[--pos] = (u_char)plen;
	}

	/* attribute type and flags */
	buf[--pos] = (u_char)ATTR_MP_UNREACH_NLRI;
	buf[--pos] = flags;

	/* total path attribute length */
	pos -= sizeof(u_int16_t);
	word = htons(alen);
	memcpy(buf + pos, &word, sizeof(u_int16_t));

	/* empty IPv4 withdrawn routes field */
	pos -= sizeof(u_int16_t);
	bzero(buf + pos, sizeof(u_int16_t));

	if (pos < 0)
		fatalx("up_dump_mp_unreach: buffer underflow");

	/* total length includes the two 2-byte length fields. */
	*len = alen + 2 * sizeof(u_int16_t);

	return (buf + pos);
}
1115
1116
/*
 * Build one UPDATE message for a multiprotocol AID into buf.  The message
 * is assembled back to front: the NLRI prefixes are written at the far
 * end first, then the MP_REACH_NLRI attribute header, the regular path
 * attributes and finally the two leading 2-byte length fields are
 * prepended in front of them.  On success the finished message starts at
 * buf + <return value> and *len is set to its total size.  Returns -1
 * when an End-of-RIB marker was dequeued, -2 when there is nothing to
 * send or not enough room in buf.
 */
int
up_dump_mp_reach(u_char *buf, u_int16_t *len, struct rde_peer *peer,
    u_int8_t aid)
{
	struct update_attr	*upa;
	int			wpos;
	u_int16_t		attr_len, datalen, tmp;
	u_int8_t		flags = ATTR_OPTIONAL;

	/*
	 * It is possible that a queued path attribute has no nlri prefix.
	 * Ignore and remove those path attributes.
	 */
	while ((upa = TAILQ_FIRST(&peer->updates[aid])) != NULL)
		if (TAILQ_EMPTY(&upa->prefix_h)) {
			attr_len = upa->attr_len;
			if (RB_REMOVE(uptree_attr, &peer->up_attrs,
			    upa) == NULL)
				log_warnx("dequeuing update failed.");
			TAILQ_REMOVE(&peer->updates[aid], upa, attr_l);
			free(upa->attr);
			free(upa->mpattr);
			free(upa);
			peer->up_acnt--;
			/* XXX horrible hack,
			 * if attr_len is 0, it is a EoR marker */
			if (attr_len == 0)
				return (-1);
		} else
			break;

	if (upa == NULL)
		return (-2);

	/*
	 * reserve space for attr len, the attributes, the
	 * mp attribute and the attribute header
	 */
	wpos = 2 + 2 + upa->attr_len + 4 + upa->mpattr_len;
	if (*len < wpos)
		return (-2);

	/* write the NLRI prefixes at the far end of the reserved area */
	datalen = up_dump_prefix(buf + wpos, *len - wpos,
	    &upa->prefix_h, peer);
	if (datalen == 0)
		return (-2);

	if (upa->mpattr_len == 0 || upa->mpattr == NULL)
		fatalx("mulitprotocol update without MP attrs");

	/* prepend the MP attribute payload (nexthop, afi/safi, ...) */
	datalen += upa->mpattr_len;
	wpos -= upa->mpattr_len;
	memcpy(buf + wpos, upa->mpattr, upa->mpattr_len);

	/*
	 * MP_REACH_NLRI attribute header: extended 2-byte length when the
	 * payload exceeds 255 bytes (4-byte header), 1-byte otherwise
	 * (3-byte header).  datalen grows by the header size so that it
	 * ends up covering the whole attribute.
	 */
	if (datalen > 255) {
		wpos -= 2;
		tmp = htons(datalen);
		memcpy(buf + wpos, &tmp, sizeof(tmp));
		datalen += 4;
		flags |= ATTR_EXTLEN;
	} else {
		buf[--wpos] = (u_char)datalen;
		datalen += 3;
	}
	buf[--wpos] = (u_char)ATTR_MP_REACH_NLRI;
	buf[--wpos] = flags;

	/* prepend the regular path attributes */
	datalen += upa->attr_len;
	wpos -= upa->attr_len;
	memcpy(buf + wpos, upa->attr, upa->attr_len);

	/* 4 bytes must remain for the two length fields below */
	if (wpos < 4)
		fatalx("Grrr, mp_reach buffer fucked up");

	/* 2-byte total path attribute length */
	wpos -= 2;
	tmp = htons(datalen);
	memcpy(buf + wpos, &tmp, sizeof(tmp));

	/* zero-length IPv4 withdrawn routes field */
	wpos -= 2;
	bzero(buf + wpos, 2);

	/* now check if all prefixes were written */
	if (TAILQ_EMPTY(&upa->prefix_h)) {
		if (RB_REMOVE(uptree_attr, &peer->up_attrs, upa) == NULL)
			log_warnx("dequeuing update failed.");
		TAILQ_REMOVE(&peer->updates[aid], upa, attr_l);
		free(upa->attr);
		free(upa->mpattr);
		free(upa);
		peer->up_acnt--;
	}

	/* total size: attributes plus the two 2-byte length fields */
	*len = datalen + 4;
	return (wpos);
}