GCC Code Coverage Report
Directory: ./ Exec Total Coverage
File: usr.sbin/ospf6d/lsupdate.c Lines: 0 269 0.0 %
Date: 2017-11-07 Branches: 0 204 0.0 %

Line Branch Exec Source
1
/*	$OpenBSD: lsupdate.c,v 1.13 2015/01/28 22:03:17 bluhm Exp $ */
2
3
/*
4
 * Copyright (c) 2005 Claudio Jeker <claudio@openbsd.org>
5
 * Copyright (c) 2004, 2005, 2007 Esben Norby <norby@openbsd.org>
6
 *
7
 * Permission to use, copy, modify, and distribute this software for any
8
 * purpose with or without fee is hereby granted, provided that the above
9
 * copyright notice and this permission notice appear in all copies.
10
 *
11
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
 */
19
20
#include <sys/types.h>
21
#include <sys/socket.h>
22
#include <netinet/in.h>
23
#include <netinet/ip6.h>
24
#include <netinet/ip_ah.h>
25
#include <arpa/inet.h>
26
27
#include <stdlib.h>
28
#include <string.h>
29
#include <siphash.h>
30
31
#include "ospf6.h"
32
#include "ospf6d.h"
33
#include "log.h"
34
#include "ospfe.h"
35
#include "rde.h"
36
37
extern struct ospfd_conf	*oeconf;
38
extern struct imsgev		*iev_rde;
39
40
struct ibuf *prepare_ls_update(struct iface *, int);
41
int	add_ls_update(struct ibuf *, struct iface *, void *, int, u_int16_t);
42
int	send_ls_update(struct ibuf *, struct iface *, struct in6_addr, u_int32_t);
43
44
void	ls_retrans_list_insert(struct nbr *, struct lsa_entry *);
45
void	ls_retrans_list_remove(struct nbr *, struct lsa_entry *);
46
47
/* link state update packet handling */
48
/*
 * Flood lsa_hdr/data received from neighbor `originator` out on `iface`:
 * load the per-neighbor retransmission lists and queue the initial flood.
 * Returns non-zero when the caller must NOT ack the received LSA.
 */
int
lsa_flood(struct iface *iface, struct nbr *originator, struct lsa_hdr *lsa_hdr,
    void *data)
{
	struct nbr		*nbr;
	struct lsa_entry	*le = NULL;
	int			 queued = 0, dont_ack = 0;
	int			 r;

	LIST_FOREACH(nbr, &iface->nbr_list, entry) {
		/* skip the pseudo-neighbor representing this router */
		if (nbr == iface->self)
			continue;
		/* only flood to neighbors that reached flooding state */
		if (!(nbr->state & NBR_STA_FLOOD))
			continue;

		/*
		 * As non-DR/BDR all LSAs are kept on the self retrans list;
		 * purge any older instance of this LSA from it (done once).
		 */
		if (iface->state & IF_STA_DROTHER && !queued)
			while ((le = ls_retrans_list_get(iface->self, lsa_hdr)))
			    ls_retrans_list_free(iface->self, le);

		/* drop older instances from this neighbor's retrans list */
		while ((le = ls_retrans_list_get(nbr, lsa_hdr)))
			ls_retrans_list_free(nbr, le);

		/*
		 * If the LSA is still on the neighbor's request list,
		 * compare the instance to flood with the requested one.
		 */
		if (!(nbr->state & NBR_STA_FULL) &&
		    (le = ls_req_list_get(nbr, lsa_hdr)) != NULL) {
			r = lsa_newer(lsa_hdr, le->le_lsa);
			if (r > 0) {
				/* to flood LSA is newer than requested */
				ls_req_list_free(nbr, le);
				/* new needs to be flooded */
			} else if (r < 0) {
				/* to flood LSA is older than requested */
				continue;
			} else {
				/* LSA are equal */
				ls_req_list_free(nbr, le);
				continue;
			}
		}

		/* never flood the LSA back to the neighbor it came from */
		if (nbr == originator) {
			dont_ack++;
			continue;
		}

		/* non DR or BDR router keep all lsa in one retrans list */
		if (iface->state & IF_STA_DROTHER) {
			if (!queued)
				ls_retrans_list_add(iface->self, data,
				    iface->rxmt_interval, 0);
			queued = 1;
		} else {
			ls_retrans_list_add(nbr, data, iface->rxmt_interval, 0);
			queued = 1;
		}
	}

	/* nothing queued means nothing flooded; the LSA may be acked */
	if (!queued)
		return (0);

	if (iface == originator->iface && iface->self != originator) {
		/* LSAs received from the DR/BDR are flooded back: ack them */
		if (iface->dr == originator || iface->bdr == originator)
			return (0);
		/* as backup, leave flooding on this network to the DR */
		if (iface->state & IF_STA_BACKUP)
			return (0);
		dont_ack++;
	}

	/*
	 * initial flood needs to be queued separately, timeout is zero
	 * and oneshot has to be set because the retransimssion queues
	 * are already loaded.
	 */
	switch (iface->type) {
	case IF_TYPE_POINTOPOINT:
	case IF_TYPE_BROADCAST:
		ls_retrans_list_add(iface->self, data, 0, 1);
		break;
	case IF_TYPE_NBMA:
	case IF_TYPE_POINTOMULTIPOINT:
	case IF_TYPE_VIRTUALLINK:
		/* these types flood per neighbor, not per interface */
		LIST_FOREACH(nbr, &iface->nbr_list, entry) {
			if (nbr == iface->self)
				continue;
			if (!(nbr->state & NBR_STA_FLOOD))
				continue;
			if (!TAILQ_EMPTY(&nbr->ls_retrans_list)) {
				le = TAILQ_LAST(&nbr->ls_retrans_list,
				    lsa_head);
				/*
				 * NOTE(review): this accesses le->le_lsa,
				 * while every other retrans-list user reads
				 * le->le_ref->hdr -- verify the member is
				 * valid for retrans entries.
				 */
				if (lsa_hdr->type != le->le_lsa->type ||
				    lsa_hdr->ls_id != le->le_lsa->ls_id ||
				    lsa_hdr->adv_rtr != le->le_lsa->adv_rtr)
					continue;
			}
			ls_retrans_list_add(nbr, data, 0, 1);
		}
		break;
	default:
		fatalx("lsa_flood: unknown interface type");
	}

	/* dont_ack == 2: originator neighbor plus receiving-iface case */
	return (dont_ack == 2);
}
150
151
struct ibuf *
152
prepare_ls_update(struct iface *iface, int bigpkt)
153
{
154
	struct ibuf		*buf;
155
	size_t			 size;
156
157
	size = bigpkt ? IPV6_MAXPACKET : iface->mtu;
158
	if (size < IPV6_MMTU)
159
		size = IPV6_MMTU;
160
	size -= sizeof(struct ip6_hdr);
161
	/*
162
	 * Reserve space for optional ah or esp encryption.  The
163
	 * algorithm is taken from ah_output and esp_output, the
164
	 * values are the maxima of crypto/xform.c.
165
	 */
166
	size -= max(
167
	    /* base-ah-header replay authsize */
168
	    AH_FLENGTH + sizeof(u_int32_t) + 32,
169
	    /* spi sequence ivlen blocksize pad-length next-header authsize */
170
	    2 * sizeof(u_int32_t) + 16 + 16 + 2 * sizeof(u_int8_t) + 32);
171
172
	if ((buf = ibuf_open(size)) == NULL)
173
		fatal("prepare_ls_update");
174
175
	/* OSPF header */
176
	if (gen_ospf_hdr(buf, iface, PACKET_TYPE_LS_UPDATE))
177
		goto fail;
178
179
	/* reserve space for number of lsa field */
180
	if (ibuf_reserve(buf, sizeof(u_int32_t)) == NULL)
181
		goto fail;
182
183
	return (buf);
184
fail:
185
	log_warn("prepare_ls_update");
186
	ibuf_free(buf);
187
	return (NULL);
188
}
189
190
int
191
add_ls_update(struct ibuf *buf, struct iface *iface, void *data, int len,
192
    u_int16_t older)
193
{
194
	size_t		pos;
195
	u_int16_t	age;
196
197
	if (buf->wpos + len >= buf->max)
198
		return (0);
199
200
	pos = buf->wpos;
201
	if (ibuf_add(buf, data, len)) {
202
		log_warn("add_ls_update");
203
		return (0);
204
	}
205
206
	/* age LSA before sending it out */
207
	memcpy(&age, data, sizeof(age));
208
	age = ntohs(age);
209
	if ((age += older + iface->transmit_delay) >= MAX_AGE)
210
		age = MAX_AGE;
211
	age = htons(age);
212
	memcpy(ibuf_seek(buf, pos, sizeof(age)), &age, sizeof(age));
213
214
	return (1);
215
}
216
217
int
218
send_ls_update(struct ibuf *buf, struct iface *iface, struct in6_addr addr,
219
    u_int32_t nlsa)
220
{
221
	int			 ret;
222
223
	nlsa = htonl(nlsa);
224
	memcpy(ibuf_seek(buf, sizeof(struct ospf_hdr), sizeof(nlsa)),
225
	    &nlsa, sizeof(nlsa));
226
	/* calculate checksum */
227
	if (upd_ospf_hdr(buf, iface))
228
		goto fail;
229
230
	ret = send_packet(iface, buf->buf, buf->wpos, &addr);
231
232
	ibuf_free(buf);
233
	return (ret);
234
fail:
235
	log_warn("send_ls_update");
236
	ibuf_free(buf);
237
	return (-1);
238
}
239
240
/*
 * Handle a received link state update packet: a 32bit LSA count
 * followed by that many full LSAs.  Each LSA is length-checked and
 * forwarded to the RDE for processing.
 */
void
recv_ls_update(struct nbr *nbr, char *buf, u_int16_t len)
{
	struct lsa_hdr		 lsa;
	u_int32_t		 nlsa;

	/* packet must at least hold the number-of-LSAs field */
	if (len < sizeof(nlsa)) {
		log_warnx("recv_ls_update: bad packet size, neighbor ID %s",
		    inet_ntoa(nbr->id));
		return;
	}
	memcpy(&nlsa, buf, sizeof(nlsa));
	nlsa = ntohl(nlsa);
	buf += sizeof(nlsa);
	len -= sizeof(nlsa);

	switch (nbr->state) {
	case NBR_STA_DOWN:
	case NBR_STA_ATTEMPT:
	case NBR_STA_INIT:
	case NBR_STA_2_WAY:
	case NBR_STA_XSTRT:
	case NBR_STA_SNAP:
		/* updates are only valid once database exchange started */
		log_debug("recv_ls_update: packet ignored in state %s, "
		    "neighbor ID %s", nbr_state_name(nbr->state),
		    inet_ntoa(nbr->id));
		break;
	case NBR_STA_XCHNG:
	case NBR_STA_LOAD:
	case NBR_STA_FULL:
		for (; nlsa > 0 && len > 0; nlsa--) {
			/* need at least a complete LSA header */
			if (len < sizeof(lsa)) {
				log_warnx("recv_ls_update: bad packet size, "
				    "neighbor ID %s", inet_ntoa(nbr->id));
				return;
			}
			memcpy(&lsa, buf, sizeof(lsa));
			/* advertised LSA length must fit remaining packet */
			if (len < ntohs(lsa.len)) {
				log_warnx("recv_ls_update: bad packet size, "
				    "neighbor ID %s", inet_ntoa(nbr->id));
				return;
			}
			/* hand the raw LSA to the RDE */
			imsg_compose_event(iev_rde, IMSG_LS_UPD, nbr->peerid, 0,
			    -1, buf, ntohs(lsa.len));
			buf += ntohs(lsa.len);
			len -= ntohs(lsa.len);
		}
		/* count and remaining length must both reach exactly zero */
		if (nlsa > 0 || len > 0) {
			log_warnx("recv_ls_update: bad packet size, "
			    "neighbor ID %s", inet_ntoa(nbr->id));
			return;
		}
		break;
	default:
		fatalx("recv_ls_update: unknown neighbor state");
	}
}
297
298
/* link state retransmit list */
299
void
300
ls_retrans_list_add(struct nbr *nbr, struct lsa_hdr *lsa,
301
    unsigned short timeout, unsigned short oneshot)
302
{
303
	struct timeval		 tv;
304
	struct lsa_entry	*le;
305
	struct lsa_ref		*ref;
306
307
	if ((ref = lsa_cache_get(lsa)) == NULL)
308
		fatalx("King Bula sez: somebody forgot to lsa_cache_add");
309
310
	if ((le = calloc(1, sizeof(*le))) == NULL)
311
		fatal("ls_retrans_list_add");
312
313
	le->le_ref = ref;
314
	le->le_when = timeout;
315
	le->le_oneshot = oneshot;
316
317
	ls_retrans_list_insert(nbr, le);
318
319
	if (!evtimer_pending(&nbr->ls_retrans_timer, NULL)) {
320
		timerclear(&tv);
321
		tv.tv_sec = TAILQ_FIRST(&nbr->ls_retrans_list)->le_when;
322
323
		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
324
			fatal("ls_retrans_list_add");
325
	}
326
}
327
328
int
329
ls_retrans_list_del(struct nbr *nbr, struct lsa_hdr *lsa_hdr)
330
{
331
	struct lsa_entry	*le;
332
333
	if ((le = ls_retrans_list_get(nbr, lsa_hdr)) == NULL)
334
		return (-1);
335
	/*
336
	 * Compare LSA with the Ack by comparing not only the seq_num and
337
	 * checksum but also the age field.  Since we only care about MAX_AGE
338
	 * vs. non-MAX_AGE LSA, a simple >= comparison is good enough.  This
339
	 * ensures that a LSA withdrawal is not acked by a previous update.
340
	 */
341
	if (lsa_hdr->seq_num == le->le_ref->hdr.seq_num &&
342
	    lsa_hdr->ls_chksum == le->le_ref->hdr.ls_chksum &&
343
	    ntohs(lsa_hdr->age) >= ntohs(le->le_ref->hdr.age)) {
344
		ls_retrans_list_free(nbr, le);
345
		return (0);
346
	}
347
348
	return (-1);
349
}
350
351
struct lsa_entry *
352
ls_retrans_list_get(struct nbr *nbr, struct lsa_hdr *lsa_hdr)
353
{
354
	struct lsa_entry	*le;
355
356
	TAILQ_FOREACH(le, &nbr->ls_retrans_list, entry) {
357
		if ((lsa_hdr->type == le->le_ref->hdr.type) &&
358
		    (lsa_hdr->ls_id == le->le_ref->hdr.ls_id) &&
359
		    (lsa_hdr->adv_rtr == le->le_ref->hdr.adv_rtr))
360
			return (le);
361
	}
362
	return (NULL);
363
}
364
365
/*
 * Insert `new` into the neighbor's retransmission list.  The list is
 * ordered by expiry and le_when is delta-encoded: each entry stores the
 * time relative to its predecessor, so only the head holds an absolute
 * timeout.
 */
void
ls_retrans_list_insert(struct nbr *nbr, struct lsa_entry *new)
{
	struct lsa_entry	*le;
	unsigned short		 when = new->le_when;

	TAILQ_FOREACH(le, &nbr->ls_retrans_list, entry) {
		if (when < le->le_when) {
			/*
			 * Slot found: store the remaining delta.
			 * NOTE(review): the successor's le_when is not
			 * reduced by the inserted delta here -- confirm
			 * against the ospfd counterpart whether that
			 * adjustment is intentional or missing.
			 */
			new->le_when = when;
			TAILQ_INSERT_BEFORE(le, new, entry);
			nbr->ls_ret_cnt++;
			return;
		}
		/* consume this entry's share of the timeout */
		when -= le->le_when;
	}
	/* expires after everything currently queued: append */
	new->le_when = when;
	TAILQ_INSERT_TAIL(&nbr->ls_retrans_list, new, entry);
	nbr->ls_ret_cnt++;
}
384
385
/*
 * Unlink `le` from the neighbor's retransmission list, keeping the
 * delta-encoded timeouts consistent and rescheduling the timer when
 * the list head changes.
 */
void
ls_retrans_list_remove(struct nbr *nbr, struct lsa_entry *le)
{
	struct timeval		 tv;
	struct lsa_entry	*next = TAILQ_NEXT(le, entry);
	int			 reset = 0;

	/* adjust timeout of next entry */
	if (next)
		next->le_when += le->le_when;

	/* removing the head while the timer is armed needs a reschedule */
	if (TAILQ_FIRST(&nbr->ls_retrans_list) == le &&
	    evtimer_pending(&nbr->ls_retrans_timer, NULL))
		reset = 1;

	TAILQ_REMOVE(&nbr->ls_retrans_list, le, entry);
	nbr->ls_ret_cnt--;

	if (reset && TAILQ_FIRST(&nbr->ls_retrans_list)) {
		if (evtimer_del(&nbr->ls_retrans_timer) == -1)
			fatal("ls_retrans_list_remove");

		timerclear(&tv);
		/* the new head's delta is absolute now that it is first */
		tv.tv_sec = TAILQ_FIRST(&nbr->ls_retrans_list)->le_when;

		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
			fatal("ls_retrans_list_remove");
	}
}
414
415
void
416
ls_retrans_list_free(struct nbr *nbr, struct lsa_entry *le)
417
{
418
	ls_retrans_list_remove(nbr, le);
419
420
	lsa_cache_put(le->le_ref, nbr);
421
	free(le);
422
}
423
424
void
425
ls_retrans_list_clr(struct nbr *nbr)
426
{
427
	struct lsa_entry	*le;
428
429
	while ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL)
430
		ls_retrans_list_free(nbr, le);
431
432
	nbr->ls_ret_cnt = 0;
433
}
434
435
/* ARGSUSED */
436
/*
 * Retransmission timer callback for one neighbor: build an update
 * packet from all expired entries on the retrans list, send it, and
 * re-arm the timer for the next pending entry.  For the self
 * pseudo-neighbor this also performs the initial flood.
 */
void
ls_retrans_timer(int fd, short event, void *bula)
{
	struct timeval		 tv;
	struct timespec		 tp;
	struct in6_addr		 addr;
	struct nbr		*nbr = bula;
	struct lsa_entry	*le;
	struct ibuf		*buf;
	time_t			 now;
	int			 bigpkt, d;
	u_int32_t		 nlsa = 0;

	if ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL)
		le->le_when = 0;	/* timer fired */
	else
		return;			/* queue empty, nothing to do */

	/* monotonic clock so LSA aging survives wall-clock jumps */
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	/* pick the destination address depending on role and type */
	if (nbr->iface->self == nbr) {
		/*
		 * oneshot needs to be set for lsa queued for flooding,
		 * if oneshot is not set then the lsa needs to be converted
		 * because the router switched lately to DR or BDR
		 */
		if (le->le_oneshot && nbr->iface->state & IF_STA_DRORBDR)
			inet_pton(AF_INET6, AllSPFRouters, &addr);
		else if (nbr->iface->state & IF_STA_DRORBDR) {
			/*
			 * old retransmission needs to be converted into
			 * flood by rerunning the lsa_flood.
			 */
			lsa_flood(nbr->iface, nbr, &le->le_ref->hdr,
			    le->le_ref->data);
			ls_retrans_list_free(nbr, le);
			/* ls_retrans_list_free retriggers the timer */
			return;
		} else if (nbr->iface->type == IF_TYPE_POINTOPOINT)
			memcpy(&addr, &nbr->iface->dst, sizeof(addr));
		else
			inet_pton(AF_INET6, AllDRouters, &addr);
	} else
		memcpy(&addr, &nbr->addr, sizeof(addr));

	/*
	 * Allow big ipv6 packets that may get fragmented if a
	 * single lsa might be too big for an unfragmented packet.
	 * To avoid the exact algorithm duplicated here, just make
	 * a good guess.  If the first lsa is bigger than 1024
	 * bytes, reserve a separate big packet for it.  The kernel
	 * will figure out if fragmentation is necessary.  For
	 * smaller lsas, we avoid big packets and fragmentation.
	 */
	bigpkt = le->le_ref->len > 1024;
	if ((buf = prepare_ls_update(nbr->iface, bigpkt)) == NULL) {
		/* retry shortly instead of dropping the whole batch */
		le->le_when = 1;
		goto done;
	}

	/* pack all entries whose timeout has expired (le_when == 0) */
	while ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL &&
	    le->le_when == 0) {
		/* age the LSA by the time it spent in the cache */
		d = now - le->le_ref->stamp;
		if (d < 0)
			d = 0;
		else if (d > MAX_AGE)
			d = MAX_AGE;

		if (add_ls_update(buf, nbr->iface, le->le_ref->data,
		    le->le_ref->len, d) == 0) {
			/* packet full: send what we have so far */
			if (nlsa)
				break;
			/*
			 * A single lsa is too big to fit into an update
			 * packet.  In this case drop the lsa, otherwise
			 * we send empty update packets in an endless loop.
			 */
			log_warnx("ls_retrans_timer: cannot send lsa, dropped");
			log_debug("ls_retrans_timer: type: %04x len: %u",
			    ntohs(le->le_ref->hdr.type), le->le_ref->len);
			ls_retrans_list_free(nbr, le);
			continue;
		}
		nlsa++;
		if (le->le_oneshot)
			ls_retrans_list_free(nbr, le);
		else {
			/* requeue for the next retransmission interval */
			TAILQ_REMOVE(&nbr->ls_retrans_list, le, entry);
			nbr->ls_ret_cnt--;
			le->le_when = nbr->iface->rxmt_interval;
			ls_retrans_list_insert(nbr, le);
		}
		/* do not put additional lsa into fragmented big packet */
		if (bigpkt)
			break;
	}
	send_ls_update(buf, nbr->iface, addr, nlsa);

done:
	/* re-arm the timer for the next pending entry, if any */
	if ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL) {
		timerclear(&tv);
		tv.tv_sec = le->le_when;

		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
			fatal("ls_retrans_timer");
	}
}
544
545
/*
 * LSA cache: reference-counted copies of flooded LSAs shared between
 * all retransmission lists, hashed over the full LSA header.
 */
LIST_HEAD(lsa_cache_head, lsa_ref);

struct lsa_cache {
	struct lsa_cache_head	*hashtbl;	/* power-of-two bucket array */
	u_int32_t		 hashmask;	/* table size - 1 */
} lsacache;

/* random key so the bucket hash is not externally predictable */
SIPHASH_KEY lsacachekey;

struct lsa_ref		*lsa_cache_look(struct lsa_hdr *);
555
556
void
557
lsa_cache_init(u_int32_t hashsize)
558
{
559
	u_int32_t        hs, i;
560
561
	for (hs = 1; hs < hashsize; hs <<= 1)
562
		;
563
	lsacache.hashtbl = calloc(hs, sizeof(struct lsa_cache_head));
564
	if (lsacache.hashtbl == NULL)
565
		fatal("lsa_cache_init");
566
567
	for (i = 0; i < hs; i++)
568
		LIST_INIT(&lsacache.hashtbl[i]);
569
	arc4random_buf(&lsacachekey, sizeof(lsacachekey));
570
571
	lsacache.hashmask = hs - 1;
572
}
573
574
static uint32_t
575
lsa_hash_hdr(const struct lsa_hdr *hdr)
576
{
577
	return SipHash24(&lsacachekey, hdr, sizeof(*hdr));
578
}
579
580
struct lsa_ref *
581
lsa_cache_add(void *data, u_int16_t len)
582
{
583
	struct lsa_cache_head	*head;
584
	struct lsa_ref		*ref, *old;
585
	struct timespec		 tp;
586
587
	if ((ref = calloc(1, sizeof(*ref))) == NULL)
588
		fatal("lsa_cache_add");
589
	memcpy(&ref->hdr, data, sizeof(ref->hdr));
590
591
	if ((old = lsa_cache_look(&ref->hdr))) {
592
		free(ref);
593
		old->refcnt++;
594
		return (old);
595
	}
596
597
	if ((ref->data = malloc(len)) == NULL)
598
		fatal("lsa_cache_add");
599
	memcpy(ref->data, data, len);
600
601
	clock_gettime(CLOCK_MONOTONIC, &tp);
602
	ref->stamp = tp.tv_sec;
603
	ref->len = len;
604
	ref->refcnt = 1;
605
606
	head = &lsacache.hashtbl[lsa_hash_hdr(&ref->hdr) & lsacache.hashmask];
607
	LIST_INSERT_HEAD(head, ref, entry);
608
	return (ref);
609
}
610
611
struct lsa_ref *
612
lsa_cache_get(struct lsa_hdr *lsa_hdr)
613
{
614
	struct lsa_ref		*ref;
615
616
	ref = lsa_cache_look(lsa_hdr);
617
	if (ref)
618
		ref->refcnt++;
619
620
	return (ref);
621
}
622
623
void
624
lsa_cache_put(struct lsa_ref *ref, struct nbr *nbr)
625
{
626
	if (--ref->refcnt > 0)
627
		return;
628
629
	if (ntohs(ref->hdr.age) >= MAX_AGE)
630
		ospfe_imsg_compose_rde(IMSG_LS_MAXAGE, nbr->peerid, 0,
631
		    ref->data, sizeof(struct lsa_hdr));
632
633
	free(ref->data);
634
	LIST_REMOVE(ref, entry);
635
	free(ref);
636
}
637
638
struct lsa_ref *
639
lsa_cache_look(struct lsa_hdr *lsa_hdr)
640
{
641
	struct lsa_cache_head	*head;
642
	struct lsa_ref		*ref;
643
644
	head = &lsacache.hashtbl[lsa_hash_hdr(lsa_hdr) & lsacache.hashmask];
645
646
	LIST_FOREACH(ref, head, entry) {
647
		if (memcmp(&ref->hdr, lsa_hdr, sizeof(*lsa_hdr)) == 0)
648
			/* found match */
649
			return (ref);
650
	}
651
652
	return (NULL);
653
}