Line data Source code
1 : /* $OpenBSD: pf_table.c,v 1.128 2018/03/28 10:56:18 sashan Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2002 Cedric Berger
5 : * All rights reserved.
6 : *
7 : * Redistribution and use in source and binary forms, with or without
8 : * modification, are permitted provided that the following conditions
9 : * are met:
10 : *
11 : * - Redistributions of source code must retain the above copyright
12 : * notice, this list of conditions and the following disclaimer.
13 : * - Redistributions in binary form must reproduce the above
14 : * copyright notice, this list of conditions and the following
15 : * disclaimer in the documentation and/or other materials provided
16 : * with the distribution.
17 : *
18 : * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 : * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 : * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 : * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 : * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 : * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 : * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 : * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 : * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 : * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 : * POSSIBILITY OF SUCH DAMAGE.
30 : *
31 : */
32 :
33 : #include <sys/param.h>
34 : #include <sys/systm.h>
35 : #include <sys/socket.h>
36 : #include <sys/mbuf.h>
37 : #include <sys/pool.h>
38 : #include <sys/syslog.h>
39 : #include <sys/proc.h>
40 :
41 : #include <net/if.h>
42 :
43 : #include <netinet/in.h>
44 : #include <netinet/ip.h>
45 : #include <netinet/ip_ipsp.h>
46 : #include <netinet/ip_icmp.h>
47 : #include <netinet/tcp.h>
48 : #include <netinet/udp.h>
49 :
50 : #ifdef INET6
51 : #include <netinet/ip6.h>
52 : #include <netinet/icmp6.h>
53 : #endif /* INET6 */
54 :
55 : #include <net/pfvar.h>
56 : #include <net/pfvar_priv.h>
57 :
/*
 * Reject requests that carry flags outside the per-operation whitelist.
 * Used at the top of each public entry point; returns EINVAL from the
 * enclosing function.
 */
#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

/*
 * The same entry points serve both the ioctl path (user memory, needs
 * copyin/copyout) and in-kernel callers (plain bcopy).  The flag
 * PFR_FLAG_USERIOCTL selects which copy primitive to use.
 */
#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

/*
 * Yield the CPU inside long loops.  Both arguments are currently
 * ignored; every call site simply offers a preemption point.
 */
#define YIELD(cnt, ok)				\
	sched_pause(preempt)

/* Fill a sockaddr_in/sockaddr_in6 in place from a bare address. */
#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

/* Pick the pf_addr view of a sockaddr_union for the given family. */
#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
/* "network" means a prefix shorter than a full host address */
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
110 :
/*
 * Context handed to pfr_walktree() while iterating a table's radix
 * trees.  pfrw_op selects the action performed per node; the union
 * carries the operation-specific cursor/destination.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,		/* set entries' mark flag */
		PFRW_SWEEP,		/* collect marked entries */
		PFRW_ENQUEUE,		/* collect all entries */
		PFRW_GET_ADDRS,		/* copy out pfr_addr array */
		PFRW_GET_ASTATS,	/* copy out pfr_astats array */
		PFRW_POOL_GET,		/* pick the n-th entry */
		PFRW_DYNADDR_UPDATE	/* refresh a dynaddr's masks */
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;	/* remaining slots, or counter via pfrw_cnt */
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

/* Common error exit: stash the code and jump to the cleanup label. */
#define senderr(e)	do { rv = (e); goto _bad; } while (0)
139 :
140 : struct pool pfr_ktable_pl;
141 : struct pool pfr_kentry_pl[PFRKE_MAX];
142 : struct pool pfr_kcounters_pl;
143 : union sockaddr_union pfr_mask;
144 : struct pf_addr pfr_ffaddr;
145 :
146 : int pfr_gcd(int, int);
147 : void pfr_copyout_addr(struct pfr_addr *,
148 : struct pfr_kentry *ke);
149 : int pfr_validate_addr(struct pfr_addr *);
150 : void pfr_enqueue_addrs(struct pfr_ktable *,
151 : struct pfr_kentryworkq *, int *, int);
152 : void pfr_mark_addrs(struct pfr_ktable *);
153 : struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
154 : struct pfr_addr *, int);
155 : struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
156 : void pfr_destroy_kentries(struct pfr_kentryworkq *);
157 : void pfr_destroy_kentry(struct pfr_kentry *);
158 : void pfr_insert_kentries(struct pfr_ktable *,
159 : struct pfr_kentryworkq *, time_t);
160 : void pfr_remove_kentries(struct pfr_ktable *,
161 : struct pfr_kentryworkq *);
162 : void pfr_clstats_kentries(struct pfr_kentryworkq *, time_t,
163 : int);
164 : void pfr_reset_feedback(struct pfr_addr *, int, int);
165 : void pfr_prepare_network(union sockaddr_union *, int, int);
166 : int pfr_route_kentry(struct pfr_ktable *,
167 : struct pfr_kentry *);
168 : int pfr_unroute_kentry(struct pfr_ktable *,
169 : struct pfr_kentry *);
170 : int pfr_walktree(struct radix_node *, void *, u_int);
171 : int pfr_validate_table(struct pfr_table *, int, int);
172 : int pfr_fix_anchor(char *);
173 : void pfr_commit_ktable(struct pfr_ktable *, time_t);
174 : void pfr_insert_ktables(struct pfr_ktableworkq *);
175 : void pfr_insert_ktable(struct pfr_ktable *);
176 : void pfr_setflags_ktables(struct pfr_ktableworkq *);
177 : void pfr_setflags_ktable(struct pfr_ktable *, int);
178 : void pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
179 : int);
180 : void pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
181 : struct pfr_ktable *pfr_create_ktable(struct pfr_table *, time_t, int,
182 : int);
183 : void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
184 : void pfr_destroy_ktable(struct pfr_ktable *, int);
185 : int pfr_ktable_compare(struct pfr_ktable *,
186 : struct pfr_ktable *);
187 : void pfr_ktable_winfo_update(struct pfr_ktable *,
188 : struct pfr_kentry *);
189 : struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
190 : void pfr_clean_node_mask(struct pfr_ktable *,
191 : struct pfr_kentryworkq *);
192 : int pfr_table_count(struct pfr_table *, int);
193 : int pfr_skip_table(struct pfr_table *,
194 : struct pfr_ktable *, int);
195 : struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
196 : int pfr_islinklocal(sa_family_t, struct pf_addr *);
197 :
198 : RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
199 0 : RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
200 :
201 : struct pfr_ktablehead pfr_ktables;
202 : struct pfr_table pfr_nulltable;
203 : int pfr_ktable_cnt;
204 :
/*
 * Greatest common divisor of m and n, by Euclid's algorithm.
 * Returns n unchanged when m <= 0.
 */
int
pfr_gcd(int m, int n)
{
	int r;

	for (; m > 0; m = r) {
		r = n % m;
		n = m;
	}
	return (n);
}
217 :
/*
 * One-time setup for the table subsystem: initialize the radix-tree
 * code for the largest key we use (sockaddr_in6) and create the pools
 * backing tables, the three entry variants, and per-entry counters.
 */
void
pfr_initialize(void)
{
	rn_init(sizeof(struct sockaddr_in6));

	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable),
	    0, IPL_SOFTNET, 0, "pfrktable", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_PLAIN], sizeof(struct pfr_kentry),
	    0, IPL_SOFTNET, 0, "pfrke_plain", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_ROUTE], sizeof(struct pfr_kentry_route),
	    0, IPL_SOFTNET, 0, "pfrke_route", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_COST], sizeof(struct pfr_kentry_cost),
	    0, IPL_SOFTNET, 0, "pfrke_cost", NULL);
	pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters),
	    0, IPL_SOFTNET, 0, "pfrkcounters", NULL);

	/* all-ones address, used as a full mask elsewhere in pf */
	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
236 :
/*
 * Remove every address from table 'tbl'.  *ndel receives the number of
 * entries removed.  With PFR_FLAG_DUMMY the count is computed but the
 * table is left untouched.  Returns EINVAL/ESRCH/EPERM on bad table,
 * missing table, or const table respectively.
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* collect all entries on workq; *ndel gets the count */
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		/* after removing everything the counter must be zero */
		if (kt->pfrkt_cnt) {
			DPFPRINTF(LOG_NOTICE,
			    "pfr_clr_addrs: corruption detected (%d).",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
264 :
/*
 * Add 'size' addresses from buffer 'addr' to table 'tbl'.  On success
 * *nadd receives the number actually added.  With PFR_FLAG_FEEDBACK
 * each element's pfra_fback is set to describe its outcome; with
 * PFR_FLAG_DUMMY everything is computed but nothing is committed.
 * A throw-away table (tmpkt) detects duplicates within the request
 * itself, independent of the target table.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* scratch table: catches duplicates within this batch */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);	/* in target table? */
		q = pfr_lookup_addr(tmpkt, &ad, 1);	/* earlier in batch? */
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    ad.pfra_not)
				/* same address, opposite negation */
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	/* pull the new entries back out of the scratch table */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
341 :
/*
 * Delete 'size' addresses listed in 'addr' from table 'tbl'.  *ndel
 * receives the number actually deleted.  Entries to delete are tagged
 * with PFRKE_FLAG_MARK first, which also de-duplicates the request;
 * PFR_FLAG_FEEDBACK/PFR_FLAG_DUMMY behave as in pfr_add_addrs().
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	/* log ends up ~ floor(log2(N)) + 1 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			YIELD(i, flags & PFR_FLAG_USERIOCTL);
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_flags &= ~PFRKE_FLAG_MARK;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_flags & PFRKE_FLAG_MARK)
				/* already queued by an earlier element */
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL &&
		    (p->pfrke_flags & PFRKE_FLAG_NOT) == ad.pfra_not &&
		    !(p->pfrke_flags & PFRKE_FLAG_MARK)) {
			/* mark doubles as the "already on workq" tag */
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
430 :
/*
 * Replace the contents of table 'tbl' with the 'size' addresses in
 * 'addr': missing ones are added, listed ones with a flipped negation
 * are changed, and entries not listed are deleted.  *nadd/*ndel/
 * *nchange receive the respective counts.  With PFR_FLAG_FEEDBACK the
 * deleted entries are additionally copied out after the input array,
 * with *size2 implementing the usual two-call size protocol.
 */
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/* scratch table: catches duplicates within this request */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
	/* mark everything; entries still marked at the end get deleted */
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_flags & PFRKE_FLAG_MARK) {
				/* mark already cleared: listed twice */
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			if ((p->pfrke_flags & PFRKE_FLAG_NOT) != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
				goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			ad.pfra_fback = PFR_FB_ADDED;
			xadd++;
			if (p->pfrke_type == PFRKE_COST)
				kt->pfrkt_refcntcost++;
			pfr_ktable_winfo_update(kt, p);
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	/* whatever is still marked was not listed: queue for deletion */
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	/*
	 * NOTE(review): this branch dereferences size2 unconditionally
	 * while the assignment further down guards with "size2 != NULL";
	 * presumably callers using FEEDBACK always pass size2 — verify.
	 */
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			/* report required size to the caller */
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
545 :
/*
 * Test 'size' host addresses against table 'tbl' without modifying it.
 * Each element's pfra_fback is set to MATCH/NOTMATCH/NONE; *nmatch
 * receives the number of positive (non-negated) matches.  With
 * PFR_FLAG_REPLACE the matching table entry is copied back into the
 * caller's element.  Network (prefix) entries in the input are invalid.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		/* longest-prefix match, not exact lookup */
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    ((p->pfrke_flags & PFRKE_FLAG_NOT) ?
		    PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !(p->pfrke_flags & PFRKE_FLAG_NOT))
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
585 :
/*
 * Copy out all addresses of table 'tbl' into 'addr'.  Two-call size
 * protocol: if *size is too small, the required count is returned in
 * *size with no copy performed and rc 0.  Otherwise both radix trees
 * are walked with PFRW_GET_ADDRS filling the caller's array.
 */
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* caller's buffer too small: just report needed size */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;	/* slots left in caller's array */
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	/* all slots must have been consumed, else cnt was wrong */
	if (w.pfrw_free) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_addrs: corruption detected (%d)", w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
624 :
/*
 * Copy out all addresses of 'tbl' together with their statistics
 * (pfr_astats).  Same two-call size protocol as pfr_get_addrs().
 * With PFR_FLAG_CLSTATS the statistics are reset after a successful
 * walk.  Note: unlike its siblings this function performs no
 * ACCEPT_FLAGS check.
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	time_t			 tzero = time_second;

	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		/* buffer too small: report the needed count */
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		/* caller asked to clear stats after reading them */
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_astats: corruption detected (%d)", w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
668 :
/*
 * Zero the statistics of the 'size' addresses listed in 'addr'.
 * *nzero receives the number of entries found and cleared.
 * PFR_FLAG_FEEDBACK/PFR_FLAG_DUMMY behave as in pfr_add_addrs().
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, time_second, 0);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
716 :
/*
 * Sanity-check a pfr_addr coming from user land or a kernel caller.
 * Returns 0 if valid, -1 otherwise.  Valid means: known address
 * family, prefix length within range for the family, all address bits
 * beyond the prefix zero, pfra_not in {0,1}, no stale feedback code,
 * and a known entry type.
 */
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	/*
	 * The byte-indexing below treats the structure as a byte array
	 * starting at the address bytes; it assumes the address union
	 * is the first member of struct pfr_addr — TODO confirm against
	 * the pfvar.h layout.  First check: partial byte at the prefix
	 * boundary must have its trailing bits clear...
	 */
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	/* ...then every full byte after the prefix must be zero. */
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	if (ad->pfra_type >= PFRKE_MAX)
		return (-1);
	return (0);
}
750 :
/*
 * Collect entries of table 'kt' onto 'workq'.  With sweep != 0 only
 * still-marked entries are collected (PFRW_SWEEP), otherwise all
 * entries (PFRW_ENQUEUE).  If naddr is non-NULL it receives the number
 * of entries queued.
 */
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			DPFPRINTF(LOG_ERR,
			    "pfr_enqueue_addrs: IPv4 walktree failed.");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			DPFPRINTF(LOG_ERR,
			    "pfr_enqueue_addrs: IPv6 walktree failed.");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
772 :
/*
 * Set PFRKE_FLAG_MARK on every entry of the table.  Callers then clear
 * the mark on entries they want to keep and sweep the remainder.
 * NOTE(review): unlike pfr_enqueue_addrs() the tree heads are not
 * NULL-checked here — presumably both trees always exist for an active
 * table; verify.
 */
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		DPFPRINTF(LOG_ERR,
		    "pfr_mark_addrs: IPv4 walktree failed.");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		DPFPRINTF(LOG_ERR,
		    "pfr_mark_addrs: IPv6 walktree failed.");
}
787 :
788 :
/*
 * Look up 'ad' in table 'kt' and return the matching entry or NULL.
 * Network addresses use an exact prefix lookup (rn_lookup); host
 * addresses use best-match (rn_match), and with exact != 0 a host
 * query refuses to return a covering network entry.
 */
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;

	bzero(&sa, sizeof(sa));
	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		/* exact host lookup must not return a prefix entry */
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
821 :
/*
 * Allocate and initialize a table entry from the pool matching the
 * requested entry type (plain/route/cost).  Returns NULL on allocation
 * failure.  A weight of 0 in the request is normalized to the implicit
 * weight 1; route/cost entries take a reference on their interface.
 */
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry_all	*ke;

	if (ad->pfra_type >= PFRKE_MAX)
		panic("unknown pfra_type %d", ad->pfra_type);

	ke = pool_get(&pfr_kentry_pl[ad->pfra_type], PR_NOWAIT | PR_ZERO);
	if (ke == NULL)
		return (NULL);

	ke->pfrke_type = ad->pfra_type;

	/* set weight allowing implicit weights */
	if (ad->pfra_weight == 0)
		ad->pfra_weight = 1;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
		((struct pfr_kentry_cost *)ke)->weight = ad->pfra_weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (ad->pfra_ifname[0])
			ke->pfrke_rkif = pfi_kif_get(ad->pfra_ifname);
		if (ke->pfrke_rkif)
			pfi_kif_ref(ke->pfrke_rkif, PFI_KIF_REF_ROUTE);
		break;
	}

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	if (ad->pfra_not)
		ke->pfrke_flags |= PFRKE_FLAG_NOT;
	return ((struct pfr_kentry *)ke);
}
872 :
/*
 * Free every entry on 'workq'.  The next pointer is captured before
 * each entry is destroyed, since destruction invalidates the link.
 */
void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;
	int			 i;

	for (i = 0, p = SLIST_FIRST(workq); p != NULL; i++, p = q) {
		YIELD(i, 1);
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}
885 :
/*
 * Release a single entry: its counters, the interface reference held
 * by route/cost entries, and finally the entry itself back to the
 * pool that matches its type.
 */
void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
	if (ke->pfrke_type == PFRKE_COST || ke->pfrke_type == PFRKE_ROUTE)
		pfi_kif_unref(((struct pfr_kentry_all *)ke)->pfrke_rkif,
		    PFI_KIF_REF_ROUTE);
	pool_put(&pfr_kentry_pl[ke->pfrke_type], ke);
}
896 :
/*
 * Route every entry on 'workq' into table 'kt', stamping each with
 * 'tzero' and updating the cost refcount and weight info.  Stops at
 * the first routing failure (already-inserted entries stay in); the
 * table's entry count is bumped by the number actually inserted.
 */
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			DPFPRINTF(LOG_ERR,
			    "pfr_insert_kentries: cannot route entry "
			    "(code=%d).", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		++n;
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost++;
		pfr_ktable_winfo_update(kt, p);
		YIELD(n, 1);
	}
	kt->pfrkt_cnt += n;
}
921 :
/*
 * Insert a single address into table 'kt'.  A no-op (returning 0) if
 * the address is already present.  NOTE(review): allocation failure is
 * reported as EINVAL rather than ENOMEM — presumably historical;
 * confirm before changing, callers may depend on it.
 */
int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	if (p->pfrke_type == PFRKE_COST)
		kt->pfrkt_refcntcost++;
	kt->pfrkt_cnt++;
	pfr_ktable_winfo_update(kt, p);

	return (0);
}
947 :
/*
 * Unroute and free every entry on 'workq', adjusting the table's entry
 * count and cost refcount.  If cost entries remain afterwards, the
 * per-table maxweight/gcd bookkeeping is rebuilt from scratch by
 * re-scanning the surviving entries.
 */
void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	struct pfr_kentryworkq	 addrq;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		++n;
		YIELD(n, 1);
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost--;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);

	/* update maxweight and gcd for load balancing */
	if (kt->pfrkt_refcntcost > 0) {
		kt->pfrkt_gcdweight = 0;
		kt->pfrkt_maxweight = 1;
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		SLIST_FOREACH(p, &addrq, pfrke_workq)
			pfr_ktable_winfo_update(kt, p);
	}
}
975 :
/*
 * Detach every entry on 'workq' from table 'kt' without freeing the
 * entries.  Used to empty the scratch tables employed for duplicate
 * detection before the entries are committed or destroyed.
 */
void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
	}
}
986 :
/*
 * Reset statistics for every entry on 'workq': drop the counters
 * structure (if any) and restamp the creation time.  With negchange
 * set, each entry's negation flag is toggled first (used by
 * pfr_set_addrs with INVERT_NEG_FLAG).
 */
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, time_t tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_flags ^= PFRKE_FLAG_NOT;
		if (p->pfrke_counters) {
			pool_put(&pfr_kcounters_pl, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		p->pfrke_tzero = tzero;
	}
}
1002 :
/*
 * Clear the pfra_fback field of every element in the caller's address
 * array.  Used on the error path so a failed request does not leave
 * stale per-element feedback behind.  Copy errors abort the loop
 * silently — this is best-effort cleanup.
 */
void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}
1018 :
1019 : void
1020 0 : pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1021 : {
1022 : #ifdef INET6
1023 : int i;
1024 : #endif /* INET6 */
1025 :
1026 0 : bzero(sa, sizeof(*sa));
1027 0 : switch (af) {
1028 : case AF_INET:
1029 0 : sa->sin.sin_len = sizeof(sa->sin);
1030 0 : sa->sin.sin_family = AF_INET;
1031 0 : sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
1032 0 : break;
1033 : #ifdef INET6
1034 : case AF_INET6:
1035 0 : sa->sin6.sin6_len = sizeof(sa->sin6);
1036 0 : sa->sin6.sin6_family = AF_INET6;
1037 0 : for (i = 0; i < 4; i++) {
1038 0 : if (net <= 32) {
1039 0 : sa->sin6.sin6_addr.s6_addr32[i] =
1040 0 : net ? htonl(-1 << (32-net)) : 0;
1041 0 : break;
1042 : }
1043 0 : sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1044 0 : net -= 32;
1045 : }
1046 : break;
1047 : #endif /* INET6 */
1048 : default:
1049 0 : unhandled_af(af);
1050 : }
1051 0 : }
1052 :
/*
 * Insert kentry ke into the radix tree of table kt matching ke's
 * address family.  Network entries are inserted with an explicit mask;
 * host entries are inserted as host routes (NULL mask).
 * Returns 0 on success, -1 if rn_addroute() failed (e.g. duplicate key).
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union mask;
	struct radix_node *rn;
	struct radix_node_head *head;

	/* the embedded radix nodes must be zeroed before insertion */
	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	switch (ke->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);

	return (rn == NULL ? -1 : 0);
}
1082 :
/*
 * Remove kentry ke from the radix tree of table kt matching ke's
 * address family; mirror of pfr_route_kentry().
 * Returns 0 on success, -1 (with a debug log) if the entry was not
 * found in the tree.
 */
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union mask;
	struct radix_node *rn;
	struct radix_node_head *head;

	switch (ke->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);

	if (rn == NULL) {
		DPFPRINTF(LOG_ERR, "pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
1115 :
/*
 * Convert an internal kentry into the user-visible struct pfr_addr.
 * ad is fully cleared first, so a NULL ke yields an all-zero address.
 */
void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_type = ke->pfrke_type;
	if (ke->pfrke_flags & PFRKE_FLAG_NOT)
		ad->pfra_not = 1;

	switch (ad->pfra_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
#ifdef INET6
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	/* the state count lives in the optional counter record */
	if (ke->pfrke_counters != NULL)
		ad->pfra_states = ke->pfrke_counters->states;
	switch (ke->pfrke_type) {
	case PFRKE_COST:
		ad->pfra_weight = ((struct pfr_kentry_cost *)ke)->weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		/* cost/route entries carry an optional interface name */
		if (((struct pfr_kentry_route *)ke)->kif != NULL)
			strlcpy(ad->pfra_ifname,
			    ((struct pfr_kentry_route *)ke)->kif->pfik_name,
			    IFNAMSIZ);
		break;
	default:
		break;
	}
}
1156 :
/*
 * rn_walktree() callback, dispatching on the walker's operation:
 *   PFRW_MARK            clear the mark flag on every entry
 *   PFRW_SWEEP           collect only entries still unmarked
 *   PFRW_ENQUEUE         collect every entry onto pfrw_workq
 *   PFRW_GET_ADDRS       copy addresses out to the user buffer
 *   PFRW_GET_ASTATS      copy addresses plus counters out
 *   PFRW_POOL_GET        select the pfrw_cnt'th non-negated entry
 *   PFRW_DYNADDR_UPDATE  publish the first addr/mask per af to a dynaddr
 * Returning non-zero aborts the tree walk.
 */
int
pfr_walktree(struct radix_node *rn, void *arg, u_int id)
{
	struct pfr_kentry *ke = (struct pfr_kentry *)rn;
	struct pfr_walktree *w = arg;
	union sockaddr_union mask;
	int flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_flags &= ~PFRKE_FLAG_MARK;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_flags & PFRKE_FLAG_MARK)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* copy out only while user-provided space remains */
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				/* no counter record: report zeros + flag */
				bzero(as.pfras_packets,
				    sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_flags & PFRKE_FLAG_NOT)
			break; /* negative entries are ignored */
		/* countdown: stop at the requested index */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		switch (ke->pfrke_af) {
		case AF_INET:
			/* only the first IPv4 entry sets addr/mask */
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &mask, AF_INET);
			break;
#ifdef INET6
		case AF_INET6:
			/* only the first IPv6 entry sets addr/mask */
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &mask, AF_INET6);
			break;
#endif /* INET6 */
		default:
			unhandled_af(ke->pfrke_af);
		}
		break;
	}
	return (0);
}
1248 :
/*
 * Deactivate all active tables matching filter (except those in the
 * reserved anchor).  With PFR_FLAG_DUMMY only the would-be-deleted
 * count is computed.  *ndel receives the number of tables affected.
 * Returns 0, EINVAL on bad anchor, or ENOENT if the filter matches
 * no ruleset.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	int xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		/* queue the table with its ACTIVE flag cleared */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1281 :
/*
 * Create (or re-activate) "size" tables described by the user array
 * tbl.  New anchored tables also get a root table (found or created).
 * With PFR_FLAG_DUMMY everything is torn down again after counting.
 * *nadd receives the number of tables added or re-activated.
 * Returns 0, or EFAULT/EINVAL/ENOMEM via the senderr/_bad path, which
 * destroys any tables created so far.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq addq, changeq;
	struct pfr_ktable *p, *q, *r, key;
	int i, rv, xadd = 0;
	time_t tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			/* table does not exist yet: create it */
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			/* skip duplicates already queued in this call */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* root may already be pending on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing but inactive: re-activate via changeq */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
1362 :
/*
 * Deactivate the "size" tables named in the user array tbl; actual
 * destruction happens in pfr_setflags_ktable() once no flags keep the
 * table alive.  With PFR_FLAG_DUMMY only the count is computed.
 * *ndel receives the number of tables affected.
 * Returns 0, EFAULT on copy failure, or EINVAL on a bad table spec.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* skip duplicates already queued in this call */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1399 :
/*
 * Copy out the pfr_table headers of all tables matching filter.
 * Two-phase protocol: if the caller's buffer (*size entries) is too
 * small, only *size is updated with the required count and 0 is
 * returned.  Otherwise up to nn entries are copied and *size is set
 * to the number available.  Returns 0, EINVAL on bad anchor, ENOENT
 * if the filter matches no ruleset, EFAULT on copy failure, or ENOTTY
 * if the table count changed mid-walk (corruption check).
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable *p;
	int n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	/* n != 0 means the walk saw a different count than expected */
	if (n) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_tables: corruption detected (%d).", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1433 :
/*
 * Copy out per-table statistics (struct pfr_tstats) for all tables
 * matching filter; same two-phase size protocol as pfr_get_tables().
 * With PFR_FLAG_CLSTATS the statistics of the copied tables are
 * cleared afterwards (recursing into addresses with
 * PFR_FLAG_ADDRSTOO).  Returns 0, EINVAL, ENOENT, EFAULT, or ENOTTY
 * on a mid-walk count mismatch.
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable *p;
	struct pfr_ktableworkq workq;
	int n, nn;
	time_t tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
		/* remember copied tables for the optional stat clear */
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (n) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_tstats: corruption detected (%d).", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1475 :
/*
 * Clear statistics of the "size" tables named in the user array tbl
 * (and of their addresses when PFR_FLAG_ADDRSTOO is set).  With
 * PFR_FLAG_DUMMY only the count is computed.  *nzero receives the
 * number of tables found.  Returns 0, EFAULT, or EINVAL.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, key;
	int i, xzero = 0;
	time_t tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1505 :
/*
 * Set/clear user flags on the "size" tables named in tbl.  Both flag
 * masks must stay within PFR_TFLAG_USRMASK and must not overlap.
 * Clearing PERSIST on an unreferenced table counts as a deletion
 * (*ndel); every other effective change counts in *nchange.  With
 * PFR_FLAG_DUMMY nothing is applied.  Returns 0, EINVAL, or EFAULT.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no effective change: nothing to queue */
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			/* skip duplicates already queued in this call */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1556 :
/*
 * Begin a table transaction on the ruleset named by trs: discard any
 * stale INACTIVE tables left from a previous transaction, open the
 * ruleset (rs->topen), and hand back a fresh ticket.  With
 * PFR_FLAG_DUMMY nothing is opened.  *ndel receives the number of
 * stale tables removed.  Returns 0 or ENOMEM.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		/* clear INACTIVE so pfr_setflags_ktable() reaps it */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1589 :
/*
 * Define the inactive (shadow) contents of one table inside an open
 * transaction: find or create the table (and its root table when
 * anchored), build a shadow ktable holding the "size" user-supplied
 * addresses, and attach it as kt->pfrkt_shadow.  With PFR_FLAG_DUMMY
 * everything is torn down again.  *nadd gets the number of new/flag-
 * changed tables, *naddr the number of addresses routed into the
 * shadow.  Returns 0, EINVAL, EBUSY (bad ticket/closed transaction),
 * ENOMEM, or EFAULT; the _bad path undoes all partial work.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq tableq;
	struct pfr_kentryworkq addrq;
	struct pfr_ktable *kt, *rt, *shadow, key;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	struct pf_ruleset *rs;
	int i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	/* shadow table collecting the transaction's addresses */
	shadow = pfr_create_ktable(tbl, 0, 0, !(flags & PFR_FLAG_USERIOCTL));
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* drop duplicates silently */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
		/*
		 * NOTE(review): the cost/weight bookkeeping below updates
		 * kt although p was routed into shadow — confirm intended.
		 */
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost++;
		pfr_ktable_winfo_update(kt, p);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any earlier shadow from this transaction */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
1695 :
/*
 * Abort an open table transaction: drop all INACTIVE tables belonging
 * to the ruleset named by trs and close it.  A missing ruleset or a
 * stale ticket is not an error (nothing to roll back).  *ndel receives
 * the number of tables discarded.  Always returns 0.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		/* clear INACTIVE so pfr_setflags_ktable() reaps it */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1726 :
/*
 * Commit an open table transaction: every INACTIVE table of the
 * ruleset named by trs has its shadow contents merged in via
 * pfr_commit_ktable(), then the ruleset is closed.  *nadd counts
 * previously non-active tables, *nchange already-active ones.
 * Returns 0, or EBUSY on a bad ticket / closed transaction.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
	int *nchange, int flags)
{
	struct pfr_ktable *p, *q;
	struct pfr_ktableworkq workq;
	struct pf_ruleset *rs;
	int xadd = 0, xchange = 0;
	time_t tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		/* save the next pointer: commit may unlink p */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
1769 :
/*
 * Merge kt's shadow table into kt and retire the shadow.  Three cases:
 *  - shadow carries no addresses (NO_ADDRESSES): only clear stats if
 *    kt was not active yet;
 *  - kt is active: compute add/delete/change sets between kt and the
 *    shadow and apply them entry by entry;
 *  - kt is inactive: simply swap the radix trees and counts wholesale.
 * Finally recompute kt's flags (ACTIVE set, INACTIVE cleared) and
 * destroy the shadow.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
{
	struct pfr_ktable *shadow = kt->pfrkt_shadow;
	int nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry *p, *q, *next;
		struct pfr_addr ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		/* mark current entries; survivors keep the mark */
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq); /* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* present in both: maybe flip negation */
				if ((q->pfrke_flags & PFRKE_FLAG_NOT) !=
				    (p->pfrke_flags & PFRKE_FLAG_NOT))
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_flags |= PFRKE_FLAG_MARK;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* only in shadow: add to kt */
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* unmarked entries exist only in kt: delete them */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1829 :
1830 : int
1831 0 : pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1832 : {
1833 : int i;
1834 :
1835 0 : if (!tbl->pfrt_name[0])
1836 0 : return (-1);
1837 0 : if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1838 0 : return (-1);
1839 0 : if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1840 0 : return (-1);
1841 0 : for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1842 0 : if (tbl->pfrt_name[i])
1843 0 : return (-1);
1844 0 : if (pfr_fix_anchor(tbl->pfrt_anchor))
1845 0 : return (-1);
1846 0 : if (tbl->pfrt_flags & ~allowedflags)
1847 0 : return (-1);
1848 0 : return (0);
1849 0 : }
1850 :
/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		/* strip all leading slashes, shifting the path left */
		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	/* require NUL termination and a zero-padded tail */
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
1879 :
1880 : int
1881 0 : pfr_table_count(struct pfr_table *filter, int flags)
1882 : {
1883 : struct pf_ruleset *rs;
1884 :
1885 0 : if (flags & PFR_FLAG_ALLRSETS)
1886 0 : return (pfr_ktable_cnt);
1887 0 : if (filter->pfrt_anchor[0]) {
1888 0 : rs = pf_find_ruleset(filter->pfrt_anchor);
1889 0 : return ((rs != NULL) ? rs->tables : -1);
1890 : }
1891 0 : return (pf_main_ruleset.tables);
1892 0 : }
1893 :
1894 : int
1895 0 : pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1896 : {
1897 0 : if (flags & PFR_FLAG_ALLRSETS)
1898 0 : return (0);
1899 0 : if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1900 0 : return (1);
1901 0 : return (0);
1902 0 : }
1903 :
1904 : void
1905 0 : pfr_insert_ktables(struct pfr_ktableworkq *workq)
1906 : {
1907 : struct pfr_ktable *p;
1908 :
1909 0 : SLIST_FOREACH(p, workq, pfrkt_workq)
1910 0 : pfr_insert_ktable(p);
1911 0 : }
1912 :
/*
 * Insert kt into the global table tree, bump the global count, and,
 * if kt has a root table, take an anchor reference on it — the first
 * such reference also marks the root as referenced-by-anchor.
 */
void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
1923 :
1924 : void
1925 0 : pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1926 : {
1927 : struct pfr_ktable *p, *q;
1928 :
1929 0 : for (p = SLIST_FIRST(workq); p; p = q) {
1930 0 : q = SLIST_NEXT(p, pfrkt_workq);
1931 0 : pfr_setflags_ktable(p, p->pfrkt_nflags);
1932 : }
1933 0 : }
1934 :
/*
 * Apply flag set newf to kt, enforcing the flag invariants:
 * a table kept by nothing (no REFERENCED/REFDANCHOR/PERSIST) loses
 * ACTIVE; a non-ACTIVE table loses its user flags; a table with no
 * SETMASK flags left is removed from the tree and destroyed (dropping
 * its root's anchor reference).  Losing ACTIVE also flushes the
 * addresses; losing INACTIVE drops any shadow table.
 * NOTE: kt may be freed by this function — callers must not touch it
 * afterwards when they cleared the last SETMASK flag.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* nothing keeps the table alive: destroy it */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		/* deactivated: flush all addresses */
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
1967 :
1968 : void
1969 0 : pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
1970 : {
1971 : struct pfr_ktable *p;
1972 :
1973 0 : SLIST_FOREACH(p, workq, pfrkt_workq)
1974 0 : pfr_clstats_ktable(p, tzero, recurse);
1975 0 : }
1976 :
/*
 * Zero kt's packet/byte/match counters and reset its creation time to
 * tzero; with recurse, the per-address statistics are cleared too.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
{
	struct pfr_kentryworkq addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}
1991 :
/*
 * Allocate and initialize a kernel table from the user spec tbl.
 * intr selects a non-sleeping pool allocation (interrupt context).
 * With attachruleset, the anchor's ruleset is found or created and
 * its table count bumped.  Both radix heads (v4/v6) are initialized.
 * Returns the new table or NULL on allocation failure (partially
 * built tables are destroyed before returning).
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset,
    int intr)
{
	struct pfr_ktable *kt;
	struct pf_ruleset *rs;

	if (intr)
		kt = pool_get(&pfr_ktable_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL);
	else
		kt = pool_get(&pfr_ktable_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* one radix tree per address family, keyed past the header */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr)) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr))) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;
	kt->pfrkt_refcntcost = 0;
	kt->pfrkt_gcdweight = 0;
	kt->pfrkt_maxweight = 1;

	return (kt);
}
2031 :
2032 : void
2033 0 : pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2034 : {
2035 : struct pfr_ktable *p, *q;
2036 :
2037 0 : for (p = SLIST_FIRST(workq); p; p = q) {
2038 0 : q = SLIST_NEXT(p, pfrkt_workq);
2039 0 : pfr_destroy_ktable(p, flushaddr);
2040 : }
2041 0 : }
2042 :
/*
 * Free table kt: optionally flush and destroy its addresses, release
 * both radix heads, recursively destroy any shadow table, drop the
 * ruleset's table count (removing the ruleset if now empty), and
 * return the table to its pool.
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE, 0);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE, 0);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
2065 :
     : /*
     :  * RB-tree comparator for the ktable tree: order by table name
     :  * first, then by anchor path.
     :  */
2066 : int
2067 0 : pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2068 : {
2069 : int d;
2070 :
2071 0 : if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2072 0 : return (d);
2073 0 : return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2074 0 : }
2075 :
     : /*
     :  * Look up a ktable by name/anchor.  The cast below is valid only
     :  * because struct pfr_ktable begins with a struct pfr_table, so the
     :  * comparator reads the same fields through either pointer.
     :  */
2076 : struct pfr_ktable *
2077 0 : pfr_lookup_table(struct pfr_table *tbl)
2078 : {
2079 : /* struct pfr_ktable start like a struct pfr_table */
2080 0 : return (RB_FIND(pfr_ktablehead, &pfr_ktables,
2081 : (struct pfr_ktable *)tbl));
2082 : }
2083 :
     : /*
     :  * Return 1 if address *a matches a non-negated entry of table kt
     :  * for family `af', 0 otherwise.  An inactive table defers to its
     :  * root table when one exists.  Bumps the table's match/nomatch
     :  * statistics as a side effect.
     :  */
2084 : int
2085 0 : pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2086 : {
2087 : struct pfr_kentry *ke = NULL;
2088 0 : struct sockaddr_in tmp4;
2089 : #ifdef INET6
2090 0 : struct sockaddr_in6 tmp6;
2091 : #endif /* INET6 */
2092 : int match;
2093 :
2094 0 : if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2095 0 : kt = kt->pfrkt_root;
2096 0 : if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2097 0 : return (0);
2098 :
     : /* build a sockaddr key on the stack for the radix lookup */
2099 0 : switch (af) {
2100 : case AF_INET:
2101 0 : bzero(&tmp4, sizeof(tmp4));
2102 0 : tmp4.sin_len = sizeof(tmp4);
2103 0 : tmp4.sin_family = AF_INET;
2104 0 : tmp4.sin_addr.s_addr = a->addr32[0];
2105 0 : ke = (struct pfr_kentry *)rn_match(&tmp4, kt->pfrkt_ip4);
2106 0 : break;
2107 : #ifdef INET6
2108 : case AF_INET6:
2109 0 : bzero(&tmp6, sizeof(tmp6));
2110 0 : tmp6.sin6_len = sizeof(tmp6);
2111 0 : tmp6.sin6_family = AF_INET6;
2112 0 : bcopy(a, &tmp6.sin6_addr, sizeof(tmp6.sin6_addr));
2113 0 : ke = (struct pfr_kentry *)rn_match(&tmp6, kt->pfrkt_ip6);
2114 0 : break;
2115 : #endif /* INET6 */
2116 : default:
2117 0 : unhandled_af(af);
2118 : }
     : /* a hit on a negated ("!") entry counts as no match */
2119 0 : match = (ke && !(ke->pfrke_flags & PFRKE_FLAG_NOT));
2120 0 : if (match)
2121 0 : kt->pfrkt_match++;
2122 : else
2123 0 : kt->pfrkt_nomatch++;
2124 0 : return (match);
2125 0 : }
2126 :
     : /*
     :  * Account one packet against table kt: bump the per-table
     :  * packet/byte counters indexed by direction (in/out) and operation
     :  * (pass/match/block), and, when the table has PFR_TFLAG_COUNTERS,
     :  * the per-entry counters as well (allocated lazily).  `notrule'
     :  * tells us whether the rule that referenced the table was negated;
     :  * a lookup result disagreeing with it is recorded as XPASS.
     :  */
2127 : void
2128 0 : pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, struct pf_pdesc *pd,
2129 : int op, int notrule)
2130 : {
2131 : struct pfr_kentry *ke = NULL;
2132 0 : struct sockaddr_in tmp4;
2133 : #ifdef INET6
2134 0 : struct sockaddr_in6 tmp6;
2135 : #endif /* INET6 */
2136 0 : sa_family_t af = pd->af;
2137 0 : u_int64_t len = pd->tot_len;
2138 0 : int dir_idx = (pd->dir == PF_OUT);
2139 : int op_idx;
2140 :
2141 0 : if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2142 0 : kt = kt->pfrkt_root;
2143 0 : if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2144 0 : return;
2145 :
     : /* radix lookup of *a, same key construction as pfr_match_addr() */
2146 0 : switch (af) {
2147 : case AF_INET:
2148 0 : bzero(&tmp4, sizeof(tmp4));
2149 0 : tmp4.sin_len = sizeof(tmp4);
2150 0 : tmp4.sin_family = AF_INET;
2151 0 : tmp4.sin_addr.s_addr = a->addr32[0];
2152 0 : ke = (struct pfr_kentry *)rn_match(&tmp4, kt->pfrkt_ip4);
2153 0 : break;
2154 : #ifdef INET6
2155 : case AF_INET6:
2156 0 : bzero(&tmp6, sizeof(tmp6));
2157 0 : tmp6.sin6_len = sizeof(tmp6);
2158 0 : tmp6.sin6_family = AF_INET6;
2159 0 : bcopy(a, &tmp6.sin6_addr, sizeof(tmp6.sin6_addr));
2160 0 : ke = (struct pfr_kentry *)rn_match(&tmp6, kt->pfrkt_ip6);
2161 0 : break;
2162 : #endif /* INET6 */
2163 : default:
2164 0 : unhandled_af(af);
2165 : }
2166 :
2167 0 : switch (op) {
2168 : case PF_PASS:
2169 : op_idx = PFR_OP_PASS;
2170 0 : break;
2171 : case PF_MATCH:
2172 : op_idx = PFR_OP_MATCH;
2173 0 : break;
2174 : case PF_DROP:
2175 : op_idx = PFR_OP_BLOCK;
2176 0 : break;
2177 : default:
2178 0 : panic("unhandled op");
2179 : }
2180 :
     : /* lookup result contradicts the rule's table negation: XPASS */
2181 0 : if ((ke == NULL || (ke->pfrke_flags & PFRKE_FLAG_NOT)) != notrule) {
2182 0 : if (op_idx != PFR_OP_PASS)
2183 0 : DPFPRINTF(LOG_DEBUG,
2184 : "pfr_update_stats: assertion failed.");
2185 : op_idx = PFR_OP_XPASS;
2186 0 : }
2187 0 : kt->pfrkt_packets[dir_idx][op_idx]++;
2188 0 : kt->pfrkt_bytes[dir_idx][op_idx] += len;
2189 0 : if (ke != NULL && op_idx != PFR_OP_XPASS &&
2190 0 : (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
     : /* lazy counter allocation; silently skipped if pool is empty */
2191 0 : if (ke->pfrke_counters == NULL)
2192 0 : ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2193 : PR_NOWAIT | PR_ZERO);
2194 0 : if (ke->pfrke_counters != NULL) {
2195 0 : ke->pfrke_counters->pfrkc_packets[dir_idx][op_idx]++;
2196 0 : ke->pfrke_counters->pfrkc_bytes[dir_idx][op_idx] += len;
2197 0 : }
2198 : }
2199 0 : }
2200 :
     : /*
     :  * Find or create the table `name' for ruleset rs and take a rule
     :  * reference on it.  A table created inside an anchor also gets a
     :  * root table of the same name at the top level (created here if
     :  * missing).  The first rule reference sets PFR_TFLAG_REFERENCED.
     :  * Returns the referenced ktable, or NULL on allocation failure.
     :  */
2201 : struct pfr_ktable *
2202 0 : pfr_attach_table(struct pf_ruleset *rs, char *name, int intr)
2203 : {
2204 : struct pfr_ktable *kt, *rt;
2205 0 : struct pfr_table tbl;
2206 0 : struct pf_anchor *ac = rs->anchor;
2207 :
2208 0 : bzero(&tbl, sizeof(tbl));
2209 0 : strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2210 0 : if (ac != NULL)
2211 0 : strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2212 0 : kt = pfr_lookup_table(&tbl);
2213 0 : if (kt == NULL) {
2214 0 : kt = pfr_create_ktable(&tbl, time_second, 1, intr);
2215 0 : if (kt == NULL)
2216 0 : return (NULL);
2217 0 : if (ac != NULL) {
     : /* clearing the anchor yields the root table's key */
2218 0 : bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2219 0 : rt = pfr_lookup_table(&tbl);
2220 0 : if (rt == NULL) {
2221 0 : rt = pfr_create_ktable(&tbl, 0, 1, intr);
2222 0 : if (rt == NULL) {
2223 0 : pfr_destroy_ktable(kt, 0);
2224 0 : return (NULL);
2225 : }
2226 0 : pfr_insert_ktable(rt);
2227 0 : }
2228 0 : kt->pfrkt_root = rt;
2229 0 : }
2230 0 : pfr_insert_ktable(kt);
2231 0 : }
     : /* first rule reference marks the table as referenced */
2232 0 : if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2233 0 : pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2234 0 : return (kt);
2235 0 : }
2236 :
     : /*
     :  * Drop one rule reference from kt; when the count reaches zero,
     :  * clear PFR_TFLAG_REFERENCED.  A non-positive count on entry is a
     :  * refcounting bug and is only logged, not "fixed".
     :  */
2237 : void
2238 0 : pfr_detach_table(struct pfr_ktable *kt)
2239 : {
2240 0 : if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2241 0 : DPFPRINTF(LOG_NOTICE, "pfr_detach_table: refcount = %d.",
2242 : kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2243 0 : else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2244 0 : pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2245 0 : }
2246 :
     : /*
     :  * Return 1 if `addr' is an IPv6 link-local address, 0 otherwise
     :  * (always 0 when the kernel is built without INET6).
     :  */
2247 : int
2248 0 : pfr_islinklocal(sa_family_t af, struct pf_addr *addr)
2249 : {
2250 : #ifdef INET6
2251 0 : if (af == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&addr->v6))
2252 0 : return (1);
2253 : #endif /* INET6 */
2254 0 : return (0);
2255 0 : }
2256 :
     : /*
     :  * Pick the next address from a table-backed pool (round-robin,
     :  * weight-aware).  On success returns 0 with *raddr/*rmask pointing
     :  * at the selected block and rpool->counter/tblidx updated to resume
     :  * from the next call; returns 1 when the table holds no usable
     :  * address of family `af', and -1 when the pool is not table-backed
     :  * or the table is inactive.  Nested (more specific) blocks inside
     :  * the chosen block are skipped by stepping the candidate address
     :  * past them.  For PF_ADDR_DYNIFTL pools, IPv6 link-local addresses
     :  * are never handed out.
     :  */
2257 : int
2258 0 : pfr_pool_get(struct pf_pool *rpool, struct pf_addr **raddr,
2259 : struct pf_addr **rmask, sa_family_t af)
2260 : {
2261 : struct pfr_ktable *kt;
2262 : struct pfr_kentry *ke, *ke2;
2263 : struct pf_addr *addr, *counter;
2264 0 : union sockaddr_union mask;
2265 0 : struct sockaddr_in tmp4;
2266 : #ifdef INET6
2267 0 : struct sockaddr_in6 tmp6;
2268 : #endif
2269 : int startidx, idx = -1, loop = 0, use_counter = 0;
2270 :
     : /* `addr' aliases the address field of the on-stack sockaddr so it
     :  * can double as the rn_match() key below */
2271 0 : switch (af) {
2272 : case AF_INET:
2273 0 : bzero(&tmp4, sizeof(tmp4));
2274 0 : tmp4.sin_len = sizeof(tmp4);
2275 0 : tmp4.sin_family = AF_INET;
2276 0 : addr = (struct pf_addr *)&tmp4.sin_addr;
2277 0 : break;
2278 : #ifdef INET6
2279 : case AF_INET6:
2280 0 : bzero(&tmp6, sizeof(tmp6));
2281 0 : tmp6.sin6_len = sizeof(tmp6);
2282 0 : tmp6.sin6_family = AF_INET6;
2283 0 : addr = (struct pf_addr *)&tmp6.sin6_addr;
2284 0 : break;
2285 : #endif /* INET6 */
2286 : default:
2287 0 : unhandled_af(af);
2288 : }
2289 :
2290 0 : if (rpool->addr.type == PF_ADDR_TABLE)
2291 0 : kt = rpool->addr.p.tbl;
2292 0 : else if (rpool->addr.type == PF_ADDR_DYNIFTL)
2293 0 : kt = rpool->addr.p.dyn->pfid_kt;
2294 : else
2295 0 : return (-1);
2296 0 : if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2297 0 : kt = kt->pfrkt_root;
2298 0 : if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2299 0 : return (-1);
2300 :
     : /* resume where the previous call left off, if the saved index is
     :  * still valid for this table */
2301 0 : counter = &rpool->counter;
2302 0 : idx = rpool->tblidx;
2303 0 : if (idx < 0 || idx >= kt->pfrkt_cnt)
2304 0 : idx = 0;
2305 : else
2306 : use_counter = 1;
2307 0 : startidx = idx;
2308 :
2309 : _next_block:
     : /* wrapped all the way around without finding anything usable */
2310 0 : if (loop && startidx == idx) {
2311 0 : kt->pfrkt_nomatch++;
2312 0 : return (1);
2313 : }
2314 :
2315 0 : ke = pfr_kentry_byidx(kt, idx, af);
2316 0 : if (ke == NULL) {
2317 : /* we don't have this idx, try looping */
2318 0 : if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
2319 0 : kt->pfrkt_nomatch++;
2320 0 : return (1);
2321 : }
2322 : idx = 0;
2323 0 : loop++;
2324 0 : }
2325 :
2326 : /* Get current weight for weighted round-robin */
2327 0 : if (idx == 0 && use_counter == 1 && kt->pfrkt_refcntcost > 0) {
2328 0 : rpool->curweight = rpool->curweight - kt->pfrkt_gcdweight;
2329 :
2330 0 : if (rpool->curweight < 1)
2331 0 : rpool->curweight = kt->pfrkt_maxweight;
2332 : }
2333 :
2334 0 : pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2335 0 : *raddr = SUNION2PF(&ke->pfrke_sa, af);
2336 0 : *rmask = SUNION2PF(&pfr_mask, af);
2337 :
2338 0 : if (use_counter && !PF_AZERO(counter, af)) {
2339 : /* is supplied address within block? */
2340 0 : if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2341 : /* no, go to next block in table */
2342 0 : idx++;
2343 : use_counter = 0;
2344 0 : goto _next_block;
2345 : }
2346 0 : PF_ACPY(addr, counter, af);
2347 0 : } else {
2348 : /* use first address of block */
2349 0 : PF_ACPY(addr, *raddr, af);
2350 : }
2351 :
2352 0 : if (!KENTRY_NETWORK(ke)) {
2353 : /* this is a single IP address - no possible nested block */
2354 0 : if (rpool->addr.type == PF_ADDR_DYNIFTL &&
2355 0 : pfr_islinklocal(af, addr)) {
2356 0 : idx++;
2357 0 : goto _next_block;
2358 : }
2359 0 : PF_ACPY(counter, addr, af);
2360 0 : rpool->tblidx = idx;
2361 0 : kt->pfrkt_match++;
2362 0 : rpool->states = 0;
2363 0 : if (ke->pfrke_counters != NULL)
2364 0 : rpool->states = ke->pfrke_counters->states;
2365 0 : switch (ke->pfrke_type) {
2366 : case PFRKE_COST:
2367 0 : rpool->weight = ((struct pfr_kentry_cost *)ke)->weight;
2368 : /* FALLTHROUGH */
2369 : case PFRKE_ROUTE:
2370 0 : rpool->kif = ((struct pfr_kentry_route *)ke)->kif;
2371 0 : break;
2372 : default:
2373 0 : rpool->weight = 1;
2374 0 : break;
2375 : }
2376 0 : return (0);
2377 : }
2378 0 : for (;;) {
2379 : /* we don't want to use a nested block */
2380 0 : switch (af) {
2381 : case AF_INET:
2382 0 : ke2 = (struct pfr_kentry *)rn_match(&tmp4,
2383 0 : kt->pfrkt_ip4);
2384 0 : break;
2385 : #ifdef INET6
2386 : case AF_INET6:
2387 0 : ke2 = (struct pfr_kentry *)rn_match(&tmp6,
2388 0 : kt->pfrkt_ip6);
2389 0 : break;
2390 : #endif /* INET6 */
2391 : default:
2392 0 : unhandled_af(af);
2393 : }
2394 0 : if (ke2 == ke) {
2395 : /* lookup return the same block - perfect */
2396 0 : if (rpool->addr.type == PF_ADDR_DYNIFTL &&
2397 0 : pfr_islinklocal(af, addr))
2398 : goto _next_entry;
2399 0 : PF_ACPY(counter, addr, af);
2400 0 : rpool->tblidx = idx;
2401 0 : kt->pfrkt_match++;
2402 0 : rpool->states = 0;
2403 0 : if (ke->pfrke_counters != NULL)
2404 0 : rpool->states = ke->pfrke_counters->states;
2405 0 : switch (ke->pfrke_type) {
2406 : case PFRKE_COST:
2407 0 : rpool->weight =
2408 0 : ((struct pfr_kentry_cost *)ke)->weight;
2409 : /* FALLTHROUGH */
2410 : case PFRKE_ROUTE:
2411 0 : rpool->kif = ((struct pfr_kentry_route *)ke)->kif;
2412 0 : break;
2413 : default:
2414 0 : rpool->weight = 1;
2415 0 : break;
2416 : }
2417 0 : return (0);
2418 : }
2419 : _next_entry:
2420 : /* we need to increase the counter past the nested block */
     : /* NOTE(review): AF_INET literal here even when af == AF_INET6;
     :  * SUNION2PF(&mask, af) then reads the sin6 part of a mask that
     :  * was built as sin.  Confirm intent - other BSDs pass `af'. */
2421 0 : pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2422 0 : PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2423 0 : PF_AINC(addr, af);
2424 0 : if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2425 : /* ok, we reached the end of our main block */
2426 : /* go to next block in table */
2427 0 : idx++;
2428 : use_counter = 0;
2429 0 : goto _next_block;
2430 : }
2431 : }
2432 0 : }
2433 :
     : /*
     :  * Return the idx-th entry (walk order) of table kt for family af,
     :  * or NULL if there is no such entry / the family is unsupported.
     :  * Implemented as a full radix-tree walk with a countdown (PFRW_POOL_GET).
     :  */
2434 : struct pfr_kentry *
2435 0 : pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2436 : {
2437 0 : struct pfr_walktree w;
2438 :
2439 0 : bzero(&w, sizeof(w));
2440 0 : w.pfrw_op = PFRW_POOL_GET;
2441 0 : w.pfrw_cnt = idx;
2442 :
2443 0 : switch (af) {
2444 : case AF_INET:
2445 0 : rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2446 0 : return (w.pfrw_kentry);
2447 : #ifdef INET6
2448 : case AF_INET6:
2449 0 : rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2450 0 : return (w.pfrw_kentry);
2451 : #endif /* INET6 */
2452 : default:
2453 0 : return (NULL);
2454 : }
2455 0 : }
2456 :
2457 : /* Added for load balancing state counter use. */
     : /*
     :  * Increment the per-entry state counter for the entry matching
     :  * `addr' (exact match) in table kt.  Counters are allocated lazily
     :  * without sleeping.  Returns the new state count, or -1 when the
     :  * entry is missing or the counter allocation fails.
     :  */
2458 : int
2459 0 : pfr_states_increase(struct pfr_ktable *kt, struct pf_addr *addr, int af)
2460 : {
2461 : struct pfr_kentry *ke;
2462 :
2463 0 : ke = pfr_kentry_byaddr(kt, addr, af, 1);
2464 0 : if (ke == NULL)
2465 0 : return (-1);
2466 :
2467 0 : if (ke->pfrke_counters == NULL)
2468 0 : ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2469 : PR_NOWAIT | PR_ZERO);
2470 0 : if (ke->pfrke_counters == NULL)
2471 0 : return (-1);
2472 :
2473 0 : ke->pfrke_counters->states++;
2474 0 : return ke->pfrke_counters->states;
2475 0 : }
2476 :
2477 : /* Added for load balancing state counter use. */
     : /*
     :  * Decrement the per-entry state counter for the entry matching
     :  * `addr' (exact match) in table kt; never lets the count go below
     :  * zero (logs instead).  Returns the new state count, or -1 when
     :  * the entry is missing or the counter allocation fails.
     :  */
2478 : int
2479 0 : pfr_states_decrease(struct pfr_ktable *kt, struct pf_addr *addr, int af)
2480 : {
2481 : struct pfr_kentry *ke;
2482 :
2483 0 : ke = pfr_kentry_byaddr(kt, addr, af, 1);
2484 0 : if (ke == NULL)
2485 0 : return (-1);
2486 :
2487 0 : if (ke->pfrke_counters == NULL)
2488 0 : ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2489 : PR_NOWAIT | PR_ZERO);
2490 0 : if (ke->pfrke_counters == NULL)
2491 0 : return (-1);
2492 :
2493 0 : if (ke->pfrke_counters->states > 0)
2494 0 : ke->pfrke_counters->states--;
2495 : else
2496 0 : DPFPRINTF(LOG_DEBUG,
2497 : "pfr_states_decrease: states-- when states <= 0");
2498 :
2499 0 : return ke->pfrke_counters->states;
2500 0 : }
2501 :
2502 : /*
2503 : * Added for load balancing to find a kentry outside of the table.
2504 : * We need to create a custom pfr_addr struct.
2505 : */
     : /*
     :  * Look up the entry for host address `addr' in table kt by building
     :  * a full-length (/32 or /128) pfr_addr key.  `exact' is passed
     :  * through to pfr_lookup_addr().  Returns the entry or NULL.
     :  */
2506 : struct pfr_kentry *
2507 0 : pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *addr, sa_family_t af,
2508 : int exact)
2509 : {
2510 : struct pfr_kentry *ke;
2511 0 : struct pfr_addr p;
2512 :
2513 0 : bzero(&p, sizeof(p));
2514 0 : p.pfra_af = af;
2515 0 : switch (af) {
2516 : case AF_INET:
2517 0 : p.pfra_net = 32;
2518 0 : p.pfra_ip4addr = addr->v4;
2519 0 : break;
2520 : #ifdef INET6
2521 : case AF_INET6:
2522 0 : p.pfra_net = 128;
2523 0 : p.pfra_ip6addr = addr->v6;
2524 0 : break;
2525 : #endif /* INET6 */
2526 : default:
2527 0 : unhandled_af(af);
2528 : }
2529 :
2530 0 : ke = pfr_lookup_addr(kt, &p, exact);
2531 :
2532 0 : return ke;
2533 0 : }
2534 :
     : /*
     :  * Refresh a dynamic address (`dyn') from table kt: reset the per-
     :  * family address counts and walk the radix tree(s) for the
     :  * configured family (or both, for AF_UNSPEC) with the
     :  * PFRW_DYNADDR_UPDATE walker, which repopulates them.
     :  */
2535 : void
2536 0 : pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2537 : {
2538 0 : struct pfr_walktree w;
2539 :
2540 0 : bzero(&w, sizeof(w));
2541 0 : w.pfrw_op = PFRW_DYNADDR_UPDATE;
2542 0 : w.pfrw_dyn = dyn;
2543 :
2544 0 : dyn->pfid_acnt4 = 0;
2545 0 : dyn->pfid_acnt6 = 0;
2546 0 : switch (dyn->pfid_af) {
2547 : case AF_UNSPEC: /* look up both IPv4 and IPv6 addresses */
2548 0 : rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2549 0 : rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2550 0 : break;
2551 : case AF_INET:
2552 0 : rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2553 0 : break;
2554 : #ifdef INET6
2555 : case AF_INET6:
2556 0 : rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2557 0 : break;
2558 : #endif /* INET6 */
2559 : default:
2560 0 : unhandled_af(dyn->pfid_af);
2561 : }
2562 0 : }
2563 :
     : /*
     :  * Fold entry p's weight into the table-wide weight info used by
     :  * weighted round-robin: keep pfrkt_gcdweight the gcd of all entry
     :  * weights and pfrkt_maxweight their maximum.  Non-PFRKE_COST
     :  * entries count with weight 1.  Only done while some rule uses
     :  * cost-based selection (pfrkt_refcntcost > 0).
     :  */
2564 : void
2565 0 : pfr_ktable_winfo_update(struct pfr_ktable *kt, struct pfr_kentry *p) {
2566 : /*
2567 : * If cost flag is set,
2568 : * gcdweight is needed for round-robin.
2569 : */
2570 0 : if (kt->pfrkt_refcntcost > 0) {
2571 : u_int16_t weight;
2572 :
2573 0 : weight = (p->pfrke_type == PFRKE_COST) ?
2574 0 : ((struct pfr_kentry_cost *)p)->weight : 1;
2575 :
     : /* seed the gcd with the first weight seen */
2576 0 : if (kt->pfrkt_gcdweight == 0)
2577 0 : kt->pfrkt_gcdweight = weight;
2578 :
2579 0 : kt->pfrkt_gcdweight =
2580 0 : pfr_gcd(weight, kt->pfrkt_gcdweight);
2581 :
2582 0 : if (kt->pfrkt_maxweight < weight)
2583 0 : kt->pfrkt_maxweight = weight;
2584 0 : }
2585 0 : }
|