Line data Source code
1 : /* $OpenBSD: pf_ioctl.c,v 1.337 2018/09/11 07:53:38 sashan Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2001 Daniel Hartmeier
5 : * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org>
6 : * All rights reserved.
7 : *
8 : * Redistribution and use in source and binary forms, with or without
9 : * modification, are permitted provided that the following conditions
10 : * are met:
11 : *
12 : * - Redistributions of source code must retain the above copyright
13 : * notice, this list of conditions and the following disclaimer.
14 : * - Redistributions in binary form must reproduce the above
15 : * copyright notice, this list of conditions and the following
16 : * disclaimer in the documentation and/or other materials provided
17 : * with the distribution.
18 : *
19 : * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 : * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 : * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 : * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 : * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 : * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 : * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 : * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 : * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 : * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 : * POSSIBILITY OF SUCH DAMAGE.
31 : *
32 : * Effort sponsored in part by the Defense Advanced Research Projects
33 : * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 : * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35 : *
36 : */
37 :
38 : #include "pfsync.h"
39 : #include "pflog.h"
40 :
41 : #include <sys/param.h>
42 : #include <sys/systm.h>
43 : #include <sys/mbuf.h>
44 : #include <sys/filio.h>
45 : #include <sys/fcntl.h>
46 : #include <sys/socket.h>
47 : #include <sys/socketvar.h>
48 : #include <sys/kernel.h>
49 : #include <sys/time.h>
50 : #include <sys/timeout.h>
51 : #include <sys/pool.h>
52 : #include <sys/malloc.h>
53 : #include <sys/kthread.h>
54 : #include <sys/rwlock.h>
55 : #include <sys/syslog.h>
56 : #include <uvm/uvm_extern.h>
57 :
58 : #include <crypto/md5.h>
59 :
60 : #include <net/if.h>
61 : #include <net/if_var.h>
62 : #include <net/route.h>
63 : #include <net/hfsc.h>
64 : #include <net/fq_codel.h>
65 :
66 : #include <netinet/in.h>
67 : #include <netinet/ip.h>
68 : #include <netinet/in_pcb.h>
69 : #include <netinet/ip_var.h>
70 : #include <netinet/ip_icmp.h>
71 : #include <netinet/tcp.h>
72 : #include <netinet/udp.h>
73 :
74 : #ifdef INET6
75 : #include <netinet/ip6.h>
76 : #include <netinet/icmp6.h>
77 : #endif /* INET6 */
78 :
79 : #include <net/pfvar.h>
80 : #include <net/pfvar_priv.h>
81 :
82 : #if NPFSYNC > 0
83 : #include <netinet/ip_ipsp.h>
84 : #include <net/if_pfsync.h>
85 : #endif /* NPFSYNC > 0 */
86 :
87 : struct pool pf_tag_pl;
88 :
89 : void pfattach(int);
90 : void pf_thread_create(void *);
91 : int pfopen(dev_t, int, int, struct proc *);
92 : int pfclose(dev_t, int, int, struct proc *);
93 : int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
94 : int pf_begin_rules(u_int32_t *, const char *);
95 : int pf_rollback_rules(u_int32_t, char *);
96 : void pf_remove_queues(void);
97 : int pf_commit_queues(void);
98 : void pf_free_queues(struct pf_queuehead *);
99 : int pf_setup_pfsync_matching(struct pf_ruleset *);
100 : void pf_hash_rule(MD5_CTX *, struct pf_rule *);
101 : void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
102 : int pf_commit_rules(u_int32_t, char *);
103 : int pf_addr_setup(struct pf_ruleset *,
104 : struct pf_addr_wrap *, sa_family_t);
105 : int pf_kif_setup(char *, struct pfi_kif **);
106 : void pf_addr_copyout(struct pf_addr_wrap *);
107 : void pf_trans_set_commit(void);
108 : void pf_pool_copyin(struct pf_pool *, struct pf_pool *);
109 : int pf_rule_copyin(struct pf_rule *, struct pf_rule *,
110 : struct pf_ruleset *);
111 : u_int16_t pf_qname2qid(char *, int);
112 : void pf_qid2qname(u_int16_t, char *);
113 : void pf_qid_unref(u_int16_t);
114 :
115 : struct pf_rule pf_default_rule, pf_default_rule_new;
116 :
117 : struct {
118 : char statusif[IFNAMSIZ];
119 : u_int32_t debug;
120 : u_int32_t hostid;
121 : u_int32_t reass;
122 : u_int32_t mask;
123 : } pf_trans_set;
124 :
125 : #define PF_TSET_STATUSIF 0x01
126 : #define PF_TSET_DEBUG 0x02
127 : #define PF_TSET_HOSTID 0x04
128 : #define PF_TSET_REASS 0x08
129 :
130 : #define TAGID_MAX 50000
131 : TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
132 : pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
133 :
134 : #ifdef WITH_PF_LOCK
135 : /*
136 : * pf_lock protects consistency of PF data structures, which don't have
137 : * their dedicated lock yet. The pf_lock currently protects:
138 : * - rules,
139 : * - radix tables,
140 : * - source nodes
141 : * All callers must grab pf_lock exclusively.
142 : *
143 : * pf_state_lock protects the consistency of the state table. Packets that
144 : * only do a state lookup grab the lock as readers. A packet that must
145 : * create state grabs the lock as a writer: it grabs pf_lock first, then
146 : * locks pf_state_lock as the writer.
147 : */
148 : struct rwlock pf_lock = RWLOCK_INITIALIZER("pf_lock");
149 : struct rwlock pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
150 : #endif /* WITH_PF_LOCK */
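
/*
 * Illustrative sketch of the lock order described above (a hypothetical
 * caller, not code from this file; it only assumes the PF_LOCK and
 * PF_STATE_* wrapper macros that the ioctl handlers below already use):
 *
 *	PF_LOCK();			// rules, tables, source nodes
 *	PF_STATE_ENTER_WRITE();		// state table, exclusive
 *	... create and insert the new state ...
 *	PF_STATE_EXIT_WRITE();
 *	PF_UNLOCK();
 *
 * A plain lookup only needs PF_STATE_ENTER_READ()/PF_STATE_EXIT_READ().
 */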
151 :
152 : #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
153 : #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
154 : #endif
155 : u_int16_t tagname2tag(struct pf_tags *, char *, int);
156 : void tag2tagname(struct pf_tags *, u_int16_t, char *);
157 : void tag_unref(struct pf_tags *, u_int16_t);
158 : int pf_rtlabel_add(struct pf_addr_wrap *);
159 : void pf_rtlabel_remove(struct pf_addr_wrap *);
160 : void pf_rtlabel_copyout(struct pf_addr_wrap *);
161 :
162 :
163 : void
164 0 : pfattach(int num)
165 : {
166 : u_int32_t *timeout = pf_default_rule.timeout;
167 :
168 0 : pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
169 : IPL_SOFTNET, 0, "pfrule", NULL);
170 0 : pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
171 : IPL_SOFTNET, 0, "pfsrctr", NULL);
172 0 : pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
173 : IPL_SOFTNET, 0, "pfsnitem", NULL);
174 0 : pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
175 : IPL_SOFTNET, 0, "pfstate", NULL);
176 0 : pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
177 : IPL_SOFTNET, 0, "pfstkey", NULL);
178 0 : pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
179 : IPL_SOFTNET, 0, "pfstitem", NULL);
180 0 : pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
181 : IPL_SOFTNET, 0, "pfruleitem", NULL);
182 0 : pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
183 : IPL_SOFTNET, 0, "pfqueue", NULL);
184 0 : pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
185 : IPL_SOFTNET, 0, "pftag", NULL);
186 0 : pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
187 : IPL_SOFTNET, 0, "pfpktdelay", NULL);
188 :
189 0 : hfsc_initialize();
190 0 : pfr_initialize();
191 0 : pfi_initialize();
192 0 : pf_osfp_initialize();
193 0 : pf_syncookies_init();
194 :
195 0 : pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
196 0 : pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
197 :
198 0 : if (physmem <= atop(100*1024*1024))
199 0 : pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
200 : PFR_KENTRY_HIWAT_SMALL;
201 :
202 0 : RB_INIT(&tree_src_tracking);
203 0 : RB_INIT(&pf_anchors);
204 0 : pf_init_ruleset(&pf_main_ruleset);
205 0 : TAILQ_INIT(&pf_queues[0]);
206 0 : TAILQ_INIT(&pf_queues[1]);
207 0 : pf_queues_active = &pf_queues[0];
208 0 : pf_queues_inactive = &pf_queues[1];
209 0 : TAILQ_INIT(&state_list);
210 :
211 : /* default rule should never be garbage collected */
212 0 : pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
213 0 : pf_default_rule.action = PF_PASS;
214 0 : pf_default_rule.nr = (u_int32_t)-1;
215 0 : pf_default_rule.rtableid = -1;
216 :
217 : /* initialize default timeouts */
218 0 : timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
219 0 : timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
220 0 : timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
221 0 : timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
222 0 : timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
223 0 : timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
224 0 : timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
225 0 : timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
226 0 : timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
227 0 : timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
228 0 : timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
229 0 : timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
230 0 : timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
231 0 : timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
232 0 : timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
233 0 : timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
234 0 : timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
235 0 : timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
236 0 : timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
237 0 : timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
238 :
239 0 : pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
240 0 : pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
241 0 : pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
242 0 : pf_default_rule.nat.addr.type = PF_ADDR_NONE;
243 0 : pf_default_rule.route.addr.type = PF_ADDR_NONE;
244 :
245 0 : pf_normalize_init();
246 0 : memset(&pf_status, 0, sizeof(pf_status));
247 0 : pf_status.debug = LOG_ERR;
248 0 : pf_status.reass = PF_REASS_ENABLED;
249 :
250 : /* XXX do our best to avoid a conflict */
251 0 : pf_status.hostid = arc4random();
252 0 : }
253 :
254 : int
255 0 : pfopen(dev_t dev, int flags, int fmt, struct proc *p)
256 : {
257 0 : if (minor(dev) >= 1)
258 0 : return (ENXIO);
259 0 : return (0);
260 0 : }
261 :
262 : int
263 0 : pfclose(dev_t dev, int flags, int fmt, struct proc *p)
264 : {
265 0 : if (minor(dev) >= 1)
266 0 : return (ENXIO);
267 0 : return (0);
268 0 : }
269 :
270 : void
271 0 : pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
272 : {
273 0 : if (rulequeue != NULL) {
274 0 : if (rule->states_cur == 0 && rule->src_nodes == 0) {
275 : /*
276 : * XXX - we need to remove the table *before* detaching
277 : * the rule to make sure the table code does not delete
278 : * the anchor under our feet.
279 : */
280 0 : pf_tbladdr_remove(&rule->src.addr);
281 0 : pf_tbladdr_remove(&rule->dst.addr);
282 0 : pf_tbladdr_remove(&rule->rdr.addr);
283 0 : pf_tbladdr_remove(&rule->nat.addr);
284 0 : pf_tbladdr_remove(&rule->route.addr);
285 0 : if (rule->overload_tbl)
286 0 : pfr_detach_table(rule->overload_tbl);
287 : }
288 0 : TAILQ_REMOVE(rulequeue, rule, entries);
289 0 : rule->entries.tqe_prev = NULL;
290 0 : rule->nr = (u_int32_t)-1;
291 0 : }
292 :
293 0 : if (rule->states_cur > 0 || rule->src_nodes > 0 ||
294 0 : rule->entries.tqe_prev != NULL)
295 : return;
296 0 : pf_tag_unref(rule->tag);
297 0 : pf_tag_unref(rule->match_tag);
298 0 : pf_rtlabel_remove(&rule->src.addr);
299 0 : pf_rtlabel_remove(&rule->dst.addr);
300 0 : pfi_dynaddr_remove(&rule->src.addr);
301 0 : pfi_dynaddr_remove(&rule->dst.addr);
302 0 : pfi_dynaddr_remove(&rule->rdr.addr);
303 0 : pfi_dynaddr_remove(&rule->nat.addr);
304 0 : pfi_dynaddr_remove(&rule->route.addr);
305 0 : if (rulequeue == NULL) {
306 0 : pf_tbladdr_remove(&rule->src.addr);
307 0 : pf_tbladdr_remove(&rule->dst.addr);
308 0 : pf_tbladdr_remove(&rule->rdr.addr);
309 0 : pf_tbladdr_remove(&rule->nat.addr);
310 0 : pf_tbladdr_remove(&rule->route.addr);
311 0 : if (rule->overload_tbl)
312 0 : pfr_detach_table(rule->overload_tbl);
313 : }
314 0 : pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
315 0 : pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
316 0 : pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
317 0 : pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
318 0 : pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
319 0 : pf_anchor_remove(rule);
320 0 : pool_put(&pf_rule_pl, rule);
321 0 : }
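
/*
 * Sketch of how pf_rm_rule() is typically driven (an illustration, not
 * code from this file): a rule that still has states or source nodes
 * attached is only unlinked on the first call and freed by a later call
 * with rulequeue == NULL, e.g. once the last state referencing it is gone.
 *
 *	pf_rm_rule(ruleset->rules.active.ptr, r);  // states_cur > 0:
 *						   // unlinked, kept allocated
 *	... last state using r is purged ...
 *	pf_rm_rule(NULL, r);			   // now r is released
 */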
322 :
323 : void
324 0 : pf_purge_rule(struct pf_rule *rule)
325 : {
326 : u_int32_t nr = 0;
327 : struct pf_ruleset *ruleset;
328 :
329 0 : KASSERT((rule != NULL) && (rule->ruleset != NULL));
330 : ruleset = rule->ruleset;
331 :
332 0 : pf_rm_rule(ruleset->rules.active.ptr, rule);
333 0 : ruleset->rules.active.rcount--;
334 0 : TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
335 0 : rule->nr = nr++;
336 0 : ruleset->rules.active.ticket++;
337 0 : pf_calc_skip_steps(ruleset->rules.active.ptr);
338 0 : pf_remove_if_empty_ruleset(ruleset);
339 0 : }
340 :
341 : u_int16_t
342 0 : tagname2tag(struct pf_tags *head, char *tagname, int create)
343 : {
344 : struct pf_tagname *tag, *p = NULL;
345 : u_int16_t new_tagid = 1;
346 :
347 0 : TAILQ_FOREACH(tag, head, entries)
348 0 : if (strcmp(tagname, tag->name) == 0) {
349 0 : tag->ref++;
350 0 : return (tag->tag);
351 : }
352 :
353 0 : if (!create)
354 0 : return (0);
355 :
356 : /*
357 : * to avoid fragmentation, we do a linear search from the beginning
358 : * and take the first free slot we find. if there is none or the list
359 : * is empty, append a new entry at the end.
360 : */
361 :
362 : /* find the first free tag id; the list is kept sorted by tag */
363 0 : TAILQ_FOREACH(p, head, entries) {
364 0 : if (p->tag != new_tagid)
365 : break;
366 0 : new_tagid = p->tag + 1;
367 : }
368 :
369 0 : if (new_tagid > TAGID_MAX)
370 0 : return (0);
371 :
372 : /* allocate and fill new struct pf_tagname */
373 0 : tag = pool_get(&pf_tag_pl, PR_NOWAIT | PR_ZERO);
374 0 : if (tag == NULL)
375 0 : return (0);
376 0 : strlcpy(tag->name, tagname, sizeof(tag->name));
377 0 : tag->tag = new_tagid;
378 0 : tag->ref++;
379 :
380 0 : if (p != NULL) /* insert new entry before p */
381 0 : TAILQ_INSERT_BEFORE(p, tag, entries);
382 : else /* either list empty or no free slot in between */
383 0 : TAILQ_INSERT_TAIL(head, tag, entries);
384 :
385 0 : return (tag->tag);
386 0 : }
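
/*
 * Worked example of the slot reuse above (hypothetical tag names):
 *
 *	tagname2tag(&pf_tags, "a", 1)	-> 1
 *	tagname2tag(&pf_tags, "b", 1)	-> 2
 *	tagname2tag(&pf_tags, "c", 1)	-> 3
 *	pf_tag_unref(2)			   last ref on "b", slot 2 freed
 *	tagname2tag(&pf_tags, "d", 1)	-> 2 (first gap found, inserted
 *					      before the entry holding 3)
 *
 * so tag ids stay dense instead of creeping toward TAGID_MAX.
 */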
387 :
388 : void
389 0 : tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
390 : {
391 : struct pf_tagname *tag;
392 :
393 0 : TAILQ_FOREACH(tag, head, entries)
394 0 : if (tag->tag == tagid) {
395 0 : strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
396 0 : return;
397 : }
398 0 : }
399 :
400 : void
401 0 : tag_unref(struct pf_tags *head, u_int16_t tag)
402 : {
403 : struct pf_tagname *p, *next;
404 :
405 0 : if (tag == 0)
406 0 : return;
407 :
408 0 : TAILQ_FOREACH_SAFE(p, head, entries, next) {
409 0 : if (tag == p->tag) {
410 0 : if (--p->ref == 0) {
411 0 : TAILQ_REMOVE(head, p, entries);
412 0 : pool_put(&pf_tag_pl, p);
413 0 : }
414 : break;
415 : }
416 : }
417 0 : }
418 :
419 : u_int16_t
420 0 : pf_tagname2tag(char *tagname, int create)
421 : {
422 0 : return (tagname2tag(&pf_tags, tagname, create));
423 : }
424 :
425 : void
426 0 : pf_tag2tagname(u_int16_t tagid, char *p)
427 : {
428 0 : tag2tagname(&pf_tags, tagid, p);
429 0 : }
430 :
431 : void
432 0 : pf_tag_ref(u_int16_t tag)
433 : {
434 : struct pf_tagname *t;
435 :
436 0 : TAILQ_FOREACH(t, &pf_tags, entries)
437 0 : if (t->tag == tag)
438 : break;
439 0 : if (t != NULL)
440 0 : t->ref++;
441 0 : }
442 :
443 : void
444 0 : pf_tag_unref(u_int16_t tag)
445 : {
446 0 : tag_unref(&pf_tags, tag);
447 0 : }
448 :
449 : int
450 0 : pf_rtlabel_add(struct pf_addr_wrap *a)
451 : {
452 0 : if (a->type == PF_ADDR_RTLABEL &&
453 0 : (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
454 0 : return (-1);
455 0 : return (0);
456 0 : }
457 :
458 : void
459 0 : pf_rtlabel_remove(struct pf_addr_wrap *a)
460 : {
461 0 : if (a->type == PF_ADDR_RTLABEL)
462 0 : rtlabel_unref(a->v.rtlabel);
463 0 : }
464 :
465 : void
466 0 : pf_rtlabel_copyout(struct pf_addr_wrap *a)
467 : {
468 : const char *name;
469 :
470 0 : if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
471 0 : if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
472 0 : strlcpy(a->v.rtlabelname, "?",
473 : sizeof(a->v.rtlabelname));
474 : else
475 0 : strlcpy(a->v.rtlabelname, name,
476 : sizeof(a->v.rtlabelname));
477 : }
478 0 : }
479 :
480 : u_int16_t
481 0 : pf_qname2qid(char *qname, int create)
482 : {
483 0 : return (tagname2tag(&pf_qids, qname, create));
484 : }
485 :
486 : void
487 0 : pf_qid2qname(u_int16_t qid, char *p)
488 : {
489 0 : tag2tagname(&pf_qids, qid, p);
490 0 : }
491 :
492 : void
493 0 : pf_qid_unref(u_int16_t qid)
494 : {
495 0 : tag_unref(&pf_qids, (u_int16_t)qid);
496 0 : }
497 :
498 : int
499 0 : pf_begin_rules(u_int32_t *ticket, const char *anchor)
500 : {
501 : struct pf_ruleset *rs;
502 : struct pf_rule *rule;
503 :
504 0 : if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
505 0 : return (EINVAL);
506 0 : while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
507 0 : pf_rm_rule(rs->rules.inactive.ptr, rule);
508 0 : rs->rules.inactive.rcount--;
509 : }
510 0 : *ticket = ++rs->rules.inactive.ticket;
511 0 : rs->rules.inactive.open = 1;
512 0 : return (0);
513 0 : }
514 :
515 : int
516 0 : pf_rollback_rules(u_int32_t ticket, char *anchor)
517 : {
518 : struct pf_ruleset *rs;
519 : struct pf_rule *rule;
520 :
521 0 : rs = pf_find_ruleset(anchor);
522 0 : if (rs == NULL || !rs->rules.inactive.open ||
523 0 : rs->rules.inactive.ticket != ticket)
524 0 : return (0);
525 0 : while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
526 0 : pf_rm_rule(rs->rules.inactive.ptr, rule);
527 0 : rs->rules.inactive.rcount--;
528 : }
529 0 : rs->rules.inactive.open = 0;
530 :
531 : /* queue defs only in the main ruleset */
532 0 : if (anchor[0])
533 0 : return (0);
534 :
535 0 : pf_free_queues(pf_queues_inactive);
536 :
537 0 : return (0);
538 0 : }
539 :
540 : void
541 0 : pf_free_queues(struct pf_queuehead *where)
542 : {
543 : struct pf_queuespec *q, *qtmp;
544 :
545 0 : TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
546 0 : TAILQ_REMOVE(where, q, entries);
547 0 : pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
548 0 : pool_put(&pf_queue_pl, q);
549 : }
550 0 : }
551 :
552 : void
553 0 : pf_remove_queues(void)
554 : {
555 : struct pf_queuespec *q;
556 : struct ifnet *ifp;
557 :
558 : /* put back interfaces in normal queueing mode */
559 0 : TAILQ_FOREACH(q, pf_queues_active, entries) {
560 0 : if (q->parent_qid != 0)
561 : continue;
562 :
563 0 : ifp = q->kif->pfik_ifp;
564 0 : if (ifp == NULL)
565 : continue;
566 :
567 0 : ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
568 0 : }
569 0 : }
570 :
571 : struct pf_queue_if {
572 : struct ifnet *ifp;
573 : const struct ifq_ops *ifqops;
574 : const struct pfq_ops *pfqops;
575 : void *disc;
576 : struct pf_queue_if *next;
577 : };
578 :
579 : static inline struct pf_queue_if *
580 0 : pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
581 : {
582 : struct pf_queue_if *qif = list;
583 :
584 0 : while (qif != NULL) {
585 0 : if (qif->ifp == ifp)
586 0 : return (qif);
587 :
588 0 : qif = qif->next;
589 : }
590 :
591 0 : return (qif);
592 0 : }
593 :
594 : int
595 0 : pf_create_queues(void)
596 : {
597 : struct pf_queuespec *q;
598 : struct ifnet *ifp;
599 : struct pf_queue_if *list = NULL, *qif;
600 : int error;
601 :
602 : /*
603 : * Find root queues and allocate traffic conditioner
604 : * private data for these interfaces
605 : */
606 0 : TAILQ_FOREACH(q, pf_queues_active, entries) {
607 0 : if (q->parent_qid != 0)
608 : continue;
609 :
610 0 : ifp = q->kif->pfik_ifp;
611 0 : if (ifp == NULL)
612 : continue;
613 :
614 0 : qif = malloc(sizeof(*qif), M_TEMP, M_WAITOK);
615 0 : qif->ifp = ifp;
616 :
617 0 : if (q->flags & PFQS_ROOTCLASS) {
618 0 : qif->ifqops = ifq_hfsc_ops;
619 0 : qif->pfqops = pfq_hfsc_ops;
620 0 : } else {
621 0 : qif->ifqops = ifq_fqcodel_ops;
622 0 : qif->pfqops = pfq_fqcodel_ops;
623 : }
624 :
625 0 : qif->disc = qif->pfqops->pfq_alloc(ifp);
626 :
627 0 : qif->next = list;
628 : list = qif;
629 0 : }
630 :
631 : /* and now everything */
632 0 : TAILQ_FOREACH(q, pf_queues_active, entries) {
633 0 : ifp = q->kif->pfik_ifp;
634 0 : if (ifp == NULL)
635 : continue;
636 :
637 0 : qif = pf_ifp2q(list, ifp);
638 0 : KASSERT(qif != NULL);
639 :
640 0 : error = qif->pfqops->pfq_addqueue(qif->disc, q);
641 0 : if (error != 0)
642 : goto error;
643 : }
644 :
645 : /* find root queues in old list to disable them if necessary */
646 0 : TAILQ_FOREACH(q, pf_queues_inactive, entries) {
647 0 : if (q->parent_qid != 0)
648 : continue;
649 :
650 0 : ifp = q->kif->pfik_ifp;
651 0 : if (ifp == NULL)
652 : continue;
653 :
654 0 : qif = pf_ifp2q(list, ifp);
655 0 : if (qif != NULL)
656 : continue;
657 :
658 0 : ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
659 0 : }
660 :
661 : /* commit the new queues */
662 0 : while (list != NULL) {
663 : qif = list;
664 0 : list = qif->next;
665 :
666 0 : ifp = qif->ifp;
667 :
668 0 : ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
669 0 : free(qif, M_TEMP, sizeof(*qif));
670 : }
671 :
672 0 : return (0);
673 :
674 : error:
675 0 : while (list != NULL) {
676 : qif = list;
677 0 : list = qif->next;
678 :
679 0 : qif->pfqops->pfq_free(qif->disc);
680 0 : free(qif, M_TEMP, sizeof(*qif));
681 : }
682 :
683 0 : return (error);
684 0 : }
685 :
686 : int
687 0 : pf_commit_queues(void)
688 : {
689 : struct pf_queuehead *qswap;
690 : int error;
691 :
692 : /* swap */
693 0 : qswap = pf_queues_active;
694 0 : pf_queues_active = pf_queues_inactive;
695 0 : pf_queues_inactive = qswap;
696 :
697 0 : error = pf_create_queues();
698 0 : if (error != 0) {
699 0 : pf_queues_inactive = pf_queues_active;
700 0 : pf_queues_active = qswap;
701 0 : return (error);
702 : }
703 :
704 0 : pf_free_queues(pf_queues_inactive);
705 :
706 0 : return (0);
707 0 : }
708 :
709 : const struct pfq_ops *
710 0 : pf_queue_manager(struct pf_queuespec *q)
711 : {
712 0 : if (q->flags & PFQS_FLOWQUEUE)
713 0 : return pfq_fqcodel_ops;
714 0 : return (/* pfq_default_ops */ NULL);
715 0 : }
716 :
717 : #define PF_MD5_UPD(st, elm) \
718 : MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
719 :
720 : #define PF_MD5_UPD_STR(st, elm) \
721 : MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
722 :
723 : #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
724 : (stor) = htonl((st)->elm); \
725 : MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
726 : } while (0)
727 :
728 : #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
729 : (stor) = htons((st)->elm); \
730 : MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
731 : } while (0)
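
/*
 * For illustration, a call site like PF_MD5_UPD(rule, action) expands to
 *
 *	MD5Update(ctx, (u_int8_t *) &(rule)->action, sizeof((rule)->action));
 *
 * i.e. the raw bytes of each selected field are fed into the digest. The
 * HTONL/HTONS variants byte-swap through a temporary first, which keeps
 * the resulting checksum independent of host byte order.
 */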
732 :
733 : void
734 0 : pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
735 : {
736 0 : PF_MD5_UPD(pfr, addr.type);
737 0 : switch (pfr->addr.type) {
738 : case PF_ADDR_DYNIFTL:
739 0 : PF_MD5_UPD(pfr, addr.v.ifname);
740 0 : PF_MD5_UPD(pfr, addr.iflags);
741 0 : break;
742 : case PF_ADDR_TABLE:
743 0 : PF_MD5_UPD(pfr, addr.v.tblname);
744 0 : break;
745 : case PF_ADDR_ADDRMASK:
746 : /* XXX ignore af? */
747 0 : PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
748 0 : PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
749 0 : break;
750 : case PF_ADDR_RTLABEL:
751 0 : PF_MD5_UPD(pfr, addr.v.rtlabelname);
752 0 : break;
753 : }
754 :
755 0 : PF_MD5_UPD(pfr, port[0]);
756 0 : PF_MD5_UPD(pfr, port[1]);
757 0 : PF_MD5_UPD(pfr, neg);
758 0 : PF_MD5_UPD(pfr, port_op);
759 0 : }
760 :
761 : void
762 0 : pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
763 : {
764 0 : u_int16_t x;
765 0 : u_int32_t y;
766 :
767 0 : pf_hash_rule_addr(ctx, &rule->src);
768 0 : pf_hash_rule_addr(ctx, &rule->dst);
769 0 : PF_MD5_UPD_STR(rule, label);
770 0 : PF_MD5_UPD_STR(rule, ifname);
771 0 : PF_MD5_UPD_STR(rule, rcv_ifname);
772 0 : PF_MD5_UPD_STR(rule, match_tagname);
773 0 : PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
774 0 : PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
775 0 : PF_MD5_UPD_HTONL(rule, prob, y);
776 0 : PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
777 0 : PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
778 0 : PF_MD5_UPD(rule, uid.op);
779 0 : PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
780 0 : PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
781 0 : PF_MD5_UPD(rule, gid.op);
782 0 : PF_MD5_UPD_HTONL(rule, rule_flag, y);
783 0 : PF_MD5_UPD(rule, action);
784 0 : PF_MD5_UPD(rule, direction);
785 0 : PF_MD5_UPD(rule, af);
786 0 : PF_MD5_UPD(rule, quick);
787 0 : PF_MD5_UPD(rule, ifnot);
788 0 : PF_MD5_UPD(rule, rcvifnot);
789 0 : PF_MD5_UPD(rule, match_tag_not);
790 0 : PF_MD5_UPD(rule, keep_state);
791 0 : PF_MD5_UPD(rule, proto);
792 0 : PF_MD5_UPD(rule, type);
793 0 : PF_MD5_UPD(rule, code);
794 0 : PF_MD5_UPD(rule, flags);
795 0 : PF_MD5_UPD(rule, flagset);
796 0 : PF_MD5_UPD(rule, allow_opts);
797 0 : PF_MD5_UPD(rule, rt);
798 0 : PF_MD5_UPD(rule, tos);
799 0 : }
800 :
801 : int
802 0 : pf_commit_rules(u_int32_t ticket, char *anchor)
803 : {
804 : struct pf_ruleset *rs;
805 : struct pf_rule *rule, **old_array;
806 : struct pf_rulequeue *old_rules;
807 : int error;
808 : u_int32_t old_rcount;
809 :
810 : /* Make sure any expired rules get removed from active rules first. */
811 0 : pf_purge_expired_rules();
812 :
813 0 : rs = pf_find_ruleset(anchor);
814 0 : if (rs == NULL || !rs->rules.inactive.open ||
815 0 : ticket != rs->rules.inactive.ticket)
816 0 : return (EBUSY);
817 :
818 : /* Calculate checksum for the main ruleset */
819 0 : if (rs == &pf_main_ruleset) {
820 0 : error = pf_setup_pfsync_matching(rs);
821 0 : if (error != 0)
822 0 : return (error);
823 : }
824 :
825 : /* Swap rules, keep the old. */
826 0 : old_rules = rs->rules.active.ptr;
827 0 : old_rcount = rs->rules.active.rcount;
828 0 : old_array = rs->rules.active.ptr_array;
829 :
830 0 : rs->rules.active.ptr = rs->rules.inactive.ptr;
831 0 : rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
832 0 : rs->rules.active.rcount = rs->rules.inactive.rcount;
833 0 : rs->rules.inactive.ptr = old_rules;
834 0 : rs->rules.inactive.ptr_array = old_array;
835 0 : rs->rules.inactive.rcount = old_rcount;
836 :
837 0 : rs->rules.active.ticket = rs->rules.inactive.ticket;
838 0 : pf_calc_skip_steps(rs->rules.active.ptr);
839 :
840 :
841 : /* Purge the old rule list. */
842 0 : while ((rule = TAILQ_FIRST(old_rules)) != NULL)
843 0 : pf_rm_rule(old_rules, rule);
844 0 : if (rs->rules.inactive.ptr_array)
845 0 : free(rs->rules.inactive.ptr_array, M_TEMP, 0);
846 0 : rs->rules.inactive.ptr_array = NULL;
847 0 : rs->rules.inactive.rcount = 0;
848 0 : rs->rules.inactive.open = 0;
849 0 : pf_remove_if_empty_ruleset(rs);
850 :
851 : /* queue defs only in the main ruleset */
852 0 : if (anchor[0])
853 0 : return (0);
854 0 : return (pf_commit_queues());
855 0 : }
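
/*
 * Sketch of the transaction these helpers implement (in-kernel view only,
 * as an illustration; the ticket is the one handed out by pf_begin_rules):
 *
 *	pf_begin_rules(&ticket, anchor);   // flush inactive set, new ticket
 *	... DIOCADDRULE appends rules to rules.inactive under that ticket ...
 *	pf_commit_rules(ticket, anchor);   // swap inactive <-> active
 *	    -- or --
 *	pf_rollback_rules(ticket, anchor); // discard the inactive set
 *
 * Only the main ruleset additionally recomputes the pfsync checksum and
 * commits queue definitions.
 */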
856 :
857 : int
858 0 : pf_setup_pfsync_matching(struct pf_ruleset *rs)
859 : {
860 0 : MD5_CTX ctx;
861 : struct pf_rule *rule;
862 0 : u_int8_t digest[PF_MD5_DIGEST_LENGTH];
863 :
864 0 : MD5Init(&ctx);
865 0 : if (rs->rules.inactive.ptr_array)
866 0 : free(rs->rules.inactive.ptr_array, M_TEMP, 0);
867 0 : rs->rules.inactive.ptr_array = NULL;
868 :
869 0 : if (rs->rules.inactive.rcount) {
870 0 : rs->rules.inactive.ptr_array =
871 0 : mallocarray(rs->rules.inactive.rcount, sizeof(caddr_t),
872 : M_TEMP, M_NOWAIT);
873 :
874 0 : if (!rs->rules.inactive.ptr_array)
875 0 : return (ENOMEM);
876 :
877 0 : TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
878 0 : pf_hash_rule(&ctx, rule);
879 0 : (rs->rules.inactive.ptr_array)[rule->nr] = rule;
880 : }
881 : }
882 :
883 0 : MD5Final(digest, &ctx);
884 0 : memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
885 0 : return (0);
886 0 : }
887 :
888 : int
889 0 : pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
890 : sa_family_t af)
891 : {
892 0 : if (pfi_dynaddr_setup(addr, af) ||
893 0 : pf_tbladdr_setup(ruleset, addr) ||
894 0 : pf_rtlabel_add(addr))
895 0 : return (EINVAL);
896 :
897 0 : return (0);
898 0 : }
899 :
900 : int
901 0 : pf_kif_setup(char *ifname, struct pfi_kif **kif)
902 : {
903 0 : if (ifname[0]) {
904 0 : *kif = pfi_kif_get(ifname);
905 0 : if (*kif == NULL)
906 0 : return (EINVAL);
907 :
908 0 : pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
909 0 : } else
910 0 : *kif = NULL;
911 :
912 0 : return (0);
913 0 : }
914 :
915 : void
916 0 : pf_addr_copyout(struct pf_addr_wrap *addr)
917 : {
918 0 : pfi_dynaddr_copyout(addr);
919 0 : pf_tbladdr_copyout(addr);
920 0 : pf_rtlabel_copyout(addr);
921 0 : }
922 :
923 : int
924 0 : pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
925 : {
926 : int error = 0;
927 :
928 : /* XXX keep in sync with switch() below */
929 0 : if (securelevel > 1)
930 0 : switch (cmd) {
931 : case DIOCGETRULES:
932 : case DIOCGETRULE:
933 : case DIOCGETSTATE:
934 : case DIOCSETSTATUSIF:
935 : case DIOCGETSTATUS:
936 : case DIOCCLRSTATUS:
937 : case DIOCNATLOOK:
938 : case DIOCSETDEBUG:
939 : case DIOCGETSTATES:
940 : case DIOCGETTIMEOUT:
941 : case DIOCGETLIMIT:
942 : case DIOCGETRULESETS:
943 : case DIOCGETRULESET:
944 : case DIOCGETQUEUES:
945 : case DIOCGETQUEUE:
946 : case DIOCGETQSTATS:
947 : case DIOCRGETTABLES:
948 : case DIOCRGETTSTATS:
949 : case DIOCRCLRTSTATS:
950 : case DIOCRCLRADDRS:
951 : case DIOCRADDADDRS:
952 : case DIOCRDELADDRS:
953 : case DIOCRSETADDRS:
954 : case DIOCRGETASTATS:
955 : case DIOCRCLRASTATS:
956 : case DIOCRTSTADDRS:
957 : case DIOCOSFPGET:
958 : case DIOCGETSRCNODES:
959 : case DIOCCLRSRCNODES:
960 : case DIOCIGETIFACES:
961 : case DIOCSETIFFLAG:
962 : case DIOCCLRIFFLAG:
963 : case DIOCGETSYNFLWATS:
964 : break;
965 : case DIOCRCLRTABLES:
966 : case DIOCRADDTABLES:
967 : case DIOCRDELTABLES:
968 : case DIOCRSETTFLAGS:
969 0 : if (((struct pfioc_table *)addr)->pfrio_flags &
970 : PFR_FLAG_DUMMY)
971 : break; /* dummy operation ok */
972 0 : return (EPERM);
973 : default:
974 0 : return (EPERM);
975 : }
976 :
977 0 : if (!(flags & FWRITE))
978 0 : switch (cmd) {
979 : case DIOCGETRULES:
980 : case DIOCGETSTATE:
981 : case DIOCGETSTATUS:
982 : case DIOCGETSTATES:
983 : case DIOCGETTIMEOUT:
984 : case DIOCGETLIMIT:
985 : case DIOCGETRULESETS:
986 : case DIOCGETRULESET:
987 : case DIOCGETQUEUES:
988 : case DIOCGETQUEUE:
989 : case DIOCGETQSTATS:
990 : case DIOCNATLOOK:
991 : case DIOCRGETTABLES:
992 : case DIOCRGETTSTATS:
993 : case DIOCRGETADDRS:
994 : case DIOCRGETASTATS:
995 : case DIOCRTSTADDRS:
996 : case DIOCOSFPGET:
997 : case DIOCGETSRCNODES:
998 : case DIOCIGETIFACES:
999 : case DIOCGETSYNFLWATS:
1000 : break;
1001 : case DIOCRCLRTABLES:
1002 : case DIOCRADDTABLES:
1003 : case DIOCRDELTABLES:
1004 : case DIOCRCLRTSTATS:
1005 : case DIOCRCLRADDRS:
1006 : case DIOCRADDADDRS:
1007 : case DIOCRDELADDRS:
1008 : case DIOCRSETADDRS:
1009 : case DIOCRSETTFLAGS:
1010 0 : if (((struct pfioc_table *)addr)->pfrio_flags &
1011 : PFR_FLAG_DUMMY) {
1012 : flags |= FWRITE; /* need write lock for dummy */
1013 0 : break; /* dummy operation ok */
1014 : }
1015 0 : return (EACCES);
1016 : case DIOCGETRULE:
1017 0 : if (((struct pfioc_rule *)addr)->action ==
1018 : PF_GET_CLR_CNTR)
1019 0 : return (EACCES);
1020 : break;
1021 : default:
1022 0 : return (EACCES);
1023 : }
1024 :
1025 0 : NET_LOCK();
1026 0 : switch (cmd) {
1027 :
1028 : case DIOCSTART:
1029 : PF_LOCK();
1030 0 : if (pf_status.running)
1031 0 : error = EEXIST;
1032 : else {
1033 0 : pf_status.running = 1;
1034 0 : pf_status.since = time_uptime;
1035 0 : if (pf_status.stateid == 0) {
1036 0 : pf_status.stateid = time_second;
1037 0 : pf_status.stateid = pf_status.stateid << 32;
1038 0 : }
1039 0 : timeout_add(&pf_purge_to, 1 * hz);
1040 0 : pf_create_queues();
1041 0 : DPFPRINTF(LOG_NOTICE, "pf: started");
1042 : }
1043 : PF_UNLOCK();
1044 : break;
1045 :
1046 : case DIOCSTOP:
1047 : PF_LOCK();
1048 0 : if (!pf_status.running)
1049 0 : error = ENOENT;
1050 : else {
1051 0 : pf_status.running = 0;
1052 0 : pf_status.since = time_uptime;
1053 0 : pf_remove_queues();
1054 0 : DPFPRINTF(LOG_NOTICE, "pf: stopped");
1055 : }
1056 : PF_UNLOCK();
1057 : break;
1058 :
1059 : case DIOCGETQUEUES: {
1060 0 : struct pfioc_queue *pq = (struct pfioc_queue *)addr;
1061 : struct pf_queuespec *qs;
1062 : u_int32_t nr = 0;
1063 :
1064 : PF_LOCK();
1065 0 : pq->ticket = pf_main_ruleset.rules.active.ticket;
1066 :
1067 : /* save state to not run over them all each time? */
1068 0 : qs = TAILQ_FIRST(pf_queues_active);
1069 0 : while (qs != NULL) {
1070 0 : qs = TAILQ_NEXT(qs, entries);
1071 0 : nr++;
1072 : }
1073 0 : pq->nr = nr;
1074 : PF_UNLOCK();
1075 : break;
1076 : }
1077 :
1078 : case DIOCGETQUEUE: {
1079 0 : struct pfioc_queue *pq = (struct pfioc_queue *)addr;
1080 : struct pf_queuespec *qs;
1081 : u_int32_t nr = 0;
1082 :
1083 : PF_LOCK();
1084 0 : if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1085 : error = EBUSY;
1086 : PF_UNLOCK();
1087 0 : break;
1088 : }
1089 :
1090 : /* save state to not run over them all each time? */
1091 0 : qs = TAILQ_FIRST(pf_queues_active);
1092 0 : while ((qs != NULL) && (nr++ < pq->nr))
1093 0 : qs = TAILQ_NEXT(qs, entries);
1094 0 : if (qs == NULL) {
1095 : error = EBUSY;
1096 : PF_UNLOCK();
1097 0 : break;
1098 : }
1099 0 : memcpy(&pq->queue, qs, sizeof(pq->queue));
1100 : PF_UNLOCK();
1101 0 : break;
1102 : }
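
	/*
	 * Hypothetical userland sketch of the two ioctls above (not part of
	 * this file; pfdev is an assumed descriptor for /dev/pf):
	 * DIOCGETQUEUES returns the active ticket and queue count,
	 * DIOCGETQUEUE then fetches one pf_queuespec per index.
	 *
	 *	struct pfioc_queue pq;
	 *	u_int32_t i, nqueues;
	 *
	 *	memset(&pq, 0, sizeof(pq));
	 *	if (ioctl(pfdev, DIOCGETQUEUES, &pq) == -1)
	 *		err(1, "DIOCGETQUEUES");
	 *	nqueues = pq.nr;
	 *	for (i = 0; i < nqueues; i++) {
	 *		pq.nr = i;		// index to fetch, ticket unchanged
	 *		if (ioctl(pfdev, DIOCGETQUEUE, &pq) == -1)
	 *			err(1, "DIOCGETQUEUE");
	 *		// pq.queue now holds a copy of the queue spec
	 *	}
	 */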
1103 :
1104 : case DIOCGETQSTATS: {
1105 0 : struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
1106 : struct pf_queuespec *qs;
1107 : u_int32_t nr;
1108 0 : int nbytes;
1109 :
1110 : PF_LOCK();
1111 0 : if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1112 : error = EBUSY;
1113 : PF_UNLOCK();
1114 0 : break;
1115 : }
1116 0 : nbytes = pq->nbytes;
1117 : nr = 0;
1118 :
1119 : /* save state to not run over them all each time? */
1120 0 : qs = TAILQ_FIRST(pf_queues_active);
1121 0 : while ((qs != NULL) && (nr++ < pq->nr))
1122 0 : qs = TAILQ_NEXT(qs, entries);
1123 0 : if (qs == NULL) {
1124 : error = EBUSY;
1125 : PF_UNLOCK();
1126 0 : break;
1127 : }
1128 0 : memcpy(&pq->queue, qs, sizeof(pq->queue));
1129 : /* It's a root flow queue but is not an HFSC root class */
1130 0 : if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 &&
1131 0 : !(qs->flags & PFQS_ROOTCLASS))
1132 0 : error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf,
1133 : &nbytes);
1134 : else
1135 0 : error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf,
1136 : &nbytes);
1137 0 : if (error == 0)
1138 0 : pq->nbytes = nbytes;
1139 : PF_UNLOCK();
1140 0 : break;
1141 0 : }
1142 :
1143 : case DIOCADDQUEUE: {
1144 0 : struct pfioc_queue *q = (struct pfioc_queue *)addr;
1145 : struct pf_queuespec *qs;
1146 :
1147 : PF_LOCK();
1148 0 : if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
1149 : error = EBUSY;
1150 : PF_UNLOCK();
1151 0 : break;
1152 : }
1153 0 : qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1154 0 : if (qs == NULL) {
1155 : error = ENOMEM;
1156 : PF_UNLOCK();
1157 0 : break;
1158 : }
1159 0 : memcpy(qs, &q->queue, sizeof(*qs));
1160 0 : qs->qid = pf_qname2qid(qs->qname, 1);
1161 0 : if (qs->qid == 0) {
1162 0 : pool_put(&pf_queue_pl, qs);
1163 : error = EBUSY;
1164 : PF_UNLOCK();
1165 0 : break;
1166 : }
1167 0 : if (qs->parent[0] && (qs->parent_qid =
1168 0 : pf_qname2qid(qs->parent, 0)) == 0) {
1169 0 : pool_put(&pf_queue_pl, qs);
1170 : error = ESRCH;
1171 : PF_UNLOCK();
1172 0 : break;
1173 : }
1174 0 : qs->kif = pfi_kif_get(qs->ifname);
1175 0 : if (qs->kif == NULL) {
1176 0 : pool_put(&pf_queue_pl, qs);
1177 : error = ESRCH;
1178 : PF_UNLOCK();
1179 0 : break;
1180 : }
1181 : /* XXX resolve bw percentage specs */
1182 0 : pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);
1183 :
1184 0 : TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
1185 : PF_UNLOCK();
1186 :
1187 0 : break;
1188 : }
1189 :
1190 : case DIOCADDRULE: {
1191 0 : struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1192 : struct pf_ruleset *ruleset;
1193 : struct pf_rule *rule, *tail;
1194 :
1195 : PF_LOCK();
1196 0 : pr->anchor[sizeof(pr->anchor) - 1] = 0;
1197 0 : ruleset = pf_find_ruleset(pr->anchor);
1198 0 : if (ruleset == NULL) {
1199 : error = EINVAL;
1200 : PF_UNLOCK();
1201 0 : break;
1202 : }
1203 0 : if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1204 : error = EINVAL;
1205 : PF_UNLOCK();
1206 0 : break;
1207 : }
1208 0 : if (pr->ticket != ruleset->rules.inactive.ticket) {
1209 : error = EBUSY;
1210 : PF_UNLOCK();
1211 0 : break;
1212 : }
1213 0 : rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1214 0 : if (rule == NULL) {
1215 : error = ENOMEM;
1216 : PF_UNLOCK();
1217 0 : break;
1218 : }
1219 0 : if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
1220 0 : pf_rm_rule(NULL, rule);
1221 : rule = NULL;
1222 : PF_UNLOCK();
1223 0 : break;
1224 : }
1225 0 : rule->cuid = p->p_ucred->cr_ruid;
1226 0 : rule->cpid = p->p_p->ps_pid;
1227 :
1228 0 : switch (rule->af) {
1229 : case 0:
1230 : break;
1231 : case AF_INET:
1232 : break;
1233 : #ifdef INET6
1234 : case AF_INET6:
1235 : break;
1236 : #endif /* INET6 */
1237 : default:
1238 0 : pf_rm_rule(NULL, rule);
1239 : rule = NULL;
1240 : error = EAFNOSUPPORT;
1241 : PF_UNLOCK();
1242 0 : goto fail;
1243 : }
1244 0 : tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
1245 : pf_rulequeue);
1246 0 : if (tail)
1247 0 : rule->nr = tail->nr + 1;
1248 : else
1249 0 : rule->nr = 0;
1250 :
1251 0 : if (rule->src.addr.type == PF_ADDR_NONE ||
1252 0 : rule->dst.addr.type == PF_ADDR_NONE)
1253 0 : error = EINVAL;
1254 :
1255 0 : if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1256 0 : error = EINVAL;
1257 0 : if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1258 0 : error = EINVAL;
1259 0 : if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
1260 0 : error = EINVAL;
1261 0 : if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
1262 0 : error = EINVAL;
1263 0 : if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
1264 0 : error = EINVAL;
1265 0 : if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1266 0 : error = EINVAL;
1267 0 : if (rule->rt && !rule->direction)
1268 0 : error = EINVAL;
1269 0 : if (rule->scrub_flags & PFSTATE_SETPRIO &&
1270 0 : (rule->set_prio[0] > IFQ_MAXPRIO ||
1271 0 : rule->set_prio[1] > IFQ_MAXPRIO))
1272 0 : error = EINVAL;
1273 :
1274 0 : if (error) {
1275 0 : pf_rm_rule(NULL, rule);
1276 : PF_UNLOCK();
1277 0 : break;
1278 : }
1279 0 : TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
1280 : rule, entries);
1281 0 : rule->ruleset = ruleset;
1282 0 : ruleset->rules.inactive.rcount++;
1283 : PF_UNLOCK();
1284 0 : break;
1285 : }
1286 :
1287 : case DIOCGETRULES: {
1288 0 : struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1289 : struct pf_ruleset *ruleset;
1290 : struct pf_rule *tail;
1291 :
1292 : PF_LOCK();
1293 0 : pr->anchor[sizeof(pr->anchor) - 1] = 0;
1294 0 : ruleset = pf_find_ruleset(pr->anchor);
1295 0 : if (ruleset == NULL) {
1296 : error = EINVAL;
1297 : PF_UNLOCK();
1298 0 : break;
1299 : }
1300 0 : tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
1301 0 : if (tail)
1302 0 : pr->nr = tail->nr + 1;
1303 : else
1304 0 : pr->nr = 0;
1305 0 : pr->ticket = ruleset->rules.active.ticket;
1306 : PF_UNLOCK();
1307 0 : break;
1308 : }
1309 :
1310 : case DIOCGETRULE: {
1311 0 : struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1312 : struct pf_ruleset *ruleset;
1313 : struct pf_rule *rule;
1314 : int i;
1315 :
1316 : PF_LOCK();
1317 0 : pr->anchor[sizeof(pr->anchor) - 1] = 0;
1318 0 : ruleset = pf_find_ruleset(pr->anchor);
1319 0 : if (ruleset == NULL) {
1320 : error = EINVAL;
1321 : PF_UNLOCK();
1322 0 : break;
1323 : }
1324 0 : if (pr->ticket != ruleset->rules.active.ticket) {
1325 : error = EBUSY;
1326 : PF_UNLOCK();
1327 0 : break;
1328 : }
1329 0 : rule = TAILQ_FIRST(ruleset->rules.active.ptr);
1330 0 : while ((rule != NULL) && (rule->nr != pr->nr))
1331 0 : rule = TAILQ_NEXT(rule, entries);
1332 0 : if (rule == NULL) {
1333 : error = EBUSY;
1334 : PF_UNLOCK();
1335 0 : break;
1336 : }
1337 0 : memcpy(&pr->rule, rule, sizeof(struct pf_rule));
1338 0 : memset(&pr->rule.entries, 0, sizeof(pr->rule.entries));
1339 0 : pr->rule.kif = NULL;
1340 0 : pr->rule.nat.kif = NULL;
1341 0 : pr->rule.rdr.kif = NULL;
1342 0 : pr->rule.route.kif = NULL;
1343 0 : pr->rule.rcv_kif = NULL;
1344 0 : pr->rule.anchor = NULL;
1345 0 : pr->rule.overload_tbl = NULL;
1346 0 : pr->rule.pktrate.limit /= PF_THRESHOLD_MULT;
1347 0 : memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle));
1348 0 : pr->rule.ruleset = NULL;
1349 0 : if (pf_anchor_copyout(ruleset, rule, pr)) {
1350 : error = EBUSY;
1351 : PF_UNLOCK();
1352 0 : break;
1353 : }
1354 0 : pf_addr_copyout(&pr->rule.src.addr);
1355 0 : pf_addr_copyout(&pr->rule.dst.addr);
1356 0 : pf_addr_copyout(&pr->rule.rdr.addr);
1357 0 : pf_addr_copyout(&pr->rule.nat.addr);
1358 0 : pf_addr_copyout(&pr->rule.route.addr);
1359 0 : for (i = 0; i < PF_SKIP_COUNT; ++i)
1360 0 : if (rule->skip[i].ptr == NULL)
1361 0 : pr->rule.skip[i].nr = (u_int32_t)-1;
1362 : else
1363 0 : pr->rule.skip[i].nr =
1364 0 : rule->skip[i].ptr->nr;
1365 :
1366 0 : if (pr->action == PF_GET_CLR_CNTR) {
1367 0 : rule->evaluations = 0;
1368 0 : rule->packets[0] = rule->packets[1] = 0;
1369 0 : rule->bytes[0] = rule->bytes[1] = 0;
1370 0 : rule->states_tot = 0;
1371 0 : }
1372 : PF_UNLOCK();
1373 0 : break;
1374 : }
1375 :
1376 : case DIOCCHANGERULE: {
1377 0 : struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1378 : struct pf_ruleset *ruleset;
1379 : struct pf_rule *oldrule = NULL, *newrule = NULL;
1380 : u_int32_t nr = 0;
1381 :
1382 0 : if (pcr->action < PF_CHANGE_ADD_HEAD ||
1383 0 : pcr->action > PF_CHANGE_GET_TICKET) {
1384 : error = EINVAL;
1385 0 : break;
1386 : }
1387 : PF_LOCK();
1388 0 : ruleset = pf_find_ruleset(pcr->anchor);
1389 0 : if (ruleset == NULL) {
1390 : error = EINVAL;
1391 : PF_UNLOCK();
1392 0 : break;
1393 : }
1394 :
1395 0 : if (pcr->action == PF_CHANGE_GET_TICKET) {
1396 0 : pcr->ticket = ++ruleset->rules.active.ticket;
1397 : PF_UNLOCK();
1398 0 : break;
1399 : } else {
1400 0 : if (pcr->ticket !=
1401 0 : ruleset->rules.active.ticket) {
1402 : error = EINVAL;
1403 : PF_UNLOCK();
1404 0 : break;
1405 : }
1406 0 : if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1407 : error = EINVAL;
1408 : PF_UNLOCK();
1409 0 : break;
1410 : }
1411 : }
1412 :
1413 0 : if (pcr->action != PF_CHANGE_REMOVE) {
1414 0 : newrule = pool_get(&pf_rule_pl,
1415 : PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1416 0 : if (newrule == NULL) {
1417 : error = ENOMEM;
1418 : PF_UNLOCK();
1419 0 : break;
1420 : }
1421 0 : pf_rule_copyin(&pcr->rule, newrule, ruleset);
1422 0 : newrule->cuid = p->p_ucred->cr_ruid;
1423 0 : newrule->cpid = p->p_p->ps_pid;
1424 :
1425 0 : switch (newrule->af) {
1426 : case 0:
1427 : break;
1428 : case AF_INET:
1429 : break;
1430 : #ifdef INET6
1431 : case AF_INET6:
1432 : break;
1433 : #endif /* INET6 */
1434 : default:
1435 0 : pf_rm_rule(NULL, newrule);
1436 : error = EAFNOSUPPORT;
1437 : PF_UNLOCK();
1438 0 : goto fail;
1439 : }
1440 :
1441 0 : if (newrule->rt && !newrule->direction)
1442 0 : error = EINVAL;
1443 0 : if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
1444 0 : error = EINVAL;
1445 0 : if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
1446 0 : error = EINVAL;
1447 0 : if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
1448 0 : error = EINVAL;
1449 0 : if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
1450 0 : error = EINVAL;
1451 0 : if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
1452 0 : error = EINVAL;
1453 0 : if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1454 0 : error = EINVAL;
1455 :
1456 0 : if (error) {
1457 0 : pf_rm_rule(NULL, newrule);
1458 : PF_UNLOCK();
1459 0 : break;
1460 : }
1461 : }
1462 :
1463 0 : if (pcr->action == PF_CHANGE_ADD_HEAD)
1464 0 : oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1465 0 : else if (pcr->action == PF_CHANGE_ADD_TAIL)
1466 0 : oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1467 : pf_rulequeue);
1468 : else {
1469 0 : oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1470 0 : while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1471 0 : oldrule = TAILQ_NEXT(oldrule, entries);
1472 0 : if (oldrule == NULL) {
1473 0 : if (newrule != NULL)
1474 0 : pf_rm_rule(NULL, newrule);
1475 : error = EINVAL;
1476 : PF_UNLOCK();
1477 0 : break;
1478 : }
1479 : }
1480 :
1481 0 : if (pcr->action == PF_CHANGE_REMOVE) {
1482 0 : pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1483 0 : ruleset->rules.active.rcount--;
1484 0 : } else {
1485 0 : if (oldrule == NULL)
1486 0 : TAILQ_INSERT_TAIL(
1487 : ruleset->rules.active.ptr,
1488 : newrule, entries);
1489 0 : else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1490 0 : pcr->action == PF_CHANGE_ADD_BEFORE)
1491 0 : TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1492 : else
1493 0 : TAILQ_INSERT_AFTER(
1494 : ruleset->rules.active.ptr,
1495 : oldrule, newrule, entries);
1496 0 : ruleset->rules.active.rcount++;
1497 : }
1498 :
1499 : nr = 0;
1500 0 : TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1501 0 : oldrule->nr = nr++;
1502 :
1503 0 : ruleset->rules.active.ticket++;
1504 :
1505 0 : pf_calc_skip_steps(ruleset->rules.active.ptr);
1506 0 : pf_remove_if_empty_ruleset(ruleset);
1507 :
1508 : PF_UNLOCK();
1509 0 : break;
1510 : }
1511 :
1512 : case DIOCCLRSTATES: {
1513 : struct pf_state *s, *nexts;
1514 0 : struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1515 : u_int killed = 0;
1516 :
1517 : PF_LOCK();
1518 : PF_STATE_ENTER_WRITE();
1519 0 : for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
1520 0 : nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1521 :
1522 0 : if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1523 0 : s->kif->pfik_name)) {
1524 : #if NPFSYNC > 0
1525 : /* don't send out individual delete messages */
1526 0 : SET(s->state_flags, PFSTATE_NOSYNC);
1527 : #endif /* NPFSYNC > 0 */
1528 0 : pf_remove_state(s);
1529 0 : killed++;
1530 0 : }
1531 : }
1532 : PF_STATE_EXIT_WRITE();
1533 0 : psk->psk_killed = killed;
1534 : #if NPFSYNC > 0
1535 0 : pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1536 : #endif /* NPFSYNC > 0 */
1537 : PF_UNLOCK();
1538 : break;
1539 : }
1540 :
1541 : case DIOCKILLSTATES: {
1542 : struct pf_state *s, *nexts;
1543 : struct pf_state_item *si, *sit;
1544 0 : struct pf_state_key *sk, key;
1545 : struct pf_addr *srcaddr, *dstaddr;
1546 : u_int16_t srcport, dstport;
1547 0 : struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1548 : u_int i, killed = 0;
1549 : const int dirs[] = { PF_IN, PF_OUT };
1550 : int sidx, didx;
1551 :
1552 0 : if (psk->psk_pfcmp.id) {
1553 0 : if (psk->psk_pfcmp.creatorid == 0)
1554 0 : psk->psk_pfcmp.creatorid = pf_status.hostid;
1555 : PF_LOCK();
1556 : PF_STATE_ENTER_WRITE();
1557 0 : if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1558 0 : pf_remove_state(s);
1559 0 : psk->psk_killed = 1;
1560 0 : }
1561 : PF_STATE_EXIT_WRITE();
1562 : PF_UNLOCK();
1563 0 : break;
1564 : }
1565 :
1566 0 : if (psk->psk_af && psk->psk_proto &&
1567 0 : psk->psk_src.port_op == PF_OP_EQ &&
1568 0 : psk->psk_dst.port_op == PF_OP_EQ) {
1569 :
1570 0 : key.af = psk->psk_af;
1571 0 : key.proto = psk->psk_proto;
1572 0 : key.rdomain = psk->psk_rdomain;
1573 :
1574 : PF_LOCK();
1575 : PF_STATE_ENTER_WRITE();
1576 0 : for (i = 0; i < nitems(dirs); i++) {
1577 0 : if (dirs[i] == PF_IN) {
1578 : sidx = 0;
1579 : didx = 1;
1580 0 : } else {
1581 : sidx = 1;
1582 : didx = 0;
1583 : }
1584 0 : PF_ACPY(&key.addr[sidx],
1585 : &psk->psk_src.addr.v.a.addr, key.af);
1586 0 : PF_ACPY(&key.addr[didx],
1587 : &psk->psk_dst.addr.v.a.addr, key.af);
1588 0 : key.port[sidx] = psk->psk_src.port[0];
1589 0 : key.port[didx] = psk->psk_dst.port[0];
1590 :
1591 0 : sk = RB_FIND(pf_state_tree, &pf_statetbl, &key);
1592 0 : if (sk == NULL)
1593 : continue;
1594 :
1595 0 : TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit)
1596 0 : if (((si->s->key[PF_SK_WIRE]->af ==
1597 0 : si->s->key[PF_SK_STACK]->af &&
1598 0 : sk == (dirs[i] == PF_IN ?
1599 : si->s->key[PF_SK_WIRE] :
1600 0 : si->s->key[PF_SK_STACK])) ||
1601 0 : (si->s->key[PF_SK_WIRE]->af !=
1602 0 : si->s->key[PF_SK_STACK]->af &&
1603 0 : dirs[i] == PF_IN &&
1604 0 : (sk == si->s->key[PF_SK_STACK] ||
1605 0 : sk == si->s->key[PF_SK_WIRE]))) &&
1606 0 : (!psk->psk_ifname[0] ||
1607 0 : (si->s->kif != pfi_all &&
1608 0 : !strcmp(psk->psk_ifname,
1609 0 : si->s->kif->pfik_name)))) {
1610 0 : pf_remove_state(si->s);
1611 0 : killed++;
1612 0 : }
1613 : }
1614 0 : if (killed)
1615 0 : psk->psk_killed = killed;
1616 : PF_STATE_EXIT_WRITE();
1617 : PF_UNLOCK();
1618 0 : break;
1619 : }
1620 :
1621 : PF_LOCK();
1622 : PF_STATE_ENTER_WRITE();
1623 0 : for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
1624 : s = nexts) {
1625 0 : nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1626 :
1627 0 : if (s->direction == PF_OUT) {
1628 0 : sk = s->key[PF_SK_STACK];
1629 0 : srcaddr = &sk->addr[1];
1630 0 : dstaddr = &sk->addr[0];
1631 0 : srcport = sk->port[1];
1632 0 : dstport = sk->port[0];
1633 0 : } else {
1634 0 : sk = s->key[PF_SK_WIRE];
1635 0 : srcaddr = &sk->addr[0];
1636 0 : dstaddr = &sk->addr[1];
1637 0 : srcport = sk->port[0];
1638 0 : dstport = sk->port[1];
1639 : }
1640 0 : if ((!psk->psk_af || sk->af == psk->psk_af)
1641 0 : && (!psk->psk_proto || psk->psk_proto ==
1642 0 : sk->proto) && psk->psk_rdomain == sk->rdomain &&
1643 0 : PF_MATCHA(psk->psk_src.neg,
1644 : &psk->psk_src.addr.v.a.addr,
1645 : &psk->psk_src.addr.v.a.mask,
1646 0 : srcaddr, sk->af) &&
1647 0 : PF_MATCHA(psk->psk_dst.neg,
1648 : &psk->psk_dst.addr.v.a.addr,
1649 : &psk->psk_dst.addr.v.a.mask,
1650 0 : dstaddr, sk->af) &&
1651 0 : (psk->psk_src.port_op == 0 ||
1652 0 : pf_match_port(psk->psk_src.port_op,
1653 0 : psk->psk_src.port[0], psk->psk_src.port[1],
1654 0 : srcport)) &&
1655 0 : (psk->psk_dst.port_op == 0 ||
1656 0 : pf_match_port(psk->psk_dst.port_op,
1657 0 : psk->psk_dst.port[0], psk->psk_dst.port[1],
1658 0 : dstport)) &&
1659 0 : (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
1660 0 : !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1661 0 : (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1662 0 : s->kif->pfik_name))) {
1663 0 : pf_remove_state(s);
1664 0 : killed++;
1665 0 : }
1666 : }
1667 0 : psk->psk_killed = killed;
1668 : PF_STATE_EXIT_WRITE();
1669 : PF_UNLOCK();
1670 0 : break;
1671 0 : }
1672 :
1673 : #if NPFSYNC > 0
1674 : case DIOCADDSTATE: {
1675 0 : struct pfioc_state *ps = (struct pfioc_state *)addr;
1676 0 : struct pfsync_state *sp = &ps->state;
1677 :
1678 0 : if (sp->timeout >= PFTM_MAX) {
1679 : error = EINVAL;
1680 0 : break;
1681 : }
1682 : PF_LOCK();
1683 : PF_STATE_ENTER_WRITE();
1684 0 : error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
1685 : PF_STATE_EXIT_WRITE();
1686 : PF_UNLOCK();
1687 0 : break;
1688 : }
1689 : #endif /* NPFSYNC > 0 */
1690 :
1691 : case DIOCGETSTATE: {
1692 0 : struct pfioc_state *ps = (struct pfioc_state *)addr;
1693 : struct pf_state *s;
1694 0 : struct pf_state_cmp id_key;
1695 :
1696 0 : memset(&id_key, 0, sizeof(id_key));
1697 0 : id_key.id = ps->state.id;
1698 0 : id_key.creatorid = ps->state.creatorid;
1699 :
1700 : PF_STATE_ENTER_READ();
1701 0 : s = pf_find_state_byid(&id_key);
1702 0 : s = pf_state_ref(s);
1703 : PF_STATE_EXIT_READ();
1704 0 : if (s == NULL) {
1705 : error = ENOENT;
1706 0 : break;
1707 : }
1708 :
1709 0 : pf_state_export(&ps->state, s);
1710 0 : pf_state_unref(s);
1711 0 : break;
1712 0 : }
1713 :
1714 : case DIOCGETSTATES: {
1715 0 : struct pfioc_states *ps = (struct pfioc_states *)addr;
1716 : struct pf_state *state;
1717 : struct pfsync_state *p, *pstore;
1718 : u_int32_t nr = 0;
1719 :
1720 0 : if (ps->ps_len == 0) {
1721 0 : nr = pf_status.states;
1722 0 : ps->ps_len = sizeof(struct pfsync_state) * nr;
1723 0 : break;
1724 : }
1725 :
1726 0 : pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1727 :
1728 0 : p = ps->ps_states;
1729 :
1730 : PF_STATE_ENTER_READ();
1731 0 : state = TAILQ_FIRST(&state_list);
1732 0 : while (state) {
1733 0 : if (state->timeout != PFTM_UNLINKED) {
1734 0 : if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1735 : break;
1736 0 : pf_state_export(pstore, state);
1737 0 : error = copyout(pstore, p, sizeof(*p));
1738 0 : if (error) {
1739 0 : free(pstore, M_TEMP, sizeof(*pstore));
1740 : PF_STATE_EXIT_READ();
1741 0 : goto fail;
1742 : }
1743 0 : p++;
1744 : nr++;
1745 0 : }
1746 0 : state = TAILQ_NEXT(state, entry_list);
1747 : }
1748 : PF_STATE_EXIT_READ();
1749 :
1750 0 : ps->ps_len = sizeof(struct pfsync_state) * nr;
1751 :
1752 0 : free(pstore, M_TEMP, sizeof(*pstore));
1753 0 : break;
1754 : }
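
	/*
	 * Hypothetical userland sketch of the size probe above (not part of
	 * this file; pfdev is an assumed descriptor for /dev/pf): a first
	 * call with ps_len == 0 only reports the space needed, a second call
	 * fills the buffer and sets ps_len to the bytes actually copied out.
	 * Real consumers over-allocate a little, since the state table can
	 * grow between the two calls.
	 *
	 *	struct pfioc_states ps;
	 *	u_int32_t nstates;
	 *
	 *	memset(&ps, 0, sizeof(ps));
	 *	if (ioctl(pfdev, DIOCGETSTATES, &ps) == -1)	// probe
	 *		err(1, "DIOCGETSTATES");
	 *	if ((ps.ps_states = malloc(ps.ps_len)) == NULL)
	 *		err(1, "malloc");
	 *	if (ioctl(pfdev, DIOCGETSTATES, &ps) == -1)	// copy out
	 *		err(1, "DIOCGETSTATES");
	 *	nstates = ps.ps_len / sizeof(struct pfsync_state);
	 */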
1755 :
1756 : case DIOCGETSTATUS: {
1757 0 : struct pf_status *s = (struct pf_status *)addr;
1758 : PF_LOCK();
1759 0 : memcpy(s, &pf_status, sizeof(struct pf_status));
1760 0 : pfi_update_status(s->ifname, s);
1761 : PF_UNLOCK();
1762 : break;
1763 : }
1764 :
1765 : case DIOCSETSTATUSIF: {
1766 0 : struct pfioc_iface *pi = (struct pfioc_iface *)addr;
1767 :
1768 : PF_LOCK();
1769 0 : if (pi->pfiio_name[0] == 0) {
1770 0 : memset(pf_status.ifname, 0, IFNAMSIZ);
1771 : PF_UNLOCK();
1772 0 : break;
1773 : }
1774 0 : strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
1775 0 : pf_trans_set.mask |= PF_TSET_STATUSIF;
1776 : PF_UNLOCK();
1777 0 : break;
1778 : }
1779 :
1780 : case DIOCCLRSTATUS: {
1781 0 : struct pfioc_iface *pi = (struct pfioc_iface *)addr;
1782 :
1783 : PF_LOCK();
1784 : /* if ifname is specified, clear counters there only */
1785 0 : if (pi->pfiio_name[0]) {
1786 0 : pfi_update_status(pi->pfiio_name, NULL);
1787 : PF_UNLOCK();
1788 0 : break;
1789 : }
1790 :
1791 0 : memset(pf_status.counters, 0, sizeof(pf_status.counters));
1792 0 : memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
1793 0 : memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
1794 0 : pf_status.since = time_uptime;
1795 :
1796 : PF_UNLOCK();
1797 0 : break;
1798 : }
1799 :
1800 : case DIOCNATLOOK: {
1801 0 : struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1802 : struct pf_state_key *sk;
1803 : struct pf_state *state;
1804 0 : struct pf_state_key_cmp key;
1805 0 : int m = 0, direction = pnl->direction;
1806 : int sidx, didx;
1807 :
1808 : /* NATLOOK src and dst are reversed, so reverse sidx/didx */
1809 0 : sidx = (direction == PF_IN) ? 1 : 0;
1810 0 : didx = (direction == PF_IN) ? 0 : 1;
1811 :
1812 0 : if (!pnl->proto ||
1813 0 : PF_AZERO(&pnl->saddr, pnl->af) ||
1814 0 : PF_AZERO(&pnl->daddr, pnl->af) ||
1815 0 : ((pnl->proto == IPPROTO_TCP ||
1816 0 : pnl->proto == IPPROTO_UDP) &&
1817 0 : (!pnl->dport || !pnl->sport)) ||
1818 0 : pnl->rdomain > RT_TABLEID_MAX)
1819 0 : error = EINVAL;
1820 : else {
1821 0 : key.af = pnl->af;
1822 0 : key.proto = pnl->proto;
1823 0 : key.rdomain = pnl->rdomain;
1824 0 : PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
1825 0 : key.port[sidx] = pnl->sport;
1826 0 : PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
1827 0 : key.port[didx] = pnl->dport;
1828 :
1829 : PF_STATE_ENTER_READ();
1830 0 : state = pf_find_state_all(&key, direction, &m);
1831 0 : state = pf_state_ref(state);
1832 : PF_STATE_EXIT_READ();
1833 :
1834 0 : if (m > 1)
1835 0 : error = E2BIG; /* more than one state */
1836 0 : else if (state != NULL) {
1837 0 : sk = state->key[sidx];
1838 0 : PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
1839 0 : pnl->rsport = sk->port[sidx];
1840 0 : PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
1841 0 : pnl->rdport = sk->port[didx];
1842 0 : pnl->rrdomain = sk->rdomain;
1843 0 : } else
1844 : error = ENOENT;
1845 0 : pf_state_unref(state);
1846 : }
1847 : break;
1848 0 : }
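
	/*
	 * Hypothetical userland sketch of a DIOCNATLOOK query (not part of
	 * this file; pfdev and the src/dst sockaddr_in variables are
	 * assumptions). The caller fills in the addresses it can see and,
	 * if exactly one state matches, gets back the translated addresses
	 * recorded in that state:
	 *
	 *	struct pfioc_natlook pnl;
	 *
	 *	memset(&pnl, 0, sizeof(pnl));
	 *	pnl.af = AF_INET;
	 *	pnl.proto = IPPROTO_TCP;
	 *	pnl.direction = PF_OUT;		// direction of the state queried
	 *	pnl.rdomain = 0;
	 *	pnl.saddr.v4 = src.sin_addr;	pnl.sport = src.sin_port;
	 *	pnl.daddr.v4 = dst.sin_addr;	pnl.dport = dst.sin_port;
	 *	if (ioctl(pfdev, DIOCNATLOOK, &pnl) == 0) {
	 *		// pnl.rsaddr/rsport, pnl.rdaddr/rdport and
	 *		// pnl.rrdomain describe the translated side
	 *	}
	 */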
1849 :
1850 : case DIOCSETTIMEOUT: {
1851 0 : struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1852 :
1853 0 : if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1854 0 : pt->seconds < 0) {
1855 : error = EINVAL;
1856 0 : goto fail;
1857 : }
1858 : PF_LOCK();
1859 0 : if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1860 0 : pt->seconds = 1;
1861 0 : pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
1862 0 : pt->seconds = pf_default_rule.timeout[pt->timeout];
1863 : PF_UNLOCK();
1864 0 : break;
1865 : }
1866 :
1867 : case DIOCGETTIMEOUT: {
1868 0 : struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1869 :
1870 0 : if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1871 : error = EINVAL;
1872 0 : goto fail;
1873 : }
1874 : PF_LOCK();
1875 0 : pt->seconds = pf_default_rule.timeout[pt->timeout];
1876 : PF_UNLOCK();
1877 0 : break;
1878 : }
1879 :
1880 : case DIOCGETLIMIT: {
1881 0 : struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1882 :
1883 0 : if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1884 : error = EINVAL;
1885 0 : goto fail;
1886 : }
1887 : PF_LOCK();
1888 0 : pl->limit = pf_pool_limits[pl->index].limit;
1889 : PF_UNLOCK();
1890 0 : break;
1891 : }
1892 :
1893 : case DIOCSETLIMIT: {
1894 0 : struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1895 :
1896 : PF_LOCK();
1897 0 : if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1898 0 : pf_pool_limits[pl->index].pp == NULL) {
1899 : error = EINVAL;
1900 : PF_UNLOCK();
1901 0 : goto fail;
1902 : }
1903 0 : if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
1904 0 : pl->limit) {
1905 : error = EBUSY;
1906 : PF_UNLOCK();
1907 0 : goto fail;
1908 : }
1909 : /* Fragments reference mbuf clusters. */
1910 0 : if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
1911 : error = EINVAL;
1912 : PF_UNLOCK();
1913 0 : goto fail;
1914 : }
1915 :
1916 0 : pf_pool_limits[pl->index].limit_new = pl->limit;
1917 0 : pl->limit = pf_pool_limits[pl->index].limit;
1918 : PF_UNLOCK();
1919 0 : break;
1920 : }
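
/*
 * Illustrative userland sketch, not part of the instrumented source above:
 * querying and raising the state-table hard limit.  The new value is staged
 * in limit_new and only handed to pool_sethardlimit() by the DIOCXCOMMIT
 * loop further down; DIOCSETLIMIT fails with EBUSY if more pool entries are
 * already in use than the requested limit.  The descriptor name is an
 * assumption.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <err.h>

static void
raise_state_limit(int pffd, unsigned int want)
{
        struct pfioc_limit pl;

        pl.index = PF_LIMIT_STATES;
        pl.limit = 0;
        if (ioctl(pffd, DIOCGETLIMIT, &pl) == -1)
                err(1, "DIOCGETLIMIT");
        warnx("state limit currently %u", pl.limit);

        pl.index = PF_LIMIT_STATES;
        pl.limit = want;
        if (ioctl(pffd, DIOCSETLIMIT, &pl) == -1)
                err(1, "DIOCSETLIMIT");         /* EINVAL/EBUSY per the checks above */
}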
1921 :
1922 : case DIOCSETDEBUG: {
1923 0 : u_int32_t *level = (u_int32_t *)addr;
1924 :
1925 : PF_LOCK();
1926 0 : pf_trans_set.debug = *level;
1927 0 : pf_trans_set.mask |= PF_TSET_DEBUG;
1928 : PF_UNLOCK();
1929 : break;
1930 : }
1931 :
1932 : case DIOCGETRULESETS: {
1933 0 : struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
1934 : struct pf_ruleset *ruleset;
1935 : struct pf_anchor *anchor;
1936 :
1937 : PF_LOCK();
1938 0 : pr->path[sizeof(pr->path) - 1] = 0;
1939 0 : if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
1940 : error = EINVAL;
1941 : PF_UNLOCK();
1942 0 : break;
1943 : }
1944 0 : pr->nr = 0;
1945 0 : if (ruleset->anchor == NULL) {
1946 : /* XXX kludge for pf_main_ruleset */
1947 0 : RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
1948 0 : if (anchor->parent == NULL)
1949 0 : pr->nr++;
1950 : } else {
1951 0 : RB_FOREACH(anchor, pf_anchor_node,
1952 : &ruleset->anchor->children)
1953 0 : pr->nr++;
1954 : }
1955 : PF_UNLOCK();
1956 0 : break;
1957 : }
1958 :
1959 : case DIOCGETRULESET: {
1960 0 : struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
1961 : struct pf_ruleset *ruleset;
1962 : struct pf_anchor *anchor;
1963 : u_int32_t nr = 0;
1964 :
1965 : PF_LOCK();
1966 0 : pr->path[sizeof(pr->path) - 1] = 0;
1967 0 : if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
1968 : error = EINVAL;
1969 : PF_UNLOCK();
1970 0 : break;
1971 : }
1972 0 : pr->name[0] = 0;
1973 0 : if (ruleset->anchor == NULL) {
1974 : /* XXX kludge for pf_main_ruleset */
1975 0 : RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
1976 0 : if (anchor->parent == NULL && nr++ == pr->nr) {
1977 0 : strlcpy(pr->name, anchor->name,
1978 : sizeof(pr->name));
1979 : PF_UNLOCK();
1980 0 : break;
1981 : }
1982 : } else {
1983 0 : RB_FOREACH(anchor, pf_anchor_node,
1984 : &ruleset->anchor->children)
1985 0 : if (nr++ == pr->nr) {
1986 0 : strlcpy(pr->name, anchor->name,
1987 : sizeof(pr->name));
1988 : PF_UNLOCK();
1989 0 : break;
1990 : }
1991 : }
1992 0 : if (!pr->name[0])
1993 0 : error = EBUSY;
1994 : PF_UNLOCK();
1995 0 : break;
1996 : }
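
/*
 * Illustrative userland sketch, not part of the instrumented source above:
 * walking the anchor tree the way pfctl -sA does.  DIOCGETRULESETS counts
 * the children of a path and DIOCGETRULESET is then called once per index;
 * an index past the end comes back as EBUSY, as the handler above shows.
 * The descriptor name is an assumption.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <stdio.h>
#include <string.h>
#include <err.h>

static void
print_child_anchors(int pffd, const char *path)
{
        struct pfioc_ruleset pr;
        u_int32_t i, count;

        memset(&pr, 0, sizeof(pr));
        strlcpy(pr.path, path, sizeof(pr.path));
        if (ioctl(pffd, DIOCGETRULESETS, &pr) == -1)
                err(1, "DIOCGETRULESETS");
        count = pr.nr;

        for (i = 0; i < count; i++) {
                memset(&pr, 0, sizeof(pr));
                strlcpy(pr.path, path, sizeof(pr.path));
                pr.nr = i;
                if (ioctl(pffd, DIOCGETRULESET, &pr) == -1)
                        err(1, "DIOCGETRULESET");
                printf("  %s\n", pr.name);
        }
}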
1997 :
1998 : case DIOCRCLRTABLES: {
1999 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2000 :
2001 0 : if (io->pfrio_esize != 0) {
2002 : error = ENODEV;
2003 0 : break;
2004 : }
2005 : PF_LOCK();
2006 0 : error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2007 0 : io->pfrio_flags | PFR_FLAG_USERIOCTL);
2008 : PF_UNLOCK();
2009 0 : break;
2010 : }
2011 :
2012 : case DIOCRADDTABLES: {
2013 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2014 :
2015 0 : if (io->pfrio_esize != sizeof(struct pfr_table)) {
2016 : error = ENODEV;
2017 0 : break;
2018 : }
2019 : PF_LOCK();
2020 0 : error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2021 0 : &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2022 : PF_UNLOCK();
2023 0 : break;
2024 : }
2025 :
2026 : case DIOCRDELTABLES: {
2027 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2028 :
2029 0 : if (io->pfrio_esize != sizeof(struct pfr_table)) {
2030 : error = ENODEV;
2031 0 : break;
2032 : }
2033 : PF_LOCK();
2034 0 : error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2035 0 : &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2036 : PF_UNLOCK();
2037 0 : break;
2038 : }
2039 :
2040 : case DIOCRGETTABLES: {
2041 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2042 :
2043 0 : if (io->pfrio_esize != sizeof(struct pfr_table)) {
2044 : error = ENODEV;
2045 0 : break;
2046 : }
2047 : PF_LOCK();
2048 0 : error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2049 0 : &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2050 : PF_UNLOCK();
2051 0 : break;
2052 : }
2053 :
2054 : case DIOCRGETTSTATS: {
2055 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2056 :
2057 0 : if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2058 : error = ENODEV;
2059 0 : break;
2060 : }
2061 : PF_LOCK();
2062 0 : error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2063 0 : &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2064 : PF_UNLOCK();
2065 0 : break;
2066 : }
2067 :
2068 : case DIOCRCLRTSTATS: {
2069 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2070 :
2071 0 : if (io->pfrio_esize != sizeof(struct pfr_table)) {
2072 : error = ENODEV;
2073 0 : break;
2074 : }
2075 : PF_LOCK();
2076 0 : error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2077 0 : &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2078 : PF_UNLOCK();
2079 0 : break;
2080 : }
2081 :
2082 : case DIOCRSETTFLAGS: {
2083 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2084 :
2085 0 : if (io->pfrio_esize != sizeof(struct pfr_table)) {
2086 : error = ENODEV;
2087 0 : break;
2088 : }
2089 : PF_LOCK();
2090 0 : error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2091 0 : io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2092 0 : &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2093 : PF_UNLOCK();
2094 0 : break;
2095 : }
2096 :
2097 : case DIOCRCLRADDRS: {
2098 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2099 :
2100 0 : if (io->pfrio_esize != 0) {
2101 : error = ENODEV;
2102 0 : break;
2103 : }
2104 : PF_LOCK();
2105 0 : error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2106 0 : io->pfrio_flags | PFR_FLAG_USERIOCTL);
2107 : PF_UNLOCK();
2108 0 : break;
2109 : }
2110 :
2111 : case DIOCRADDADDRS: {
2112 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2113 :
2114 0 : if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2115 : error = ENODEV;
2116 0 : break;
2117 : }
2118 : PF_LOCK();
2119 0 : error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2120 0 : io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2121 : PFR_FLAG_USERIOCTL);
2122 : PF_UNLOCK();
2123 0 : break;
2124 : }
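
/*
 * Illustrative userland sketch, not part of the instrumented source above:
 * adding a single IPv4 host to a table with DIOCRADDADDRS.  pfrio_esize must
 * be exactly sizeof(struct pfr_addr), as the check above enforces, and
 * pfrio_nadd reports how many of the supplied entries were new.  The
 * descriptor name and the table name "badhosts" are assumptions.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/pfvar.h>
#include <string.h>
#include <err.h>

static void
table_add_host(int pffd, const char *host)
{
        struct pfioc_table io;
        struct pfr_addr a;

        memset(&a, 0, sizeof(a));
        if (inet_pton(AF_INET, host, &a.pfra_ip4addr) != 1)
                errx(1, "bad address: %s", host);
        a.pfra_af = AF_INET;
        a.pfra_net = 32;                        /* host entry */

        memset(&io, 0, sizeof(io));
        strlcpy(io.pfrio_table.pfrt_name, "badhosts",
            sizeof(io.pfrio_table.pfrt_name));
        io.pfrio_buffer = &a;
        io.pfrio_esize = sizeof(a);
        io.pfrio_size = 1;
        if (ioctl(pffd, DIOCRADDADDRS, &io) == -1)
                err(1, "DIOCRADDADDRS");
        warnx("%d address(es) added", io.pfrio_nadd);
}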
2125 :
2126 : case DIOCRDELADDRS: {
2127 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2128 :
2129 0 : if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2130 : error = ENODEV;
2131 0 : break;
2132 : }
2133 : PF_LOCK();
2134 0 : error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2135 0 : io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2136 : PFR_FLAG_USERIOCTL);
2137 : PF_UNLOCK();
2138 0 : break;
2139 : }
2140 :
2141 : case DIOCRSETADDRS: {
2142 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2143 :
2144 0 : if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2145 : error = ENODEV;
2146 0 : break;
2147 : }
2148 : PF_LOCK();
2149 0 : error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2150 0 : io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2151 0 : &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2152 : PFR_FLAG_USERIOCTL, 0);
2153 : PF_UNLOCK();
2154 0 : break;
2155 : }
2156 :
2157 : case DIOCRGETADDRS: {
2158 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2159 :
2160 0 : if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2161 : error = ENODEV;
2162 0 : break;
2163 : }
2164 : PF_LOCK();
2165 0 : error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2166 0 : &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2167 : PF_UNLOCK();
2168 0 : break;
2169 : }
2170 :
2171 : case DIOCRGETASTATS: {
2172 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2173 :
2174 0 : if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2175 : error = ENODEV;
2176 0 : break;
2177 : }
2178 : PF_LOCK();
2179 0 : error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2180 0 : &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2181 : PF_UNLOCK();
2182 0 : break;
2183 : }
2184 :
2185 : case DIOCRCLRASTATS: {
2186 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2187 :
2188 0 : if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2189 : error = ENODEV;
2190 0 : break;
2191 : }
2192 : PF_LOCK();
2193 0 : error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2194 0 : io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2195 : PFR_FLAG_USERIOCTL);
2196 : PF_UNLOCK();
2197 0 : break;
2198 : }
2199 :
2200 : case DIOCRTSTADDRS: {
2201 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2202 :
2203 0 : if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2204 : error = ENODEV;
2205 0 : break;
2206 : }
2207 : PF_LOCK();
2208 0 : error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2209 0 : io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2210 : PFR_FLAG_USERIOCTL);
2211 : PF_UNLOCK();
2212 0 : break;
2213 : }
2214 :
2215 : case DIOCRINADEFINE: {
2216 0 : struct pfioc_table *io = (struct pfioc_table *)addr;
2217 :
2218 0 : if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2219 : error = ENODEV;
2220 0 : break;
2221 : }
2222 : PF_LOCK();
2223 0 : error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2224 0 : io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2225 0 : io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2226 : PF_UNLOCK();
2227 0 : break;
2228 : }
2229 :
2230 : case DIOCOSFPADD: {
2231 0 : struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2232 : PF_LOCK();
2233 0 : error = pf_osfp_add(io);
2234 : PF_UNLOCK();
2235 : break;
2236 : }
2237 :
2238 : case DIOCOSFPGET: {
2239 0 : struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2240 : PF_LOCK();
2241 0 : error = pf_osfp_get(io);
2242 : PF_UNLOCK();
2243 : break;
2244 : }
2245 :
2246 : case DIOCXBEGIN: {
2247 0 : struct pfioc_trans *io = (struct pfioc_trans *)addr;
2248 : struct pfioc_trans_e *ioe;
2249 : struct pfr_table *table;
2250 : int i;
2251 :
2252 0 : if (io->esize != sizeof(*ioe)) {
2253 : error = ENODEV;
2254 0 : goto fail;
2255 : }
2256 : PF_LOCK();
2257 0 : ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2258 0 : table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2259 0 : pf_default_rule_new = pf_default_rule;
2260 0 : memset(&pf_trans_set, 0, sizeof(pf_trans_set));
2261 0 : for (i = 0; i < io->size; i++) {
2262 0 : if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2263 0 : free(table, M_TEMP, sizeof(*table));
2264 0 : free(ioe, M_TEMP, sizeof(*ioe));
2265 : error = EFAULT;
2266 : PF_UNLOCK();
2267 0 : goto fail;
2268 : }
2269 0 : if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2270 : sizeof(ioe->anchor)) {
2271 0 : free(table, M_TEMP, sizeof(*table));
2272 0 : free(ioe, M_TEMP, sizeof(*ioe));
2273 : error = ENAMETOOLONG;
2274 : PF_UNLOCK();
2275 0 : goto fail;
2276 : }
2277 0 : switch (ioe->type) {
2278 : case PF_TRANS_TABLE:
2279 0 : memset(table, 0, sizeof(*table));
2280 0 : strlcpy(table->pfrt_anchor, ioe->anchor,
2281 : sizeof(table->pfrt_anchor));
2282 0 : if ((error = pfr_ina_begin(table,
2283 0 : &ioe->ticket, NULL, 0))) {
2284 0 : free(table, M_TEMP, sizeof(*table));
2285 0 : free(ioe, M_TEMP, sizeof(*ioe));
2286 : PF_UNLOCK();
2287 0 : goto fail;
2288 : }
2289 : break;
2290 : case PF_TRANS_RULESET:
2291 0 : if ((error = pf_begin_rules(&ioe->ticket,
2292 : ioe->anchor))) {
2293 0 : free(table, M_TEMP, sizeof(*table));
2294 0 : free(ioe, M_TEMP, sizeof(*ioe));
2295 : PF_UNLOCK();
2296 0 : goto fail;
2297 : }
2298 : break;
2299 : default:
2300 0 : free(table, M_TEMP, sizeof(*table));
2301 0 : free(ioe, M_TEMP, sizeof(*ioe));
2302 : error = EINVAL;
2303 : PF_UNLOCK();
2304 0 : goto fail;
2305 : }
2306 0 : if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2307 0 : free(table, M_TEMP, sizeof(*table));
2308 0 : free(ioe, M_TEMP, sizeof(*ioe));
2309 : error = EFAULT;
2310 : PF_UNLOCK();
2311 0 : goto fail;
2312 : }
2313 : }
2314 0 : free(table, M_TEMP, sizeof(*table));
2315 0 : free(ioe, M_TEMP, sizeof(*ioe));
2316 : PF_UNLOCK();
2317 0 : break;
2318 : }
2319 :
2320 : case DIOCXROLLBACK: {
2321 0 : struct pfioc_trans *io = (struct pfioc_trans *)addr;
2322 : struct pfioc_trans_e *ioe;
2323 : struct pfr_table *table;
2324 : int i;
2325 :
2326 0 : if (io->esize != sizeof(*ioe)) {
2327 : error = ENODEV;
2328 0 : goto fail;
2329 : }
2330 : PF_LOCK();
2331 0 : ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2332 0 : table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2333 0 : for (i = 0; i < io->size; i++) {
2334 0 : if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2335 0 : free(table, M_TEMP, sizeof(*table));
2336 0 : free(ioe, M_TEMP, sizeof(*ioe));
2337 : error = EFAULT;
2338 : PF_UNLOCK();
2339 0 : goto fail;
2340 : }
2341 0 : if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2342 : sizeof(ioe->anchor)) {
2343 0 : free(table, M_TEMP, sizeof(*table));
2344 0 : free(ioe, M_TEMP, sizeof(*ioe));
2345 : error = ENAMETOOLONG;
2346 : PF_UNLOCK();
2347 0 : goto fail;
2348 : }
2349 0 : switch (ioe->type) {
2350 : case PF_TRANS_TABLE:
2351 0 : memset(table, 0, sizeof(*table));
2352 0 : strlcpy(table->pfrt_anchor, ioe->anchor,
2353 : sizeof(table->pfrt_anchor));
2354 0 : if ((error = pfr_ina_rollback(table,
2355 0 : ioe->ticket, NULL, 0))) {
2356 0 : free(table, M_TEMP, sizeof(*table));
2357 0 : free(ioe, M_TEMP, sizeof(*ioe));
2358 : PF_UNLOCK();
2359 0 : goto fail; /* really bad */
2360 : }
2361 : break;
2362 : case PF_TRANS_RULESET:
2363 0 : if ((error = pf_rollback_rules(ioe->ticket,
2364 : ioe->anchor))) {
2365 0 : free(table, M_TEMP, sizeof(*table));
2366 0 : free(ioe, M_TEMP, sizeof(*ioe));
2367 : PF_UNLOCK();
2368 0 : goto fail; /* really bad */
2369 : }
2370 : break;
2371 : default:
2372 0 : free(table, M_TEMP, sizeof(*table));
2373 0 : free(ioe, M_TEMP, sizeof(*ioe));
2374 : error = EINVAL;
2375 : PF_UNLOCK();
2376 0 : goto fail; /* really bad */
2377 : }
2378 : }
2379 0 : free(table, M_TEMP, sizeof(*table));
2380 0 : free(ioe, M_TEMP, sizeof(*ioe));
2381 : PF_UNLOCK();
2382 0 : break;
2383 : }
2384 :
2385 : case DIOCXCOMMIT: {
2386 0 : struct pfioc_trans *io = (struct pfioc_trans *)addr;
2387 : struct pfioc_trans_e *ioe;
2388 : struct pfr_table *table;
2389 : struct pf_ruleset *rs;
2390 : int i;
2391 :
2392 0 : if (io->esize != sizeof(*ioe)) {
2393 : error = ENODEV;
2394 0 : goto fail;
2395 : }
2396 : PF_LOCK();
2397 0 : ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2398 0 : table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2399 : /* first makes sure everything will succeed */
2400 0 : for (i = 0; i < io->size; i++) {
2401 0 : if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2402 0 : free(table, M_TEMP, sizeof(*table));
2403 0 : free(ioe, M_TEMP, sizeof(*ioe));
2404 : error = EFAULT;
2405 : PF_UNLOCK();
2406 0 : goto fail;
2407 : }
2408 0 : if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2409 : sizeof(ioe->anchor)) {
2410 0 : free(table, M_TEMP, sizeof(*table));
2411 0 : free(ioe, M_TEMP, sizeof(*ioe));
2412 : error = ENAMETOOLONG;
2413 : PF_UNLOCK();
2414 0 : goto fail;
2415 : }
2416 0 : switch (ioe->type) {
2417 : case PF_TRANS_TABLE:
2418 0 : rs = pf_find_ruleset(ioe->anchor);
2419 0 : if (rs == NULL || !rs->topen || ioe->ticket !=
2420 0 : rs->tticket) {
2421 0 : free(table, M_TEMP, sizeof(*table));
2422 0 : free(ioe, M_TEMP, sizeof(*ioe));
2423 : error = EBUSY;
2424 : PF_UNLOCK();
2425 0 : goto fail;
2426 : }
2427 : break;
2428 : case PF_TRANS_RULESET:
2429 0 : rs = pf_find_ruleset(ioe->anchor);
2430 0 : if (rs == NULL ||
2431 0 : !rs->rules.inactive.open ||
2432 0 : rs->rules.inactive.ticket !=
2433 0 : ioe->ticket) {
2434 0 : free(table, M_TEMP, sizeof(*table));
2435 0 : free(ioe, M_TEMP, sizeof(*ioe));
2436 : error = EBUSY;
2437 : PF_UNLOCK();
2438 0 : goto fail;
2439 : }
2440 : break;
2441 : default:
2442 0 : free(table, M_TEMP, sizeof(*table));
2443 0 : free(ioe, M_TEMP, sizeof(*ioe));
2444 : error = EINVAL;
2445 : PF_UNLOCK();
2446 0 : goto fail;
2447 : }
2448 : }
2449 :
2450 : /*
2451 : * Checked already in DIOCSETLIMIT, but check again as the
2452 : * situation might have changed.
2453 : */
2454 0 : for (i = 0; i < PF_LIMIT_MAX; i++) {
2455 0 : if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2456 0 : pf_pool_limits[i].limit_new) {
2457 0 : free(table, M_TEMP, sizeof(*table));
2458 0 : free(ioe, M_TEMP, sizeof(*ioe));
2459 : error = EBUSY;
2460 : PF_UNLOCK();
2461 0 : goto fail;
2462 : }
2463 : }
2464 : /* now do the commit - no errors should happen here */
2465 0 : for (i = 0; i < io->size; i++) {
2466 0 : if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2467 0 : free(table, M_TEMP, sizeof(*table));
2468 0 : free(ioe, M_TEMP, sizeof(*ioe));
2469 : error = EFAULT;
2470 : PF_UNLOCK();
2471 0 : goto fail;
2472 : }
2473 0 : if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2474 : sizeof(ioe->anchor)) {
2475 0 : free(table, M_TEMP, sizeof(*table));
2476 0 : free(ioe, M_TEMP, sizeof(*ioe));
2477 : error = ENAMETOOLONG;
2478 : PF_UNLOCK();
2479 0 : goto fail;
2480 : }
2481 0 : switch (ioe->type) {
2482 : case PF_TRANS_TABLE:
2483 0 : memset(table, 0, sizeof(*table));
2484 0 : strlcpy(table->pfrt_anchor, ioe->anchor,
2485 : sizeof(table->pfrt_anchor));
2486 0 : if ((error = pfr_ina_commit(table, ioe->ticket,
2487 : NULL, NULL, 0))) {
2488 0 : free(table, M_TEMP, sizeof(*table));
2489 0 : free(ioe, M_TEMP, sizeof(*ioe));
2490 : PF_UNLOCK();
2491 0 : goto fail; /* really bad */
2492 : }
2493 : break;
2494 : case PF_TRANS_RULESET:
2495 0 : if ((error = pf_commit_rules(ioe->ticket,
2496 : ioe->anchor))) {
2497 0 : free(table, M_TEMP, sizeof(*table));
2498 0 : free(ioe, M_TEMP, sizeof(*ioe));
2499 : PF_UNLOCK();
2500 0 : goto fail; /* really bad */
2501 : }
2502 : break;
2503 : default:
2504 0 : free(table, M_TEMP, sizeof(*table));
2505 0 : free(ioe, M_TEMP, sizeof(*ioe));
2506 : error = EINVAL;
2507 : PF_UNLOCK();
2508 0 : goto fail; /* really bad */
2509 : }
2510 : }
2511 0 : for (i = 0; i < PF_LIMIT_MAX; i++) {
2512 0 : if (pf_pool_limits[i].limit_new !=
2513 0 : pf_pool_limits[i].limit &&
2514 0 : pool_sethardlimit(pf_pool_limits[i].pp,
2515 0 : pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2516 0 : free(table, M_TEMP, sizeof(*table));
2517 0 : free(ioe, M_TEMP, sizeof(*ioe));
2518 : error = EBUSY;
2519 : PF_UNLOCK();
2520 0 : goto fail; /* really bad */
2521 : }
2522 0 : pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2523 : }
2524 0 : for (i = 0; i < PFTM_MAX; i++) {
2525 0 : int old = pf_default_rule.timeout[i];
2526 :
2527 0 : pf_default_rule.timeout[i] =
2528 0 : pf_default_rule_new.timeout[i];
2529 0 : if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2530 0 : pf_default_rule.timeout[i] < old)
2531 0 : task_add(net_tq(0), &pf_purge_task);
2532 : }
2533 0 : pfi_xcommit();
2534 0 : pf_trans_set_commit();
2535 0 : free(table, M_TEMP, sizeof(*table));
2536 0 : free(ioe, M_TEMP, sizeof(*ioe));
2537 : PF_UNLOCK();
2538 0 : break;
2539 : }
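
/*
 * Illustrative userland sketch, not part of the instrumented source above,
 * of the transaction protocol the three cases above implement: DIOCXBEGIN
 * hands back one ticket per element, the rule and table loads in between
 * (elided here) quote those tickets, and DIOCXCOMMIT swaps the inactive
 * sets in atomically; on a load error the same array goes to DIOCXROLLBACK.
 * The descriptor and function names are assumptions.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <string.h>

static int
reload_main_ruleset(int pffd)
{
        struct pfioc_trans trans;
        struct pfioc_trans_e te[2];

        memset(&trans, 0, sizeof(trans));
        memset(te, 0, sizeof(te));
        te[0].type = PF_TRANS_RULESET;          /* empty anchor == main ruleset */
        te[1].type = PF_TRANS_TABLE;
        trans.size = 2;
        trans.esize = sizeof(te[0]);
        trans.array = te;

        if (ioctl(pffd, DIOCXBEGIN, &trans) == -1)
                return (-1);

        /*
         * DIOCADDRULE and DIOCRINADEFINE calls would go here, passing
         * te[0].ticket and te[1].ticket respectively.
         */

        if (ioctl(pffd, DIOCXCOMMIT, &trans) == -1) {
                (void)ioctl(pffd, DIOCXROLLBACK, &trans);
                return (-1);
        }
        return (0);
}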
2540 :
2541 : case DIOCGETSRCNODES: {
2542 0 : struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2543 : struct pf_src_node *n, *p, *pstore;
2544 : u_int32_t nr = 0;
2545 0 : int space = psn->psn_len;
2546 :
2547 : PF_LOCK();
2548 0 : if (space == 0) {
2549 0 : RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2550 0 : nr++;
2551 0 : psn->psn_len = sizeof(struct pf_src_node) * nr;
2552 : PF_UNLOCK();
2553 0 : break;
2554 : }
2555 :
2556 0 : pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2557 :
2558 0 : p = psn->psn_src_nodes;
2559 0 : RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2560 0 : int secs = time_uptime, diff;
2561 :
2562 0 : if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2563 0 : break;
2564 :
2565 0 : memcpy(pstore, n, sizeof(*pstore));
2566 0 : memset(&pstore->entry, 0, sizeof(pstore->entry));
2567 0 : pstore->rule.ptr = NULL;
2568 0 : pstore->kif = NULL;
2569 0 : pstore->rule.nr = n->rule.ptr->nr;
2570 0 : pstore->creation = secs - pstore->creation;
2571 0 : if (pstore->expire > secs)
2572 0 : pstore->expire -= secs;
2573 : else
2574 0 : pstore->expire = 0;
2575 :
2576 : /* adjust the connection rate estimate */
2577 0 : diff = secs - n->conn_rate.last;
2578 0 : if (diff >= n->conn_rate.seconds)
2579 0 : pstore->conn_rate.count = 0;
2580 : else
2581 0 : pstore->conn_rate.count -=
2582 0 : n->conn_rate.count * diff /
2583 : n->conn_rate.seconds;
2584 :
2585 0 : error = copyout(pstore, p, sizeof(*p));
2586 0 : if (error) {
2587 0 : free(pstore, M_TEMP, sizeof(*pstore));
2588 : PF_UNLOCK();
2589 0 : goto fail;
2590 : }
2591 0 : p++;
2592 : nr++;
2593 0 : }
2594 0 : psn->psn_len = sizeof(struct pf_src_node) * nr;
2595 :
2596 0 : free(pstore, M_TEMP, sizeof(*pstore));
2597 : PF_UNLOCK();
2598 0 : break;
2599 : }
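
/*
 * Illustrative userland sketch, not part of the instrumented source above,
 * of the two-pass protocol the DIOCGETSRCNODES handler expects: a first call
 * with psn_len == 0 only reports the buffer size needed, a second call with
 * an allocated buffer copies the nodes out.  The descriptor name is an
 * assumption.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>

static void
dump_src_nodes(int pffd)
{
        struct pfioc_src_nodes psn;
        struct pf_src_node *n;
        int i, count;

        memset(&psn, 0, sizeof(psn));
        if (ioctl(pffd, DIOCGETSRCNODES, &psn) == -1)
                err(1, "DIOCGETSRCNODES (size probe)");
        if (psn.psn_len == 0)
                return;                         /* no source nodes */

        if ((psn.psn_buf = malloc(psn.psn_len)) == NULL)
                err(1, "malloc");
        if (ioctl(pffd, DIOCGETSRCNODES, &psn) == -1)
                err(1, "DIOCGETSRCNODES");

        count = psn.psn_len / sizeof(*n);       /* kernel rewrites psn_len */
        for (i = 0; i < count; i++) {
                n = psn.psn_src_nodes + i;
                printf("src node: %u states, %u connections\n",
                    n->states, n->conn);
        }
        free(psn.psn_buf);
}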
2600 :
2601 : case DIOCCLRSRCNODES: {
2602 : struct pf_src_node *n;
2603 : struct pf_state *state;
2604 :
2605 : PF_LOCK();
2606 : PF_STATE_ENTER_WRITE();
2607 0 : RB_FOREACH(state, pf_state_tree_id, &tree_id)
2608 0 : pf_src_tree_remove_state(state);
2609 : PF_STATE_EXIT_WRITE();
2610 0 : RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2611 0 : n->expire = 1;
2612 0 : pf_purge_expired_src_nodes();
2613 : PF_UNLOCK();
2614 : break;
2615 : }
2616 :
2617 : case DIOCKILLSRCNODES: {
2618 : struct pf_src_node *sn;
2619 : struct pf_state *s;
2620 : struct pfioc_src_node_kill *psnk =
2621 0 : (struct pfioc_src_node_kill *)addr;
2622 : u_int killed = 0;
2623 :
2624 : PF_LOCK();
2625 0 : RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2626 0 : if (PF_MATCHA(psnk->psnk_src.neg,
2627 : &psnk->psnk_src.addr.v.a.addr,
2628 : &psnk->psnk_src.addr.v.a.mask,
2629 0 : &sn->addr, sn->af) &&
2630 0 : PF_MATCHA(psnk->psnk_dst.neg,
2631 : &psnk->psnk_dst.addr.v.a.addr,
2632 : &psnk->psnk_dst.addr.v.a.mask,
2633 : &sn->raddr, sn->af)) {
2634 : /* Handle state to src_node linkage */
2635 0 : if (sn->states != 0) {
2636 : PF_ASSERT_LOCKED();
2637 : PF_STATE_ENTER_WRITE();
2638 0 : RB_FOREACH(s, pf_state_tree_id,
2639 : &tree_id)
2640 0 : pf_state_rm_src_node(s, sn);
2641 : PF_STATE_EXIT_WRITE();
2642 : }
2643 0 : sn->expire = 1;
2644 0 : killed++;
2645 0 : }
2646 : }
2647 :
2648 0 : if (killed > 0)
2649 0 : pf_purge_expired_src_nodes();
2650 :
2651 0 : psnk->psnk_killed = killed;
2652 : PF_UNLOCK();
2653 : break;
2654 : }
2655 :
2656 : case DIOCSETHOSTID: {
2657 0 : u_int32_t *hostid = (u_int32_t *)addr;
2658 :
2659 : PF_LOCK();
2660 0 : if (*hostid == 0)
2661 0 : pf_trans_set.hostid = arc4random();
2662 : else
2663 0 : pf_trans_set.hostid = *hostid;
2664 0 : pf_trans_set.mask |= PF_TSET_HOSTID;
2665 : PF_UNLOCK();
2666 : break;
2667 : }
2668 :
2669 : case DIOCOSFPFLUSH:
2670 : PF_LOCK();
2671 0 : pf_osfp_flush();
2672 : PF_UNLOCK();
2673 0 : break;
2674 :
2675 : case DIOCIGETIFACES: {
2676 0 : struct pfioc_iface *io = (struct pfioc_iface *)addr;
2677 :
2678 0 : if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2679 : error = ENODEV;
2680 0 : break;
2681 : }
2682 : PF_LOCK();
2683 0 : error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2684 0 : &io->pfiio_size);
2685 : PF_UNLOCK();
2686 0 : break;
2687 : }
2688 :
2689 : case DIOCSETIFFLAG: {
2690 0 : struct pfioc_iface *io = (struct pfioc_iface *)addr;
2691 :
2692 : PF_LOCK();
2693 0 : error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2694 : PF_UNLOCK();
2695 : break;
2696 : }
2697 :
2698 : case DIOCCLRIFFLAG: {
2699 0 : struct pfioc_iface *io = (struct pfioc_iface *)addr;
2700 :
2701 : PF_LOCK();
2702 0 : error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2703 : PF_UNLOCK();
2704 : break;
2705 : }
2706 :
2707 : case DIOCSETREASS: {
2708 0 : u_int32_t *reass = (u_int32_t *)addr;
2709 :
2710 : PF_LOCK();
2711 0 : pf_trans_set.reass = *reass;
2712 0 : pf_trans_set.mask |= PF_TSET_REASS;
2713 : PF_UNLOCK();
2714 : break;
2715 : }
2716 :
2717 : case DIOCSETSYNFLWATS: {
2718 0 : struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2719 :
2720 : PF_LOCK();
2721 0 : error = pf_syncookies_setwats(io->hiwat, io->lowat);
2722 : PF_UNLOCK();
2723 : break;
2724 : }
2725 :
2726 : case DIOCGETSYNFLWATS: {
2727 0 : struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
2728 :
2729 : PF_LOCK();
2730 0 : error = pf_syncookies_getwats(io);
2731 : PF_UNLOCK();
2732 : break;
2733 : }
2734 :
2735 : case DIOCSETSYNCOOKIES: {
2736 : u_int8_t *mode = (u_int8_t *)addr;
2737 :
2738 : PF_LOCK();
2739 0 : error = pf_syncookies_setmode(*mode);
2740 : PF_UNLOCK();
2741 : break;
2742 : }
2743 :
2744 : default:
2745 : error = ENODEV;
2746 0 : break;
2747 : }
2748 : fail:
2749 0 : NET_UNLOCK();
2750 0 : return (error);
2751 0 : }
2752 :
2753 : void
2754 0 : pf_trans_set_commit(void)
2755 : {
2756 0 : if (pf_trans_set.mask & PF_TSET_STATUSIF)
2757 0 : strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
2758 0 : if (pf_trans_set.mask & PF_TSET_DEBUG)
2759 0 : pf_status.debug = pf_trans_set.debug;
2760 0 : if (pf_trans_set.mask & PF_TSET_HOSTID)
2761 0 : pf_status.hostid = pf_trans_set.hostid;
2762 0 : if (pf_trans_set.mask & PF_TSET_REASS)
2763 0 : pf_status.reass = pf_trans_set.reass;
2764 0 : }
2765 :
2766 : void
2767 0 : pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
2768 : {
2769 0 : memmove(to, from, sizeof(*to));
2770 0 : to->kif = NULL;
2771 0 : }
2772 :
2773 : int
2774 0 : pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
2775 : struct pf_ruleset *ruleset)
2776 : {
2777 : int i;
2778 :
2779 0 : to->src = from->src;
2780 0 : to->dst = from->dst;
2781 :
2782 : /* XXX union skip[] */
2783 :
2784 0 : strlcpy(to->label, from->label, sizeof(to->label));
2785 0 : strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
2786 0 : strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
2787 0 : strlcpy(to->qname, from->qname, sizeof(to->qname));
2788 0 : strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
2789 0 : strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
2790 0 : strlcpy(to->match_tagname, from->match_tagname,
2791 : sizeof(to->match_tagname));
2792 0 : strlcpy(to->overload_tblname, from->overload_tblname,
2793 : sizeof(to->overload_tblname));
2794 :
2795 0 : pf_pool_copyin(&from->nat, &to->nat);
2796 0 : pf_pool_copyin(&from->rdr, &to->rdr);
2797 0 : pf_pool_copyin(&from->route, &to->route);
2798 :
2799 0 : if (pf_kif_setup(to->ifname, &to->kif))
2800 0 : return (EINVAL);
2801 0 : if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
2802 0 : return (EINVAL);
2803 0 : if (to->overload_tblname[0]) {
2804 0 : if ((to->overload_tbl = pfr_attach_table(ruleset,
2805 0 : to->overload_tblname, 0)) == NULL)
2806 0 : return (EINVAL);
2807 : else
2808 0 : to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
2809 0 : }
2810 :
2811 0 : if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
2812 0 : return (EINVAL);
2813 0 : if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
2814 0 : return (EINVAL);
2815 0 : if (pf_kif_setup(to->route.ifname, &to->route.kif))
2816 0 : return (EINVAL);
2817 :
2818 0 : to->os_fingerprint = from->os_fingerprint;
2819 :
2820 0 : to->rtableid = from->rtableid;
2821 0 : if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
2822 0 : return (EBUSY);
2823 0 : to->onrdomain = from->onrdomain;
2824 0 : if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain))
2825 0 : return (EBUSY);
2826 0 : if (to->onrdomain >= 0) /* make sure it is a real rdomain */
2827 0 : to->onrdomain = rtable_l2(to->onrdomain);
2828 :
2829 0 : for (i = 0; i < PFTM_MAX; i++)
2830 0 : to->timeout[i] = from->timeout[i];
2831 0 : to->states_tot = from->states_tot;
2832 0 : to->max_states = from->max_states;
2833 0 : to->max_src_nodes = from->max_src_nodes;
2834 0 : to->max_src_states = from->max_src_states;
2835 0 : to->max_src_conn = from->max_src_conn;
2836 0 : to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
2837 0 : to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
2838 0 : pf_init_threshold(&to->pktrate, from->pktrate.limit,
2839 0 : from->pktrate.seconds);
2840 :
2841 0 : if (to->qname[0] != 0) {
2842 0 : if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
2843 0 : return (EBUSY);
2844 0 : if (to->pqname[0] != 0) {
2845 0 : if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
2846 0 : return (EBUSY);
2847 : } else
2848 0 : to->pqid = to->qid;
2849 : }
2850 0 : to->rt_listid = from->rt_listid;
2851 0 : to->prob = from->prob;
2852 0 : to->return_icmp = from->return_icmp;
2853 0 : to->return_icmp6 = from->return_icmp6;
2854 0 : to->max_mss = from->max_mss;
2855 0 : if (to->tagname[0])
2856 0 : if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
2857 0 : return (EBUSY);
2858 0 : if (to->match_tagname[0])
2859 0 : if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
2860 0 : return (EBUSY);
2861 0 : to->scrub_flags = from->scrub_flags;
2862 0 : to->uid = from->uid;
2863 0 : to->gid = from->gid;
2864 0 : to->rule_flag = from->rule_flag;
2865 0 : to->action = from->action;
2866 0 : to->direction = from->direction;
2867 0 : to->log = from->log;
2868 0 : to->logif = from->logif;
2869 : #if NPFLOG > 0
2870 0 : if (!to->log)
2871 0 : to->logif = 0;
2872 : #endif /* NPFLOG > 0 */
2873 0 : to->quick = from->quick;
2874 0 : to->ifnot = from->ifnot;
2875 0 : to->rcvifnot = from->rcvifnot;
2876 0 : to->match_tag_not = from->match_tag_not;
2877 0 : to->keep_state = from->keep_state;
2878 0 : to->af = from->af;
2879 0 : to->naf = from->naf;
2880 0 : to->proto = from->proto;
2881 0 : to->type = from->type;
2882 0 : to->code = from->code;
2883 0 : to->flags = from->flags;
2884 0 : to->flagset = from->flagset;
2885 0 : to->min_ttl = from->min_ttl;
2886 0 : to->allow_opts = from->allow_opts;
2887 0 : to->rt = from->rt;
2888 0 : to->return_ttl = from->return_ttl;
2889 0 : to->tos = from->tos;
2890 0 : to->set_tos = from->set_tos;
2891 0 : to->anchor_relative = from->anchor_relative; /* XXX */
2892 0 : to->anchor_wildcard = from->anchor_wildcard; /* XXX */
2893 0 : to->flush = from->flush;
2894 0 : to->divert.addr = from->divert.addr;
2895 0 : to->divert.port = from->divert.port;
2896 0 : to->divert.type = from->divert.type;
2897 0 : to->prio = from->prio;
2898 0 : to->set_prio[0] = from->set_prio[0];
2899 0 : to->set_prio[1] = from->set_prio[1];
2900 :
2901 0 : return (0);
2902 0 : }