| GCC Code Coverage Report | |||||||||||||||||||||
| 
 | |||||||||||||||||||||
| Line | Branch | Exec | Source | 
| 1 | /* $OpenBSD: pfctl_optimize.c,v 1.36 2016/08/03 16:27:25 krw Exp $ */ | ||
| 2 | |||
| 3 | /* | ||
| 4 | * Copyright (c) 2004 Mike Frantzen <frantzen@openbsd.org> | ||
| 5 | * | ||
| 6 | * Permission to use, copy, modify, and distribute this software for any | ||
| 7 | * purpose with or without fee is hereby granted, provided that the above | ||
| 8 | * copyright notice and this permission notice appear in all copies. | ||
| 9 | * | ||
| 10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
| 11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
| 12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
| 13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
| 14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
| 15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
| 16 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <sys/types.h> | ||
| 20 | #include <sys/ioctl.h> | ||
| 21 | #include <sys/socket.h> | ||
| 22 | |||
| 23 | #include <netinet/in.h> | ||
| 24 | #include <arpa/inet.h> | ||
| 25 | #include <net/if.h> | ||
| 26 | #include <net/pfvar.h> | ||
| 27 | |||
| 28 | #include <assert.h> | ||
| 29 | #include <ctype.h> | ||
| 30 | #include <err.h> | ||
| 31 | #include <errno.h> | ||
| 32 | #include <stddef.h> | ||
| 33 | #include <stdio.h> | ||
| 34 | #include <stdlib.h> | ||
| 35 | #include <string.h> | ||
| 36 | |||
| 37 | #include "pfctl_parser.h" | ||
| 38 | #include "pfctl.h" | ||
| 39 | |||
| 40 | /* The size at which a table becomes faster than individual rules */ | ||
| 41 | #define TABLE_THRESHOLD 6 | ||
| 42 | |||
| 43 | |||
| 44 | /* #define OPT_DEBUG 1 */ | ||
| 45 | #ifdef OPT_DEBUG | ||
| 46 | # define DEBUG(str, v...) \ | ||
| 47 | 	printf("%s: " str "\n", __FUNCTION__ , ## v) | ||
| 48 | #else | ||
| 49 | # define DEBUG(str, v...) ((void)0) | ||
| 50 | #endif | ||
| 51 | |||
| 52 | |||
| 53 | /* | ||
| 54 | * A container that lets us sort a superblock to optimize the skip step jumps | ||
| 55 | */ | ||
| 56 | struct pf_skip_step { | ||
| 57 | int ps_count; /* number of items */ | ||
| 58 | TAILQ_HEAD( , pf_opt_rule) ps_rules; | ||
| 59 | TAILQ_ENTRY(pf_skip_step) ps_entry; | ||
| 60 | }; | ||
| 61 | |||
| 62 | |||
| 63 | /* | ||
| 64 | * A superblock is a block of adjacent rules of similar action. If there | ||
| 65 | * are five PASS rules in a row, they all become members of a superblock. | ||
| 66 | * Once we have a superblock, we are free to re-order any rules within it | ||
| 67 | * in order to improve performance; if a packet is passed, it doesn't matter | ||
| 68 | * who passed it. | ||
| 69 | */ | ||
| 70 | struct superblock { | ||
| 71 | TAILQ_HEAD( , pf_opt_rule) sb_rules; | ||
| 72 | TAILQ_ENTRY(superblock) sb_entry; | ||
| 73 | struct superblock *sb_profiled_block; | ||
| 74 | TAILQ_HEAD(skiplist, pf_skip_step) sb_skipsteps[PF_SKIP_COUNT]; | ||
| 75 | }; | ||
| 76 | TAILQ_HEAD(superblocks, superblock); | ||
| 77 | |||
| 78 | |||
| 79 | /* | ||
| 80 | * Description of the PF rule structure. | ||
| 81 | */ | ||
| 82 | enum { | ||
| 83 | BARRIER, /* the presence of the field puts the rule in its own block */ | ||
| 84 | BREAK, /* the field may not differ between rules in a superblock */ | ||
| 85 | NOMERGE, /* the field may not differ between rules when combined */ | ||
| 86 | COMBINED, /* the field may itself be combined with other rules */ | ||
| 87 | DC, /* we just don't care about the field */ | ||
| 88 | NEVER}; /* we should never see this field set?!? */ | ||
| 89 | struct pf_rule_field { | ||
| 90 | const char *prf_name; | ||
| 91 | int prf_type; | ||
| 92 | size_t prf_offset; | ||
| 93 | size_t prf_size; | ||
| 94 | } pf_rule_desc[] = { | ||
| 95 | #define PF_RULE_FIELD(field, ty) \ | ||
| 96 |     {#field,				\ | ||
| 97 | ty, \ | ||
| 98 | offsetof(struct pf_rule, field), \ | ||
| 99 | sizeof(((struct pf_rule *)0)->field)} | ||
| 100 | |||
| 101 | |||
| 102 | /* | ||
| 103 | * The presence of these fields in a rule puts the rule in its own | ||
| 104 | * superblock. Thus it will not be optimized. It also prevents the | ||
| 105 | * rule from being re-ordered at all. | ||
| 106 | */ | ||
| 107 | PF_RULE_FIELD(label, BARRIER), | ||
| 108 | PF_RULE_FIELD(prob, BARRIER), | ||
| 109 | PF_RULE_FIELD(max_states, BARRIER), | ||
| 110 | PF_RULE_FIELD(max_src_nodes, BARRIER), | ||
| 111 | PF_RULE_FIELD(max_src_states, BARRIER), | ||
| 112 | PF_RULE_FIELD(max_src_conn, BARRIER), | ||
| 113 | PF_RULE_FIELD(max_src_conn_rate, BARRIER), | ||
| 114 | PF_RULE_FIELD(anchor, BARRIER), /* for now */ | ||
| 115 | |||
| 116 | /* | ||
| 117 | * These fields must be the same between all rules in the same superblock. | ||
| 118 | * These rules are allowed to be re-ordered but only among like rules. | ||
| 119 | * For instance we can re-order all 'tag "foo"' rules because they have the | ||
| 120 | * same tag. But we can not re-order between a 'tag "foo"' and a | ||
| 121 | * 'tag "bar"' since that would change the meaning of the ruleset. | ||
| 122 | */ | ||
| 123 | PF_RULE_FIELD(tagname, BREAK), | ||
| 124 | PF_RULE_FIELD(keep_state, BREAK), | ||
| 125 | PF_RULE_FIELD(qname, BREAK), | ||
| 126 | PF_RULE_FIELD(pqname, BREAK), | ||
| 127 | PF_RULE_FIELD(rt, BREAK), | ||
| 128 | PF_RULE_FIELD(allow_opts, BREAK), | ||
| 129 | PF_RULE_FIELD(rule_flag, BREAK), | ||
| 130 | PF_RULE_FIELD(action, BREAK), | ||
| 131 | PF_RULE_FIELD(log, BREAK), | ||
| 132 | PF_RULE_FIELD(quick, BREAK), | ||
| 133 | PF_RULE_FIELD(return_ttl, BREAK), | ||
| 134 | PF_RULE_FIELD(overload_tblname, BREAK), | ||
| 135 | PF_RULE_FIELD(flush, BREAK), | ||
| 136 | PF_RULE_FIELD(rdr, BREAK), | ||
| 137 | PF_RULE_FIELD(nat, BREAK), | ||
| 138 | PF_RULE_FIELD(logif, BREAK), | ||
| 139 | PF_RULE_FIELD(route, BREAK), | ||
| 140 | PF_RULE_FIELD(rtableid, BREAK), | ||
| 141 | |||
| 142 | /* | ||
| 143 | * Any fields not listed in this structure act as BREAK fields | ||
| 144 | */ | ||
| 145 | |||
| 146 | |||
| 147 | /* | ||
| 148 | * These fields must not differ when we merge two rules together but | ||
| 149 | * their difference isn't enough to put the rules in different superblocks. | ||
| 150 | * There are no problems re-ordering any rules with these fields. | ||
| 151 | */ | ||
| 152 | PF_RULE_FIELD(af, NOMERGE), | ||
| 153 | PF_RULE_FIELD(ifnot, NOMERGE), | ||
| 154 | PF_RULE_FIELD(ifname, NOMERGE), /* hack for IF groups */ | ||
| 155 | PF_RULE_FIELD(match_tag_not, NOMERGE), | ||
| 156 | PF_RULE_FIELD(match_tagname, NOMERGE), | ||
| 157 | PF_RULE_FIELD(os_fingerprint, NOMERGE), | ||
| 158 | PF_RULE_FIELD(timeout, NOMERGE), | ||
| 159 | PF_RULE_FIELD(return_icmp, NOMERGE), | ||
| 160 | PF_RULE_FIELD(return_icmp6, NOMERGE), | ||
| 161 | PF_RULE_FIELD(uid, NOMERGE), | ||
| 162 | PF_RULE_FIELD(gid, NOMERGE), | ||
| 163 | PF_RULE_FIELD(direction, NOMERGE), | ||
| 164 | PF_RULE_FIELD(proto, NOMERGE), | ||
| 165 | PF_RULE_FIELD(type, NOMERGE), | ||
| 166 | PF_RULE_FIELD(code, NOMERGE), | ||
| 167 | PF_RULE_FIELD(flags, NOMERGE), | ||
| 168 | PF_RULE_FIELD(flagset, NOMERGE), | ||
| 169 | PF_RULE_FIELD(tos, NOMERGE), | ||
| 170 | PF_RULE_FIELD(src.port, NOMERGE), | ||
| 171 | PF_RULE_FIELD(dst.port, NOMERGE), | ||
| 172 | PF_RULE_FIELD(src.port_op, NOMERGE), | ||
| 173 | PF_RULE_FIELD(dst.port_op, NOMERGE), | ||
| 174 | PF_RULE_FIELD(src.neg, NOMERGE), | ||
| 175 | PF_RULE_FIELD(dst.neg, NOMERGE), | ||
| 176 | PF_RULE_FIELD(onrdomain, NOMERGE), | ||
| 177 | PF_RULE_FIELD(naf, NOMERGE), | ||
| 178 | |||
| 179 | /* These fields can be merged */ | ||
| 180 | PF_RULE_FIELD(src.addr, COMBINED), | ||
| 181 | PF_RULE_FIELD(dst.addr, COMBINED), | ||
| 182 | |||
| 183 | /* We just don't care about these fields. They're set by the kernel */ | ||
| 184 | PF_RULE_FIELD(skip, DC), | ||
| 185 | PF_RULE_FIELD(evaluations, DC), | ||
| 186 | PF_RULE_FIELD(packets, DC), | ||
| 187 | PF_RULE_FIELD(bytes, DC), | ||
| 188 | PF_RULE_FIELD(kif, DC), | ||
| 189 | PF_RULE_FIELD(states_cur, DC), | ||
| 190 | PF_RULE_FIELD(states_tot, DC), | ||
| 191 | PF_RULE_FIELD(src_nodes, DC), | ||
| 192 | PF_RULE_FIELD(nr, DC), | ||
| 193 | PF_RULE_FIELD(entries, DC), | ||
| 194 | PF_RULE_FIELD(qid, DC), | ||
| 195 | PF_RULE_FIELD(pqid, DC), | ||
| 196 | PF_RULE_FIELD(anchor_relative, DC), | ||
| 197 | PF_RULE_FIELD(anchor_wildcard, DC), | ||
| 198 | PF_RULE_FIELD(tag, DC), | ||
| 199 | PF_RULE_FIELD(match_tag, DC), | ||
| 200 | PF_RULE_FIELD(overload_tbl, DC), | ||
| 201 | |||
| 202 | /* These fields should never be set in a PASS/BLOCK rule XXX fix*/ | ||
| 203 | PF_RULE_FIELD(max_mss, NEVER), | ||
| 204 | PF_RULE_FIELD(min_ttl, NEVER), | ||
| 205 | PF_RULE_FIELD(set_tos, NEVER), | ||
| 206 | }; | ||
| 207 | |||
| 208 | |||
| 209 | |||
| 210 | int addrs_combineable(struct pf_rule_addr *, struct pf_rule_addr *); | ||
| 211 | int addrs_equal(struct pf_rule_addr *, struct pf_rule_addr *); | ||
| 212 | int block_feedback(struct pfctl *, struct superblock *); | ||
| 213 | int combine_rules(struct pfctl *, struct superblock *); | ||
| 214 | void comparable_rule(struct pf_rule *, const struct pf_rule *, int); | ||
| 215 | int construct_superblocks(struct pfctl *, struct pf_opt_queue *, | ||
| 216 | struct superblocks *); | ||
| 217 | void exclude_supersets(struct pf_rule *, struct pf_rule *); | ||
| 218 | int interface_group(const char *); | ||
| 219 | int load_feedback_profile(struct pfctl *, struct superblocks *); | ||
| 220 | int optimize_superblock(struct pfctl *, struct superblock *); | ||
| 221 | void remove_from_skipsteps(struct skiplist *, struct superblock *, | ||
| 222 | struct pf_opt_rule *, struct pf_skip_step *); | ||
| 223 | int remove_identical_rules(struct pfctl *, struct superblock *); | ||
| 224 | int reorder_rules(struct pfctl *, struct superblock *, int); | ||
| 225 | int rules_combineable(struct pf_rule *, struct pf_rule *); | ||
| 226 | void skip_append(struct superblock *, int, struct pf_skip_step *, | ||
| 227 | struct pf_opt_rule *); | ||
| 228 | int skip_compare(int, struct pf_skip_step *, struct pf_opt_rule *); | ||
| 229 | void skip_init(void); | ||
| 230 | int skip_cmp_af(struct pf_rule *, struct pf_rule *); | ||
| 231 | int skip_cmp_dir(struct pf_rule *, struct pf_rule *); | ||
| 232 | int skip_cmp_rdom(struct pf_rule *, struct pf_rule *); | ||
| 233 | int skip_cmp_dst_addr(struct pf_rule *, struct pf_rule *); | ||
| 234 | int skip_cmp_dst_port(struct pf_rule *, struct pf_rule *); | ||
| 235 | int skip_cmp_ifp(struct pf_rule *, struct pf_rule *); | ||
| 236 | int skip_cmp_proto(struct pf_rule *, struct pf_rule *); | ||
| 237 | int skip_cmp_src_addr(struct pf_rule *, struct pf_rule *); | ||
| 238 | int skip_cmp_src_port(struct pf_rule *, struct pf_rule *); | ||
| 239 | int superblock_inclusive(struct superblock *, struct pf_opt_rule *); | ||
| 240 | void superblock_free(struct pfctl *, struct superblock *); | ||
| 241 | |||
| 242 | |||
| 243 | int (*skip_comparitors[PF_SKIP_COUNT])(struct pf_rule *, struct pf_rule *); | ||
| 244 | const char *skip_comparitors_names[PF_SKIP_COUNT]; | ||
| 245 | #define PF_SKIP_COMPARITORS {				\ | ||
| 246 |     { "ifp", PF_SKIP_IFP, skip_cmp_ifp },		\ | ||
| 247 |     { "dir", PF_SKIP_DIR, skip_cmp_dir },		\ | ||
| 248 |     { "rdomain", PF_SKIP_RDOM, skip_cmp_rdom },		\ | ||
| 249 |     { "af", PF_SKIP_AF, skip_cmp_af },			\ | ||
| 250 |     { "proto", PF_SKIP_PROTO, skip_cmp_proto },		\ | ||
| 251 |     { "saddr", PF_SKIP_SRC_ADDR, skip_cmp_src_addr },	\ | ||
| 252 |     { "daddr", PF_SKIP_DST_ADDR, skip_cmp_dst_addr },	\ | ||
| 253 |     { "sport", PF_SKIP_SRC_PORT, skip_cmp_src_port },	\ | ||
| 254 |     { "dport", PF_SKIP_DST_PORT, skip_cmp_dst_port }	\ | ||
| 255 | } | ||
| 256 | |||
| 257 | struct pfr_buffer table_buffer; | ||
| 258 | int table_identifier; | ||
| 259 | |||
| 260 | |||
| 261 | int | ||
| 262 | pfctl_optimize_ruleset(struct pfctl *pf, struct pf_ruleset *rs) | ||
| 263 | { | ||
| 264 | 252 | struct superblocks superblocks; | |
| 265 | 126 | struct pf_opt_queue opt_queue; | |
| 266 | struct superblock *block; | ||
| 267 | struct pf_opt_rule *por; | ||
| 268 | struct pf_rule *r; | ||
| 269 | struct pf_rulequeue *old_rules; | ||
| 270 | |||
| 271 | 	DEBUG("optimizing ruleset"); | ||
| 272 | 126 | memset(&table_buffer, 0, sizeof(table_buffer)); | |
| 273 | 126 | skip_init(); | |
| 274 | 126 | TAILQ_INIT(&opt_queue); | |
| 275 | |||
| 276 | 126 | old_rules = rs->rules.active.ptr; | |
| 277 | 126 | rs->rules.active.ptr = rs->rules.inactive.ptr; | |
| 278 | 126 | rs->rules.inactive.ptr = old_rules; | |
| 279 | |||
| 280 | /* | ||
| 281 | * XXX expanding the pf_opt_rule format throughout pfctl might allow | ||
| 282 | * us to avoid all this copying. | ||
| 283 | */ | ||
| 284 | ✓✓ | 1972 | 	while ((r = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) { | 
| 285 | ✓✓ | 2580 | TAILQ_REMOVE(rs->rules.inactive.ptr, r, entries); | 
| 286 | ✗✓ | 860 | if ((por = calloc(1, sizeof(*por))) == NULL) | 
| 287 | err(1, "calloc"); | ||
| 288 | 860 | memcpy(&por->por_rule, r, sizeof(*r)); | |
| 289 | |||
| 290 | 860 | TAILQ_INSERT_TAIL(&opt_queue, por, por_entry); | |
| 291 | } | ||
| 292 | |||
| 293 | 126 | TAILQ_INIT(&superblocks); | |
| 294 | ✓✗ | 126 | if (construct_superblocks(pf, &opt_queue, &superblocks)) | 
| 295 | goto error; | ||
| 296 | |||
| 297 | ✗✓ | 126 | 	if (pf->optimize & PF_OPTIMIZE_PROFILE) { | 
| 298 | if (load_feedback_profile(pf, &superblocks)) | ||
| 299 | goto error; | ||
| 300 | } | ||
| 301 | |||
| 302 | ✓✓ | 1212 | 	TAILQ_FOREACH(block, &superblocks, sb_entry) { | 
| 303 | ✓✗ | 480 | if (optimize_superblock(pf, block)) | 
| 304 | goto error; | ||
| 305 | } | ||
| 306 | |||
| 307 | 126 | rs->anchor->refcnt = 0; | |
| 308 | ✓✓ | 1296 | 	while ((block = TAILQ_FIRST(&superblocks))) { | 
| 309 | ✓✓ | 1446 | TAILQ_REMOVE(&superblocks, block, sb_entry); | 
| 310 | |||
| 311 | ✓✓ | 2424 | 		while ((por = TAILQ_FIRST(&block->sb_rules))) { | 
| 312 | ✓✓ | 2070 | TAILQ_REMOVE(&block->sb_rules, por, por_entry); | 
| 313 | 690 | por->por_rule.nr = rs->anchor->refcnt++; | |
| 314 | ✗✓ | 690 | if ((r = calloc(1, sizeof(*r))) == NULL) | 
| 315 | err(1, "calloc"); | ||
| 316 | 690 | memcpy(r, &por->por_rule, sizeof(*r)); | |
| 317 | 690 | TAILQ_INSERT_TAIL(rs->rules.active.ptr, r, entries); | |
| 318 | 690 | free(por); | |
| 319 | } | ||
| 320 | 522 | free(block); | |
| 321 | } | ||
| 322 | |||
| 323 | 126 | return (0); | |
| 324 | |||
| 325 | error: | ||
| 326 | 	while ((por = TAILQ_FIRST(&opt_queue))) { | ||
| 327 | TAILQ_REMOVE(&opt_queue, por, por_entry); | ||
| 328 | 		if (por->por_src_tbl) { | ||
| 329 | pfr_buf_clear(por->por_src_tbl->pt_buf); | ||
| 330 | free(por->por_src_tbl->pt_buf); | ||
| 331 | free(por->por_src_tbl); | ||
| 332 | } | ||
| 333 | 		if (por->por_dst_tbl) { | ||
| 334 | pfr_buf_clear(por->por_dst_tbl->pt_buf); | ||
| 335 | free(por->por_dst_tbl->pt_buf); | ||
| 336 | free(por->por_dst_tbl); | ||
| 337 | } | ||
| 338 | free(por); | ||
| 339 | } | ||
| 340 | 	while ((block = TAILQ_FIRST(&superblocks))) { | ||
| 341 | TAILQ_REMOVE(&superblocks, block, sb_entry); | ||
| 342 | superblock_free(pf, block); | ||
| 343 | } | ||
| 344 | return (1); | ||
| 345 | 126 | } | |
| 346 | |||
| 347 | |||
| 348 | /* | ||
| 349 | * Go ahead and optimize a superblock | ||
| 350 | */ | ||
| 351 | int | ||
| 352 | optimize_superblock(struct pfctl *pf, struct superblock *block) | ||
| 353 | { | ||
| 354 | #ifdef OPT_DEBUG | ||
| 355 | struct pf_opt_rule *por; | ||
| 356 | #endif /* OPT_DEBUG */ | ||
| 357 | |||
| 358 | /* We have a few optimization passes: | ||
| 359 | * 1) remove duplicate rules or rules that are a subset of other | ||
| 360 | * rules | ||
| 361 | * 2) combine otherwise identical rules with different IP addresses | ||
| 362 | * into a single rule and put the addresses in a table. | ||
| 363 | * 3) re-order the rules to improve kernel skip steps | ||
| 364 | * 4) re-order the 'quick' rules based on feedback from the | ||
| 365 | * active ruleset statistics | ||
| 366 | * | ||
| 367 | * XXX combine_rules() doesn't combine v4 and v6 rules. would just | ||
| 368 | * have to keep af in the table container, make af 'COMBINE' and | ||
| 369 | * twiddle the af on the merged rule | ||
| 370 | * XXX maybe add a weighting to the metric on skipsteps when doing | ||
| 371 | * reordering. sometimes two sequential tables will be better | ||
| 372 | * than four consecutive interfaces. | ||
| 373 | * XXX need to adjust the skipstep count of everything after PROTO, | ||
| 374 | * since they aren't actually checked on a proto mismatch in | ||
| 375 | 	 *     pf_test_{tcp, udp, icmp}() | ||
| 376 | * XXX should I treat proto=0, af=0 or dir=0 special in skipstep | ||
| 377 | * calculation since they are a DC? | ||
| 378 | * XXX keep last skiplist of last superblock to influence this | ||
| 379 | * superblock. '5 inet6 log' should make '3 inet6' come before '4 | ||
| 380 | * inet' in the next superblock. | ||
| 381 | * XXX would be useful to add tables for ports | ||
| 382 | * XXX we can also re-order some mutually exclusive superblocks to | ||
| 383 | * try merging superblocks before any of these optimization passes. | ||
| 384 | * for instance a single 'log in' rule in the middle of non-logging | ||
| 385 | * out rules. | ||
| 386 | */ | ||
| 387 | |||
| 388 | /* shortcut. there will be a lot of 1-rule superblocks */ | ||
| 389 | ✓✓ | 960 | if (!TAILQ_NEXT(TAILQ_FIRST(&block->sb_rules), por_entry)) | 
| 390 | 388 | return (0); | |
| 391 | |||
| 392 | #ifdef OPT_DEBUG | ||
| 393 | 	printf("--- Superblock ---\n"); | ||
| 394 | 	TAILQ_FOREACH(por, &block->sb_rules, por_entry) { | ||
| 395 | 		printf("  "); | ||
| 396 | print_rule(&por->por_rule, por->por_rule.anchor ? | ||
| 397 | por->por_rule.anchor->name : "", PF_OPT_DEBUG); | ||
| 398 | } | ||
| 399 | #endif /* OPT_DEBUG */ | ||
| 400 | |||
| 401 | |||
| 402 | ✗✓ | 92 | if (remove_identical_rules(pf, block)) | 
| 403 | return (1); | ||
| 404 | ✗✓ | 92 | if (combine_rules(pf, block)) | 
| 405 | return (1); | ||
| 406 | ✗✓✗✗ | 92 | if ((pf->optimize & PF_OPTIMIZE_PROFILE) && | 
| 407 | TAILQ_FIRST(&block->sb_rules)->por_rule.quick && | ||
| 408 | 	    block->sb_profiled_block) { | ||
| 409 | if (block_feedback(pf, block)) | ||
| 410 | return (1); | ||
| 411 | ✗✓ | 92 | 	} else if (reorder_rules(pf, block, 0)) { | 
| 412 | return (1); | ||
| 413 | } | ||
| 414 | |||
| 415 | /* | ||
| 416 | * Don't add any optimization passes below reorder_rules(). It will | ||
| 417 | * have divided superblocks into smaller blocks for further refinement | ||
| 418 | * and doesn't put them back together again. What once was a true | ||
| 419 | * superblock might have been split into multiple superblocks. | ||
| 420 | */ | ||
| 421 | |||
| 422 | #ifdef OPT_DEBUG | ||
| 423 | 	printf("--- END Superblock ---\n"); | ||
| 424 | #endif /* OPT_DEBUG */ | ||
| 425 | 92 | return (0); | |
| 426 | 480 | } | |
| 427 | |||
| 428 | |||
| 429 | /* | ||
| 430 | * Optimization pass #1: remove identical rules | ||
| 431 | */ | ||
| 432 | int | ||
| 433 | remove_identical_rules(struct pfctl *pf, struct superblock *block) | ||
| 434 | { | ||
| 435 | struct pf_opt_rule *por1, *por2, *por_next, *por2_next; | ||
| 436 | 184 | struct pf_rule a, a2, b, b2; | |
| 437 | |||
| 438 | ✓✓ | 824 | 	for (por1 = TAILQ_FIRST(&block->sb_rules); por1; por1 = por_next) { | 
| 439 | 320 | por_next = TAILQ_NEXT(por1, por_entry); | |
| 440 | ✓✓ | 3186 | 		for (por2 = por_next; por2; por2 = por2_next) { | 
| 441 | 1291 | por2_next = TAILQ_NEXT(por2, por_entry); | |
| 442 | 1291 | comparable_rule(&a, &por1->por_rule, DC); | |
| 443 | 1291 | comparable_rule(&b, &por2->por_rule, DC); | |
| 444 | 1291 | memcpy(&a2, &a, sizeof(a2)); | |
| 445 | 1291 | memcpy(&b2, &b, sizeof(b2)); | |
| 446 | |||
| 447 | 1291 | exclude_supersets(&a, &b); | |
| 448 | 1291 | exclude_supersets(&b2, &a2); | |
| 449 | ✓✓ | 1291 | 			if (memcmp(&a, &b, sizeof(a)) == 0) { | 
| 450 | 				DEBUG("removing identical rule  nr%d = *nr%d*", | ||
| 451 | por1->por_rule.nr, por2->por_rule.nr); | ||
| 452 | ✓✓ | 456 | TAILQ_REMOVE(&block->sb_rules, por2, por_entry); | 
| 453 | ✓✓ | 152 | if (por_next == por2) | 
| 454 | 69 | por_next = TAILQ_NEXT(por1, por_entry); | |
| 455 | 152 | free(por2); | |
| 456 | ✓✓ | 1291 | 			} else if (memcmp(&a2, &b2, sizeof(a2)) == 0) { | 
| 457 | 				DEBUG("removing identical rule  *nr%d* = nr%d", | ||
| 458 | por1->por_rule.nr, por2->por_rule.nr); | ||
| 459 | ✓✗ | 54 | TAILQ_REMOVE(&block->sb_rules, por1, por_entry); | 
| 460 | 18 | free(por1); | |
| 461 | 18 | break; | |
| 462 | } | ||
| 463 | } | ||
| 464 | } | ||
| 465 | |||
| 466 | 92 | return (0); | |
| 467 | 92 | } | |
| 468 | |||
| 469 | |||
| 470 | /* | ||
| 471 | * Optimization pass #2: combine similar rules with different addresses | ||
| 472 | * into a single rule and a table | ||
| 473 | */ | ||
| 474 | int | ||
| 475 | combine_rules(struct pfctl *pf, struct superblock *block) | ||
| 476 | { | ||
| 477 | struct pf_opt_rule *p1, *p2, *por_next; | ||
| 478 | int src_eq, dst_eq; | ||
| 479 | |||
| 480 | /* First we make a pass to combine the rules. O(n log n) */ | ||
| 481 | ✓✓ | 880 | 	TAILQ_FOREACH(p1, &block->sb_rules, por_entry) { | 
| 482 | ✓✓ | 2512 | 		for (p2 = TAILQ_NEXT(p1, por_entry); p2; p2 = por_next) { | 
| 483 | 954 | por_next = TAILQ_NEXT(p2, por_entry); | |
| 484 | |||
| 485 | 1908 | src_eq = addrs_equal(&p1->por_rule.src, | |
| 486 | 954 | &p2->por_rule.src); | |
| 487 | 1908 | dst_eq = addrs_equal(&p1->por_rule.dst, | |
| 488 | 954 | &p2->por_rule.dst); | |
| 489 | |||
| 490 | ✓✓✓✗ ✓✓ | 1198 | if (src_eq && !dst_eq && p1->por_src_tbl == NULL && | 
| 491 | ✓✓ | 203 | p2->por_dst_tbl == NULL && | 
| 492 | ✓✗ | 186 | p2->por_src_tbl == NULL && | 
| 493 | ✓✓ | 186 | rules_combineable(&p1->por_rule, &p2->por_rule) && | 
| 494 | 41 | addrs_combineable(&p1->por_rule.dst, | |
| 495 | 			    &p2->por_rule.dst)) { | ||
| 496 | 				DEBUG("can combine rules  nr%d = nr%d", | ||
| 497 | p1->por_rule.nr, p2->por_rule.nr); | ||
| 498 | ✓✓✗✓ | 59 | if (p1->por_dst_tbl == NULL && | 
| 499 | 25 | add_opt_table(pf, &p1->por_dst_tbl, | |
| 500 | 25 | p1->por_rule.af, &p1->por_rule.dst, NULL)) | |
| 501 | return (1); | ||
| 502 | ✗✓ | 34 | if (add_opt_table(pf, &p1->por_dst_tbl, | 
| 503 | 34 | p1->por_rule.af, &p2->por_rule.dst, NULL)) | |
| 504 | return (1); | ||
| 505 | 34 | p2->por_dst_tbl = p1->por_dst_tbl; | |
| 506 | ✗✓ | 34 | if (p1->por_dst_tbl->pt_rulecount >= | 
| 507 | 				    TABLE_THRESHOLD) { | ||
| 508 | TAILQ_REMOVE(&block->sb_rules, p2, | ||
| 509 | por_entry); | ||
| 510 | free(p2); | ||
| 511 | } | ||
| 512 | ✓✓✓✓ | 1066 | } else if (!src_eq && dst_eq && p1->por_dst_tbl == NULL | 
| 513 | ✓✓✓✓ | 213 | && p2->por_src_tbl == NULL && | 
| 514 | ✓✗ | 77 | p2->por_dst_tbl == NULL && | 
| 515 | ✓✓ | 77 | rules_combineable(&p1->por_rule, &p2->por_rule) && | 
| 516 | 16 | addrs_combineable(&p1->por_rule.src, | |
| 517 | 			    &p2->por_rule.src)) { | ||
| 518 | 				DEBUG("can combine rules  nr%d = nr%d", | ||
| 519 | p1->por_rule.nr, p2->por_rule.nr); | ||
| 520 | ✓✓✗✓ | 10 | if (p1->por_src_tbl == NULL && | 
| 521 | 3 | add_opt_table(pf, &p1->por_src_tbl, | |
| 522 | 3 | p1->por_rule.af, &p1->por_rule.src, NULL)) | |
| 523 | return (1); | ||
| 524 | ✗✓ | 7 | if (add_opt_table(pf, &p1->por_src_tbl, | 
| 525 | 7 | p1->por_rule.af, &p2->por_rule.src, NULL)) | |
| 526 | return (1); | ||
| 527 | 7 | p2->por_src_tbl = p1->por_src_tbl; | |
| 528 | ✗✓ | 7 | if (p1->por_src_tbl->pt_rulecount >= | 
| 529 | 				    TABLE_THRESHOLD) { | ||
| 530 | TAILQ_REMOVE(&block->sb_rules, p2, | ||
| 531 | por_entry); | ||
| 532 | free(p2); | ||
| 533 | } | ||
| 534 | } | ||
| 535 | } | ||
| 536 | } | ||
| 537 | |||
| 538 | |||
| 539 | /* | ||
| 540 | * Then we make a final pass to create a valid table name and | ||
| 541 | * insert the name into the rules. | ||
| 542 | * Convert translation/routing mapping pools to tables as well. | ||
| 543 | */ | ||
| 544 | ✓✓ | 788 | 	for (p1 = TAILQ_FIRST(&block->sb_rules); p1; p1 = por_next) { | 
| 545 | 302 | por_next = TAILQ_NEXT(p1, por_entry); | |
| 546 | ✓✓✗✓ | 312 | assert(p1->por_src_tbl == NULL || p1->por_dst_tbl == NULL); | 
| 547 | |||
| 548 | ✓✓✗✓ | 312 | if (p1->por_src_tbl && p1->por_src_tbl->pt_rulecount >= | 
| 549 | 		    TABLE_THRESHOLD) { | ||
| 550 | 			if (p1->por_src_tbl->pt_generated) { | ||
| 551 | /* This rule is included in a table */ | ||
| 552 | TAILQ_REMOVE(&block->sb_rules, p1, por_entry); | ||
| 553 | free(p1); | ||
| 554 | continue; | ||
| 555 | } | ||
| 556 | p1->por_src_tbl->pt_generated = 1; | ||
| 557 | |||
| 558 | if ((pf->opts & PF_OPT_NOACTION) == 0 && | ||
| 559 | pf_opt_create_table(pf, p1->por_src_tbl)) | ||
| 560 | return (1); | ||
| 561 | |||
| 562 | pf->tdirty = 1; | ||
| 563 | |||
| 564 | if (pf->opts & PF_OPT_VERBOSE) | ||
| 565 | print_tabledef(p1->por_src_tbl->pt_name, | ||
| 566 | PFR_TFLAG_CONST, 1, | ||
| 567 | &p1->por_src_tbl->pt_nodes); | ||
| 568 | |||
| 569 | memset(&p1->por_rule.src.addr, 0, | ||
| 570 | sizeof(p1->por_rule.src.addr)); | ||
| 571 | p1->por_rule.src.addr.type = PF_ADDR_TABLE; | ||
| 572 | strlcpy(p1->por_rule.src.addr.v.tblname, | ||
| 573 | p1->por_src_tbl->pt_name, | ||
| 574 | sizeof(p1->por_rule.src.addr.v.tblname)); | ||
| 575 | |||
| 576 | pfr_buf_clear(p1->por_src_tbl->pt_buf); | ||
| 577 | free(p1->por_src_tbl->pt_buf); | ||
| 578 | p1->por_src_tbl->pt_buf = NULL; | ||
| 579 | } | ||
| 580 | ✓✓✗✓ | 361 | if (p1->por_dst_tbl && p1->por_dst_tbl->pt_rulecount >= | 
| 581 | 		    TABLE_THRESHOLD) { | ||
| 582 | 			if (p1->por_dst_tbl->pt_generated) { | ||
| 583 | /* This rule is included in a table */ | ||
| 584 | TAILQ_REMOVE(&block->sb_rules, p1, por_entry); | ||
| 585 | free(p1); | ||
| 586 | continue; | ||
| 587 | } | ||
| 588 | p1->por_dst_tbl->pt_generated = 1; | ||
| 589 | |||
| 590 | if ((pf->opts & PF_OPT_NOACTION) == 0 && | ||
| 591 | pf_opt_create_table(pf, p1->por_dst_tbl)) | ||
| 592 | return (1); | ||
| 593 | pf->tdirty = 1; | ||
| 594 | |||
| 595 | if (pf->opts & PF_OPT_VERBOSE) | ||
| 596 | print_tabledef(p1->por_dst_tbl->pt_name, | ||
| 597 | PFR_TFLAG_CONST, 1, | ||
| 598 | &p1->por_dst_tbl->pt_nodes); | ||
| 599 | |||
| 600 | memset(&p1->por_rule.dst.addr, 0, | ||
| 601 | sizeof(p1->por_rule.dst.addr)); | ||
| 602 | p1->por_rule.dst.addr.type = PF_ADDR_TABLE; | ||
| 603 | strlcpy(p1->por_rule.dst.addr.v.tblname, | ||
| 604 | p1->por_dst_tbl->pt_name, | ||
| 605 | sizeof(p1->por_rule.dst.addr.v.tblname)); | ||
| 606 | |||
| 607 | pfr_buf_clear(p1->por_dst_tbl->pt_buf); | ||
| 608 | free(p1->por_dst_tbl->pt_buf); | ||
| 609 | p1->por_dst_tbl->pt_buf = NULL; | ||
| 610 | } | ||
| 611 | } | ||
| 612 | |||
| 613 | 92 | return (0); | |
| 614 | 92 | } | |
| 615 | |||
| 616 | |||
| 617 | /* | ||
| 618 | * Optimization pass #3: re-order rules to improve skip steps | ||
| 619 | */ | ||
| 620 | int | ||
| 621 | reorder_rules(struct pfctl *pf, struct superblock *block, int depth) | ||
| 622 | { | ||
| 623 | struct superblock *newblock; | ||
| 624 | struct pf_skip_step *skiplist; | ||
| 625 | struct pf_opt_rule *por; | ||
| 626 | int i, largest, largest_list, rule_count = 0; | ||
| 627 | 268 | TAILQ_HEAD( , pf_opt_rule) head; | |
| 628 | |||
| 629 | /* | ||
| 630 | * Calculate the best-case skip steps. We put each rule in a list | ||
| 631 | * of other rules with common fields | ||
| 632 | */ | ||
| 633 | ✓✓ | 2680 | 	for (i = 0; i < PF_SKIP_COUNT; i++) { | 
| 634 | ✓✓ | 9936 | 		TAILQ_FOREACH(por, &block->sb_rules, por_entry) { | 
| 635 | ✓✓ | 22972 | TAILQ_FOREACH(skiplist, &block->sb_skipsteps[i], | 
| 636 | 			    ps_entry) { | ||
| 637 | ✓✓ | 8276 | if (skip_compare(i, skiplist, por) == 0) | 
| 638 | break; | ||
| 639 | } | ||
| 640 | ✓✓ | 4365 | 			if (skiplist == NULL) { | 
| 641 | ✗✓ | 3210 | if ((skiplist = calloc(1, sizeof(*skiplist))) == | 
| 642 | NULL) | ||
| 643 | err(1, "calloc"); | ||
| 644 | 3210 | TAILQ_INIT(&skiplist->ps_rules); | |
| 645 | 3210 | TAILQ_INSERT_TAIL(&block->sb_skipsteps[i], | |
| 646 | skiplist, ps_entry); | ||
| 647 | 3210 | } | |
| 648 | 4365 | skip_append(block, i, skiplist, por); | |
| 649 | } | ||
| 650 | } | ||
| 651 | |||
| 652 | ✓✓ | 1104 | TAILQ_FOREACH(por, &block->sb_rules, por_entry) | 
| 653 | 485 | rule_count++; | |
| 654 | |||
| 655 | /* | ||
| 656 | * Now we're going to ignore any fields that are identical between | ||
| 657 | * all of the rules in the superblock and those fields which differ | ||
| 658 | * between every rule in the superblock. | ||
| 659 | */ | ||
| 660 | largest = 0; | ||
| 661 | ✓✓ | 2680 | 	for (i = 0; i < PF_SKIP_COUNT; i++) { | 
| 662 | 1206 | skiplist = TAILQ_FIRST(&block->sb_skipsteps[i]); | |
| 663 | ✓✓ | 1206 | 		if (skiplist->ps_count == rule_count) { | 
| 664 | 			DEBUG("(%d) original skipstep '%s' is all rules", | ||
| 665 | depth, skip_comparitors_names[i]); | ||
| 666 | 414 | skiplist->ps_count = 0; | |
| 667 | ✓✓ | 1206 | 		} else if (skiplist->ps_count == 1) { | 
| 668 | 705 | skiplist->ps_count = 0; | |
| 669 | 705 | 		} else { | |
| 670 | 			DEBUG("(%d) original skipstep '%s' largest jump is %d", | ||
| 671 | depth, skip_comparitors_names[i], | ||
| 672 | skiplist->ps_count); | ||
| 673 | ✓✓ | 87 | if (skiplist->ps_count > largest) | 
| 674 | 43 | largest = skiplist->ps_count; | |
| 675 | } | ||
| 676 | } | ||
| 677 | ✓✓ | 134 | 	if (largest == 0) { | 
| 678 | /* Ugh. There is NO commonality in the superblock on which | ||
| 679 | * to base the skipstep optimization. | ||
| 680 | */ | ||
| 681 | goto done; | ||
| 682 | } | ||
| 683 | |||
| 684 | /* | ||
| 685 | * Now we're going to empty the superblock rule list and re-create | ||
| 686 | * it based on a more optimal skipstep order. | ||
| 687 | */ | ||
| 688 | 38 | TAILQ_INIT(&head); | |
| 689 | ✓✓ | 596 | 	while ((por = TAILQ_FIRST(&block->sb_rules))) { | 
| 690 | ✓✓ | 780 | TAILQ_REMOVE(&block->sb_rules, por, por_entry); | 
| 691 | 260 | TAILQ_INSERT_TAIL(&head, por, por_entry); | |
| 692 | } | ||
| 693 | |||
| 694 | |||
| 695 | ✓✓ | 122 | 	while (!TAILQ_EMPTY(&head)) { | 
| 696 | largest = 1; | ||
| 697 | |||
| 698 | /* | ||
| 699 | * Find the most useful skip steps remaining | ||
| 700 | */ | ||
| 701 | ✓✓ | 1680 | 		for (i = 0; i < PF_SKIP_COUNT; i++) { | 
| 702 | 756 | skiplist = TAILQ_FIRST(&block->sb_skipsteps[i]); | |
| 703 | ✓✓ | 756 | 			if (skiplist->ps_count > largest) { | 
| 704 | largest = skiplist->ps_count; | ||
| 705 | largest_list = i; | ||
| 706 | 77 | } | |
| 707 | } | ||
| 708 | |||
| 709 | ✓✓ | 84 | 		if (largest <= 1) { | 
| 710 | /* | ||
| 711 | * Nothing useful left. Leave remaining rules in order. | ||
| 712 | */ | ||
| 713 | 			DEBUG("(%d) no more commonality for skip steps", depth); | ||
| 714 | ✓✓ | 56 | 			while ((por = TAILQ_FIRST(&head))) { | 
| 715 | ✓✓ | 49 | TAILQ_REMOVE(&head, por, por_entry); | 
| 716 | 21 | TAILQ_INSERT_TAIL(&block->sb_rules, por, | |
| 717 | por_entry); | ||
| 718 | } | ||
| 719 | 		} else { | ||
| 720 | /* | ||
| 721 | * There is commonality. Extract those common rules | ||
| 722 | * and place them in the ruleset adjacent to each | ||
| 723 | * other. | ||
| 724 | */ | ||
| 725 | 70 | skiplist = TAILQ_FIRST(&block->sb_skipsteps[ | |
| 726 | largest_list]); | ||
| 727 | 			DEBUG("(%d) skipstep '%s' largest jump is %d @ #%d", | ||
| 728 | depth, skip_comparitors_names[largest_list], | ||
| 729 | largest, TAILQ_FIRST(&TAILQ_FIRST(&block-> | ||
| 730 | sb_skipsteps [largest_list])->ps_rules)-> | ||
| 731 | por_rule.nr); | ||
| 732 | ✓✓ | 210 | TAILQ_REMOVE(&block->sb_skipsteps[largest_list], | 
| 733 | skiplist, ps_entry); | ||
| 734 | |||
| 735 | |||
| 736 | /* | ||
| 737 | * There may be further commonality inside these | ||
| 738 | * rules. So we'll split them off into their own | ||
| 739 | * superblock and pass it back into the optimizer. | ||
| 740 | */ | ||
| 741 | ✓✓ | 70 | 			if (skiplist->ps_count > 2) { | 
| 742 | ✗✓ | 84 | if ((newblock = calloc(1, sizeof(*newblock))) | 
| 743 | 42 | 				    == NULL) { | |
| 744 | 					warn("calloc"); | ||
| 745 | return (1); | ||
| 746 | } | ||
| 747 | 42 | TAILQ_INIT(&newblock->sb_rules); | |
| 748 | ✓✓ | 840 | for (i = 0; i < PF_SKIP_COUNT; i++) | 
| 749 | 378 | TAILQ_INIT(&newblock->sb_skipsteps[i]); | |
| 750 | 42 | TAILQ_INSERT_BEFORE(block, newblock, sb_entry); | |
| 751 | 				DEBUG("(%d) splitting off %d rules from superblock @ #%d", | ||
| 752 | depth, skiplist->ps_count, | ||
| 753 | TAILQ_FIRST(&skiplist->ps_rules)-> | ||
| 754 | por_rule.nr); | ||
| 755 | 42 | 			} else { | |
| 756 | newblock = block; | ||
| 757 | } | ||
| 758 | |||
| 759 | ✓✓ | 548 | 			while ((por = TAILQ_FIRST(&skiplist->ps_rules))) { | 
| 760 | ✓✓ | 677 | TAILQ_REMOVE(&head, por, por_entry); | 
| 761 | ✓✓ | 717 | TAILQ_REMOVE(&skiplist->ps_rules, por, | 
| 762 | por_skip_entry[largest_list]); | ||
| 763 | 239 | TAILQ_INSERT_TAIL(&newblock->sb_rules, por, | |
| 764 | por_entry); | ||
| 765 | |||
| 766 | /* Remove this rule from all other skiplists */ | ||
| 767 | 239 | remove_from_skipsteps(&block->sb_skipsteps[ | |
| 768 | largest_list], block, por, skiplist); | ||
| 769 | } | ||
| 770 | 70 | free(skiplist); | |
| 771 | ✓✓ | 70 | if (newblock != block) | 
| 772 | ✓✗ | 42 | if (reorder_rules(pf, newblock, depth + 1)) | 
| 773 | return (1); | ||
| 774 | } | ||
| 775 | } | ||
| 776 | |||
| 777 | done: | ||
| 778 | ✓✓ | 2680 | 	for (i = 0; i < PF_SKIP_COUNT; i++) { | 
| 779 | ✓✓ | 7486 | 		while ((skiplist = TAILQ_FIRST(&block->sb_skipsteps[i]))) { | 
| 780 | ✓✓ | 9420 | TAILQ_REMOVE(&block->sb_skipsteps[i], skiplist, | 
| 781 | ps_entry); | ||
| 782 | 3140 | free(skiplist); | |
| 783 | } | ||
| 784 | } | ||
| 785 | |||
| 786 | 134 | return (0); | |
| 787 | 134 | } | |
| 788 | |||
| 789 | |||
| 790 | /* | ||
| 791 | * Optimization pass #4: re-order 'quick' rules based on feedback from the | ||
| 792 | * currently running ruleset | ||
| 793 | */ | ||
int
block_feedback(struct pfctl *pf, struct superblock *block)
{
	TAILQ_HEAD( , pf_opt_rule) queue;
	struct pf_opt_rule *por1, *por2;
	u_int64_t total_count = 0;	/* accumulated but not otherwise used below */
	struct pf_rule a, b;


	/*
	 * Walk through all of the profiled superblock's rules and copy
	 * the counters onto our rules.  Rules are matched by comparing
	 * their DC-normalized forms (comparable_rule) with memcmp(); each
	 * profiled rule donates its packet counters to at most one of our
	 * rules (the `continue` skips rules that already received a count).
	 */
	TAILQ_FOREACH(por1, &block->sb_profiled_block->sb_rules, por_entry) {
		comparable_rule(&a, &por1->por_rule, DC);
		total_count += por1->por_rule.packets[0] +
		    por1->por_rule.packets[1];
		TAILQ_FOREACH(por2, &block->sb_rules, por_entry) {
			if (por2->por_profile_count)
				continue;
			comparable_rule(&b, &por2->por_rule, DC);
			if (memcmp(&a, &b, sizeof(a)) == 0) {
				por2->por_profile_count =
				    por1->por_rule.packets[0] +
				    por1->por_rule.packets[1];
				break;
			}
		}
	}
	/* The profiled block has served its purpose; release it. */
	superblock_free(pf, block->sb_profiled_block);
	block->sb_profiled_block = NULL;

	/*
	 * Now we pull all of the rules off the superblock and re-insert them
	 * in sorted order.
	 */

	TAILQ_INIT(&queue);
	while ((por1 = TAILQ_FIRST(&block->sb_rules)) != NULL) {
		TAILQ_REMOVE(&block->sb_rules, por1, por_entry);
		TAILQ_INSERT_TAIL(&queue, por1, por_entry);
	}

	/*
	 * Insertion sort back into sb_rules, descending by profile count.
	 * The insert-before-first-smaller policy keeps the sort stable,
	 * preserving the relative order of equally-counted rules.
	 */
	while ((por1 = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, por1, por_entry);
/* XXX I should sort all of the unused rules based on skip steps */
		TAILQ_FOREACH(por2, &block->sb_rules, por_entry) {
			if (por1->por_profile_count > por2->por_profile_count) {
				TAILQ_INSERT_BEFORE(por2, por1, por_entry);
				break;
			}
		}
		/* por2 is NULL only when the FOREACH ran to completion */
		if (por2 == NULL)
			TAILQ_INSERT_TAIL(&block->sb_rules, por1, por_entry);
	}

	return (0);
}
| 852 | |||
| 853 | |||
| 854 | /* | ||
| 855 | * Load the current ruleset from the kernel and try to associate them with | ||
| 856 | * the ruleset we're optimizing. | ||
| 857 | */ | ||
| 858 | int | ||
| 859 | load_feedback_profile(struct pfctl *pf, struct superblocks *superblocks) | ||
| 860 | { | ||
| 861 | struct superblock *block, *blockcur; | ||
| 862 | struct superblocks prof_superblocks; | ||
| 863 | struct pf_opt_rule *por; | ||
| 864 | struct pf_opt_queue queue; | ||
| 865 | struct pfioc_rule pr; | ||
| 866 | struct pf_rule a, b; | ||
| 867 | int nr, mnr; | ||
| 868 | |||
| 869 | TAILQ_INIT(&queue); | ||
| 870 | TAILQ_INIT(&prof_superblocks); | ||
| 871 | |||
| 872 | memset(&pr, 0, sizeof(pr)); | ||
| 873 | pr.rule.action = PF_PASS; | ||
| 874 | 	if (ioctl(pf->dev, DIOCGETRULES, &pr)) { | ||
| 875 | 		warn("DIOCGETRULES"); | ||
| 876 | return (1); | ||
| 877 | } | ||
| 878 | mnr = pr.nr; | ||
| 879 | |||
| 880 | 	DEBUG("Loading %d active rules for a feedback profile", mnr); | ||
| 881 | 	for (nr = 0; nr < mnr; ++nr) { | ||
| 882 | struct pf_ruleset *rs; | ||
| 883 | 		if ((por = calloc(1, sizeof(*por))) == NULL) { | ||
| 884 | 			warn("calloc"); | ||
| 885 | return (1); | ||
| 886 | } | ||
| 887 | pr.nr = nr; | ||
| 888 | 		if (ioctl(pf->dev, DIOCGETRULE, &pr)) { | ||
| 889 | 			warn("DIOCGETRULES"); | ||
| 890 | free(por); | ||
| 891 | return (1); | ||
| 892 | } | ||
| 893 | memcpy(&por->por_rule, &pr.rule, sizeof(por->por_rule)); | ||
| 894 | rs = pf_find_or_create_ruleset(pr.anchor_call); | ||
| 895 | por->por_rule.anchor = rs->anchor; | ||
| 896 | TAILQ_INSERT_TAIL(&queue, por, por_entry); | ||
| 897 | |||
| 898 | /* XXX pfctl_get_pool(pf->dev, &pr.rule.rpool, nr, pr.ticket, | ||
| 899 | * PF_PASS, pf->anchor) ??? | ||
| 900 | * ... pfctl_clear_pool(&pr.rule.rpool) | ||
| 901 | */ | ||
| 902 | } | ||
| 903 | |||
| 904 | if (construct_superblocks(pf, &queue, &prof_superblocks)) | ||
| 905 | return (1); | ||
| 906 | |||
| 907 | |||
| 908 | /* | ||
| 909 | * Now we try to associate the active ruleset's superblocks with | ||
| 910 | * the superblocks we're compiling. | ||
| 911 | */ | ||
| 912 | block = TAILQ_FIRST(superblocks); | ||
| 913 | blockcur = TAILQ_FIRST(&prof_superblocks); | ||
| 914 | 	while (block && blockcur) { | ||
| 915 | comparable_rule(&a, &TAILQ_FIRST(&block->sb_rules)->por_rule, | ||
| 916 | BREAK); | ||
| 917 | comparable_rule(&b, &TAILQ_FIRST(&blockcur->sb_rules)->por_rule, | ||
| 918 | BREAK); | ||
| 919 | 		if (memcmp(&a, &b, sizeof(a)) == 0) { | ||
| 920 | /* The two superblocks lined up */ | ||
| 921 | block->sb_profiled_block = blockcur; | ||
| 922 | 		} else { | ||
| 923 | 			DEBUG("superblocks don't line up between #%d and #%d", | ||
| 924 | TAILQ_FIRST(&block->sb_rules)->por_rule.nr, | ||
| 925 | TAILQ_FIRST(&blockcur->sb_rules)->por_rule.nr); | ||
| 926 | break; | ||
| 927 | } | ||
| 928 | block = TAILQ_NEXT(block, sb_entry); | ||
| 929 | blockcur = TAILQ_NEXT(blockcur, sb_entry); | ||
| 930 | } | ||
| 931 | |||
| 932 | |||
| 933 | |||
| 934 | /* Free any superblocks we couldn't link */ | ||
| 935 | 	while (blockcur) { | ||
| 936 | block = TAILQ_NEXT(blockcur, sb_entry); | ||
| 937 | superblock_free(pf, blockcur); | ||
| 938 | blockcur = block; | ||
| 939 | } | ||
| 940 | return (0); | ||
| 941 | } | ||
| 942 | |||
| 943 | |||
| 944 | /* | ||
| 945 | * Compare a rule to a skiplist to see if the rule is a member | ||
| 946 | */ | ||
| 947 | int | ||
| 948 | skip_compare(int skipnum, struct pf_skip_step *skiplist, | ||
| 949 | struct pf_opt_rule *por) | ||
| 950 | { | ||
| 951 | struct pf_rule *a, *b; | ||
| 952 | ✗✓ | 16552 | if (skipnum >= PF_SKIP_COUNT || skipnum < 0) | 
| 953 | errx(1, "skip_compare() out of bounds"); | ||
| 954 | 8276 | a = &por->por_rule; | |
| 955 | 8276 | b = &TAILQ_FIRST(&skiplist->ps_rules)->por_rule; | |
| 956 | |||
| 957 | 8276 | return ((skip_comparitors[skipnum])(a, b)); | |
| 958 | } | ||
| 959 | |||
| 960 | |||
| 961 | /* | ||
| 962 | * Add a rule to a skiplist | ||
| 963 | */ | ||
void
skip_append(struct superblock *superblock, int skipnum,
    struct pf_skip_step *skiplist, struct pf_opt_rule *por)
{
	struct pf_skip_step *prev;

	/* Account for the new member and hang it on this skiplist. */
	skiplist->ps_count++;
	TAILQ_INSERT_TAIL(&skiplist->ps_rules, por, por_skip_entry[skipnum]);

	/* Keep the list of skiplists sorted by whichever is larger */
	/*
	 * Bubble this skiplist toward the front, one position per
	 * iteration, while its predecessor has a smaller count.  The
	 * remove-then-insert-before pair is the TAILQ equivalent of a
	 * swap with the previous element.
	 */
	while ((prev = TAILQ_PREV(skiplist, skiplist, ps_entry)) &&
	    prev->ps_count < skiplist->ps_count) {
		TAILQ_REMOVE(&superblock->sb_skipsteps[skipnum],
		    skiplist, ps_entry);
		TAILQ_INSERT_BEFORE(prev, skiplist, ps_entry);
	}
}
| 981 | |||
| 982 | |||
| 983 | /* | ||
| 984 | * Remove a rule from the other skiplist calculations. | ||
| 985 | */ | ||
void
remove_from_skipsteps(struct skiplist *head, struct superblock *block,
    struct pf_opt_rule *por, struct pf_skip_step *active_list)
{
	struct pf_skip_step *sk, *next;
	struct pf_opt_rule *p2;
	int i, found;

	/* For every skip step, hunt down por and unlink it. */
	for (i = 0; i < PF_SKIP_COUNT; i++) {
		sk = TAILQ_FIRST(&block->sb_skipsteps[i]);
		/*
		 * Skip this step entirely when there is nothing to scan,
		 * when the first skiplist is the one currently being
		 * drained by the caller, or when its count is too small
		 * to matter (lists are kept sorted descending by count,
		 * so later entries are no larger).
		 */
		if (sk == NULL || sk == active_list || sk->ps_count <= 1)
			continue;
		found = 0;
		do {
			/* por appears in at most one skiplist per step */
			TAILQ_FOREACH(p2, &sk->ps_rules, por_skip_entry[i])
				if (p2 == por) {
					TAILQ_REMOVE(&sk->ps_rules, p2,
					    por_skip_entry[i]);
					found = 1;
					sk->ps_count--;
					break;
				}
		} while (!found && (sk = TAILQ_NEXT(sk, ps_entry)));
		if (found && sk) {
			/* Does this change the sorting order? */
			/*
			 * sk's count just dropped, so sink it past any
			 * successors that now outrank it to restore the
			 * descending-by-count invariant.
			 */
			while ((next = TAILQ_NEXT(sk, ps_entry)) &&
			    next->ps_count > sk->ps_count) {
				TAILQ_REMOVE(head, sk, ps_entry);
				TAILQ_INSERT_AFTER(head, next, sk, ps_entry);
			}
#ifdef OPT_DEBUG
			next = TAILQ_NEXT(sk, ps_entry);
			assert(next == NULL || next->ps_count <= sk->ps_count);
#endif /* OPT_DEBUG */
		}
	}
}
| 1023 | |||
| 1024 | |||
| 1025 | /* Compare two rules AF field for skiplist construction */ | ||
| 1026 | int | ||
| 1027 | skip_cmp_af(struct pf_rule *a, struct pf_rule *b) | ||
| 1028 | { | ||
| 1029 | ✓✓✓✓ | 1354 | if (a->af != b->af || a->af == 0) | 
| 1030 | 239 | return (1); | |
| 1031 | 240 | return (0); | |
| 1032 | 479 | } | |
| 1033 | |||
| 1034 | /* Compare two rules DIRECTION field for skiplist construction */ | ||
| 1035 | int | ||
| 1036 | skip_cmp_dir(struct pf_rule *a, struct pf_rule *b) | ||
| 1037 | { | ||
| 1038 | ✓✓✓✓ | 1708 | if (a->direction == 0 || a->direction != b->direction) | 
| 1039 | 438 | return (1); | |
| 1040 | 264 | return (0); | |
| 1041 | 702 | } | |
| 1042 | |||
| 1043 | /* Compare two rules ON RDOMAIN field for skiplist construction */ | ||
| 1044 | int | ||
| 1045 | skip_cmp_rdom(struct pf_rule *a, struct pf_rule *b) | ||
| 1046 | { | ||
| 1047 | ✗✓✗✗ | 2724 | if (a->onrdomain == -1 || a->onrdomain != b->onrdomain) | 
| 1048 | 1362 | return (1); | |
| 1049 | return (a->ifnot != b->ifnot); | ||
| 1050 | 1362 | } | |
| 1051 | |||
| 1052 | /* Compare two rules DST Address field for skiplist construction */ | ||
int
skip_cmp_dst_addr(struct pf_rule *a, struct pf_rule *b)
{
	/* Different negation or address kinds can never be grouped. */
	if (a->dst.neg != b->dst.neg ||
	    a->dst.addr.type != b->dst.addr.type)
		return (1);
	/* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
	 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
	 * a->proto == IPPROTO_ICMP
	 *    return (1);
	 */
	switch (a->dst.addr.type) {
	case PF_ADDR_ADDRMASK:
		/*
		 * Addr and mask must match bit for bit; an all-zero
		 * address ("any") is a wildcard and never groups.
		 */
		if (memcmp(&a->dst.addr.v.a.addr, &b->dst.addr.v.a.addr,
		    sizeof(a->dst.addr.v.a.addr)) ||
		    memcmp(&a->dst.addr.v.a.mask, &b->dst.addr.v.a.mask,
		    sizeof(a->dst.addr.v.a.mask)) ||
		    (a->dst.addr.v.a.addr.addr32[0] == 0 &&
		    a->dst.addr.v.a.addr.addr32[1] == 0 &&
		    a->dst.addr.v.a.addr.addr32[2] == 0 &&
		    a->dst.addr.v.a.addr.addr32[3] == 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		/* Dynamic interface addrs: same ifname, flags and mask. */
		if (strcmp(a->dst.addr.v.ifname, b->dst.addr.v.ifname) != 0 ||
		    a->dst.addr.iflags != b->dst.addr.iflags ||
		    memcmp(&a->dst.addr.v.a.mask, &b->dst.addr.v.a.mask,
		    sizeof(a->dst.addr.v.a.mask)))
			return (1);
		return (0);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		/* These carry no payload; same type means comparable. */
		return (0);
	case PF_ADDR_TABLE:
		/* Tables compare by name (nonzero strcmp => different). */
		return (strcmp(a->dst.addr.v.tblname, b->dst.addr.v.tblname));
	}
	/* Unknown address type: be conservative and refuse to group. */
	return (1);
}
| 1091 | |||
| 1092 | /* Compare two rules DST port field for skiplist construction */ | ||
| 1093 | int | ||
| 1094 | skip_cmp_dst_port(struct pf_rule *a, struct pf_rule *b) | ||
| 1095 | { | ||
| 1096 | /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0 | ||
| 1097 | * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP || | ||
| 1098 | * a->proto == IPPROTO_ICMP | ||
| 1099 | * return (1); | ||
| 1100 | */ | ||
| 1101 | ✓✓✓✓ ✗✓ | 2325 | if (a->dst.port_op == PF_OP_NONE || a->dst.port_op != b->dst.port_op || | 
| 1102 | ✓✓ | 160 | a->dst.port[0] != b->dst.port[0] || | 
| 1103 | 80 | a->dst.port[1] != b->dst.port[1]) | |
| 1104 | 936 | return (1); | |
| 1105 | 80 | return (0); | |
| 1106 | 1016 | } | |
| 1107 | |||
| 1108 | /* Compare two rules IFP field for skiplist construction */ | ||
| 1109 | int | ||
| 1110 | skip_cmp_ifp(struct pf_rule *a, struct pf_rule *b) | ||
| 1111 | { | ||
| 1112 | ✓✓✓✓ | 2729 | if (strcmp(a->ifname, b->ifname) || a->ifname[0] == '\0') | 
| 1113 | 793 | return (1); | |
| 1114 | 143 | return (a->ifnot != b->ifnot); | |
| 1115 | 936 | } | |
| 1116 | |||
| 1117 | /* Compare two rules PROTO field for skiplist construction */ | ||
| 1118 | int | ||
| 1119 | skip_cmp_proto(struct pf_rule *a, struct pf_rule *b) | ||
| 1120 | { | ||
| 1121 | ✓✓ | 3114 | return (a->proto != b->proto || a->proto == 0); | 
| 1122 | } | ||
| 1123 | |||
| 1124 | /* Compare two rules SRC addr field for skiplist construction */ | ||
int
skip_cmp_src_addr(struct pf_rule *a, struct pf_rule *b)
{
	/* Different negation or address kinds can never be grouped. */
	if (a->src.neg != b->src.neg ||
	    a->src.addr.type != b->src.addr.type)
		return (1);
	/* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
	 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
	 * a->proto == IPPROTO_ICMP
	 *    return (1);
	 */
	switch (a->src.addr.type) {
	case PF_ADDR_ADDRMASK:
		/*
		 * Addr and mask must match bit for bit; an all-zero
		 * address ("any") is a wildcard and never groups.
		 */
		if (memcmp(&a->src.addr.v.a.addr, &b->src.addr.v.a.addr,
		    sizeof(a->src.addr.v.a.addr)) ||
		    memcmp(&a->src.addr.v.a.mask, &b->src.addr.v.a.mask,
		    sizeof(a->src.addr.v.a.mask)) ||
		    (a->src.addr.v.a.addr.addr32[0] == 0 &&
		    a->src.addr.v.a.addr.addr32[1] == 0 &&
		    a->src.addr.v.a.addr.addr32[2] == 0 &&
		    a->src.addr.v.a.addr.addr32[3] == 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		/* Dynamic interface addrs: same ifname, flags and mask. */
		if (strcmp(a->src.addr.v.ifname, b->src.addr.v.ifname) != 0 ||
		    a->src.addr.iflags != b->src.addr.iflags ||
		    memcmp(&a->src.addr.v.a.mask, &b->src.addr.v.a.mask,
		    sizeof(a->src.addr.v.a.mask)))
			return (1);
		return (0);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		/* These carry no payload; same type means comparable. */
		return (0);
	case PF_ADDR_TABLE:
		/* Tables compare by name (nonzero strcmp => different). */
		return (strcmp(a->src.addr.v.tblname, b->src.addr.v.tblname));
	}
	/* Unknown address type: be conservative and refuse to group. */
	return (1);
}
| 1163 | |||
| 1164 | /* Compare two rules SRC port field for skiplist construction */ | ||
| 1165 | int | ||
| 1166 | skip_cmp_src_port(struct pf_rule *a, struct pf_rule *b) | ||
| 1167 | { | ||
| 1168 | ✓✓✓✓ ✗✓ | 2759 | if (a->src.port_op == PF_OP_NONE || a->src.port_op != b->src.port_op || | 
| 1169 | ✓✓ | 34 | a->src.port[0] != b->src.port[0] || | 
| 1170 | 5 | a->src.port[1] != b->src.port[1]) | |
| 1171 | 1345 | return (1); | |
| 1172 | /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0 | ||
| 1173 | * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP || | ||
| 1174 | * a->proto == IPPROTO_ICMP | ||
| 1175 | * return (1); | ||
| 1176 | */ | ||
| 1177 | 5 | return (0); | |
| 1178 | 1350 | } | |
| 1179 | |||
| 1180 | |||
| 1181 | void | ||
| 1182 | skip_init(void) | ||
| 1183 | { | ||
| 1184 | 252 | 	struct { | |
| 1185 | char *name; | ||
| 1186 | int skipnum; | ||
| 1187 | int (*func)(struct pf_rule *, struct pf_rule *); | ||
| 1188 | 126 | } comps[] = PF_SKIP_COMPARITORS; | |
| 1189 | int skipnum, i; | ||
| 1190 | |||
| 1191 | ✓✓ | 2520 | 	for (skipnum = 0; skipnum < PF_SKIP_COUNT; skipnum++) { | 
| 1192 | ✓✓ | 22680 | for (i = 0; i < sizeof(comps)/sizeof(*comps); i++) | 
| 1193 | ✓✓ | 10206 | 			if (comps[i].skipnum == skipnum) { | 
| 1194 | 1134 | skip_comparitors[skipnum] = comps[i].func; | |
| 1195 | 1134 | skip_comparitors_names[skipnum] = comps[i].name; | |
| 1196 | 1134 | } | |
| 1197 | } | ||
| 1198 | ✓✓ | 2520 | for (skipnum = 0; skipnum < PF_SKIP_COUNT; skipnum++) | 
| 1199 | ✗✓ | 1134 | if (skip_comparitors[skipnum] == NULL) | 
| 1200 | errx(1, "Need to add skip step comparitor to pfctl?!"); | ||
| 1201 | 126 | } | |
| 1202 | |||
| 1203 | /* | ||
| 1204 | * Add a host/netmask to a table | ||
| 1205 | */ | ||
int
add_opt_table(struct pfctl *pf, struct pf_opt_tbl **tbl, sa_family_t af,
    struct pf_rule_addr *addr, char *ifname)
{
#ifdef OPT_DEBUG
	char buf[128];
#endif /* OPT_DEBUG */
	/* Monotonic counter used only to make temporary names unique. */
	static int tablenum = 0;
	struct node_host node_host;

	/* Lazily allocate the table on first use. */
	if (*tbl == NULL) {
		if ((*tbl = calloc(1, sizeof(**tbl))) == NULL ||
		    ((*tbl)->pt_buf = calloc(1, sizeof(*(*tbl)->pt_buf))) ==
		    NULL)
			err(1, "calloc");
		(*tbl)->pt_buf->pfrb_type = PFRB_ADDRS;
		SIMPLEQ_INIT(&(*tbl)->pt_nodes);

		/* This is just a temporary table name */
		snprintf((*tbl)->pt_name, sizeof((*tbl)->pt_name), "%s%d",
		    PF_OPT_TABLE_PREFIX, tablenum++);
		DEBUG("creating table <%s>", (*tbl)->pt_name);
	}

	/* Build the node_host describing this address/interface pair. */
	memset(&node_host, 0, sizeof(node_host));
	node_host.af = af;
	node_host.addr = addr->addr;
	node_host.ifname = ifname;
	node_host.weight = addr->weight;

#ifdef OPT_DEBUG
	DEBUG("<%s> adding %s/%d", (*tbl)->pt_name, inet_ntop(af,
	    &node_host.addr.v.a.addr, buf, sizeof(buf)),
	    unmask(&node_host.addr.v.a.mask, af));
#endif /* OPT_DEBUG */

	/* append_addr_host returning nonzero indicates failure. */
	if (append_addr_host((*tbl)->pt_buf, &node_host, 0, 0)) {
		warn("failed to add host");
		return (1);
	}
	/* In verbose mode also keep a printable copy on pt_nodes. */
	if (pf->opts & PF_OPT_VERBOSE) {
		struct node_tinit *ti;

		if ((ti = calloc(1, sizeof(*ti))) == NULL)
			err(1, "malloc");
		if ((ti->host = malloc(sizeof(*ti->host))) == NULL)
			err(1, "malloc");
		memcpy(ti->host, &node_host, sizeof(*ti->host));
		SIMPLEQ_INSERT_TAIL(&(*tbl)->pt_nodes, ti, entries);
	}

	(*tbl)->pt_rulecount++;
	if ((*tbl)->pt_rulecount == TABLE_THRESHOLD)
		DEBUG("table <%s> now faster than skip steps", (*tbl)->pt_name);

	return (0);
}
| 1263 | |||
| 1264 | |||
| 1265 | /* | ||
| 1266 | * Do the dirty work of choosing an unused table name and creating it. | ||
| 1267 | * (be careful with the table name, it might already be used in another anchor) | ||
| 1268 | */ | ||
int
pf_opt_create_table(struct pfctl *pf, struct pf_opt_tbl *tbl)
{
	/* Counter appended to generated names; persists across calls. */
	static int tablenum;
	struct pfr_table *t;

	if (table_buffer.pfrb_type == 0) {
		/* Initialize the list of tables */
		/*
		 * First call: snapshot the kernel's existing tables,
		 * growing the buffer until pfr_get_tables fits.
		 */
		table_buffer.pfrb_type = PFRB_TABLES;
		for (;;) {
			pfr_buf_grow(&table_buffer, table_buffer.pfrb_size);
			table_buffer.pfrb_size = table_buffer.pfrb_msize;
			if (pfr_get_tables(NULL, table_buffer.pfrb_caddr,
			    &table_buffer.pfrb_size, PFR_FLAG_ALLRSETS))
				err(1, "pfr_get_tables");
			if (table_buffer.pfrb_size <= table_buffer.pfrb_msize)
				break;
		}
		table_identifier = arc4random();
	}

	/* XXX would be *really* nice to avoid duplicating identical tables */

	/* Now we have to pick a table name that isn't used */
again:
	DEBUG("translating temporary table <%s> to <%s%x_%d>", tbl->pt_name,
	    PF_OPT_TABLE_PREFIX, table_identifier, tablenum);
	snprintf(tbl->pt_name, sizeof(tbl->pt_name), "%s%x_%d",
	    PF_OPT_TABLE_PREFIX, table_identifier, tablenum);
	PFRB_FOREACH(t, &table_buffer) {
		if (strcasecmp(t->pfrt_name, tbl->pt_name) == 0) {
			/* Collision. Try again */
			/* A new random identifier changes the whole name. */
			DEBUG("wow, table <%s> in use.  trying again",
			    tbl->pt_name);
			table_identifier = arc4random();
			goto again;
		}
	}
	tablenum++;

	/* Name is unique; define the table as CONST in the top anchor. */
	if (pfctl_define_table(tbl->pt_name, PFR_TFLAG_CONST | tbl->pt_flags, 1,
	    pf->astack[0]->name, tbl->pt_buf, pf->astack[0]->ruleset.tticket)) {
		warn("failed to create table %s in %s",
		    tbl->pt_name, pf->astack[0]->name);
		return (1);
	}
	return (0);
}
| 1317 | |||
| 1318 | /* | ||
| 1319 | * Partition the flat ruleset into a list of distinct superblocks | ||
| 1320 | */ | ||
| 1321 | int | ||
| 1322 | construct_superblocks(struct pfctl *pf, struct pf_opt_queue *opt_queue, | ||
| 1323 | struct superblocks *superblocks) | ||
| 1324 | { | ||
| 1325 | struct superblock *block = NULL; | ||
| 1326 | struct pf_opt_rule *por; | ||
| 1327 | int i; | ||
| 1328 | |||
| 1329 | ✓✓ | 2098 | 	while (!TAILQ_EMPTY(opt_queue)) { | 
| 1330 | por = TAILQ_FIRST(opt_queue); | ||
| 1331 | ✓✓ | 2580 | TAILQ_REMOVE(opt_queue, por, por_entry); | 
| 1332 | ✓✓✓✓ | 1600 | 		if (block == NULL || !superblock_inclusive(block, por)) { | 
| 1333 | ✗✓ | 480 | 			if ((block = calloc(1, sizeof(*block))) == NULL) { | 
| 1334 | 				warn("calloc"); | ||
| 1335 | return (1); | ||
| 1336 | } | ||
| 1337 | 480 | TAILQ_INIT(&block->sb_rules); | |
| 1338 | ✓✓ | 9600 | for (i = 0; i < PF_SKIP_COUNT; i++) | 
| 1339 | 4320 | TAILQ_INIT(&block->sb_skipsteps[i]); | |
| 1340 | 480 | TAILQ_INSERT_TAIL(superblocks, block, sb_entry); | |
| 1341 | 480 | } | |
| 1342 | 860 | TAILQ_INSERT_TAIL(&block->sb_rules, por, por_entry); | |
| 1343 | } | ||
| 1344 | |||
| 1345 | 126 | return (0); | |
| 1346 | 126 | } | |
| 1347 | |||
| 1348 | |||
| 1349 | /* | ||
| 1350 | * Compare two rule addresses | ||
| 1351 | */ | ||
| 1352 | int | ||
| 1353 | addrs_equal(struct pf_rule_addr *a, struct pf_rule_addr *b) | ||
| 1354 | { | ||
| 1355 | ✓✓ | 3816 | if (a->neg != b->neg) | 
| 1356 | 46 | return (0); | |
| 1357 | 1862 | return (memcmp(&a->addr, &b->addr, sizeof(a->addr)) == 0); | |
| 1358 | 1908 | } | |
| 1359 | |||
| 1360 | |||
| 1361 | /* | ||
| 1362 | * The addresses are not equal, but can we combine them into one table? | ||
| 1363 | */ | ||
| 1364 | int | ||
| 1365 | addrs_combineable(struct pf_rule_addr *a, struct pf_rule_addr *b) | ||
| 1366 | { | ||
| 1367 | ✓✓✗✓ | 155 | if (a->addr.type != PF_ADDR_ADDRMASK || | 
| 1368 | 41 | b->addr.type != PF_ADDR_ADDRMASK) | |
| 1369 | 16 | return (0); | |
| 1370 | ✓✗✓✗ ✗✓ | 123 | if (a->neg != b->neg || a->port_op != b->port_op || | 
| 1371 | ✓✗ | 82 | a->port[0] != b->port[0] || a->port[1] != b->port[1]) | 
| 1372 | return (0); | ||
| 1373 | 41 | return (1); | |
| 1374 | 57 | } | |
| 1375 | |||
| 1376 | |||
| 1377 | /* | ||
| 1378 | * Are we allowed to combine these two rules | ||
| 1379 | */ | ||
| 1380 | int | ||
| 1381 | rules_combineable(struct pf_rule *p1, struct pf_rule *p2) | ||
| 1382 | { | ||
| 1383 | 526 | struct pf_rule a, b; | |
| 1384 | |||
| 1385 | 263 | comparable_rule(&a, p1, COMBINED); | |
| 1386 | 263 | comparable_rule(&b, p2, COMBINED); | |
| 1387 | 526 | return (memcmp(&a, &b, sizeof(a)) == 0); | |
| 1388 | 263 | } | |
| 1389 | |||
| 1390 | |||
| 1391 | /* | ||
| 1392 | * Can a rule be included inside a superblock | ||
| 1393 | */ | ||
int
superblock_inclusive(struct superblock *block, struct pf_opt_rule *por)
{
	struct pf_rule a, b;
	int i, j;

	/* First check for hard breaks */
	/* Any nonzero byte in a BARRIER field forces a new superblock. */
	for (i = 0; i < sizeof(pf_rule_desc)/sizeof(*pf_rule_desc); i++) {
		if (pf_rule_desc[i].prf_type == BARRIER) {
			for (j = 0; j < pf_rule_desc[i].prf_size; j++)
				if (((char *)&por->por_rule)[j +
				    pf_rule_desc[i].prf_offset] != 0)
					return (0);
		}
	}

	/* per-rule src-track is also a hard break */
	if (por->por_rule.rule_flag & PFRULE_RULESRCTRACK)
		return (0);

	/*
	 * Have to handle interface groups separately.  Consider the following
	 * rules:
	 *	block on EXTIFS to any port 22
	 *	pass  on em0 to any port 22
	 * (where EXTIFS is an arbitrary interface group)
	 * The optimizer may decide to re-order the pass rule in front of the
	 * block rule.  But what if EXTIFS includes em0???  Such a reordering
	 * would change the meaning of the ruleset.
	 * We can't just lookup the EXTIFS group and check if em0 is a member
	 * because the user is allowed to add interfaces to a group during
	 * runtime.
	 * Ergo interface groups become a defacto superblock break :-(
	 */
	if (interface_group(por->por_rule.ifname) ||
	    interface_group(TAILQ_FIRST(&block->sb_rules)->por_rule.ifname)) {
		if (strcasecmp(por->por_rule.ifname,
		    TAILQ_FIRST(&block->sb_rules)->por_rule.ifname) != 0)
			return (0);
	}

	/*
	 * Compare against the block's first rule with the NOMERGE fields
	 * zeroed; identical normalized forms mean por fits this block.
	 */
	comparable_rule(&a, &TAILQ_FIRST(&block->sb_rules)->por_rule, NOMERGE);
	comparable_rule(&b, &por->por_rule, NOMERGE);
	if (memcmp(&a, &b, sizeof(a)) == 0)
		return (1);

#ifdef OPT_DEBUG
	/* Debug builds only: report which field caused the break. */
	for (i = 0; i < sizeof(por->por_rule); i++) {
		int closest = -1;
		if (((u_int8_t *)&a)[i] != ((u_int8_t *)&b)[i]) {
			for (j = 0; j < sizeof(pf_rule_desc) /
			    sizeof(*pf_rule_desc); j++) {
				if (i >= pf_rule_desc[j].prf_offset &&
				    i < pf_rule_desc[j].prf_offset +
				    pf_rule_desc[j].prf_size) {
					DEBUG("superblock break @ %d due to %s",
					    por->por_rule.nr,
					    pf_rule_desc[j].prf_name);
					return (0);
				}
				if (i > pf_rule_desc[j].prf_offset) {
					if (closest == -1 ||
					    i-pf_rule_desc[j].prf_offset <
					    i-pf_rule_desc[closest].prf_offset)
						closest = j;
				}
			}

			if (closest >= 0)
				DEBUG("superblock break @ %d on %s+%lxh",
				    por->por_rule.nr,
				    pf_rule_desc[closest].prf_name,
				    i - pf_rule_desc[closest].prf_offset -
				    pf_rule_desc[closest].prf_size);
			else
				DEBUG("superblock break @ %d on field @ %d",
				    por->por_rule.nr, i);
			return (0);
		}
	}
#endif /* OPT_DEBUG */

	return (0);
}
| 1478 | |||
| 1479 | |||
| 1480 | /* | ||
| 1481 | * Figure out if an interface name is an actual interface or actually a | ||
| 1482 | * group of interfaces. | ||
| 1483 | */ | ||
int
interface_group(const char *ifname)
{
	size_t len;

	/* NULL or empty names name no group. */
	if (ifname == NULL)
		return (0);
	len = strlen(ifname);
	if (len == 0)
		return (0);

	/* Real interfaces must end in a number, interface groups do not */
	return (isdigit((unsigned char)ifname[len - 1]) ? 0 : 1);
}
| 1496 | |||
| 1497 | |||
| 1498 | /* | ||
| 1499 | * Make a rule that can directly compared by memcmp() | ||
| 1500 | */ | ||
void
comparable_rule(struct pf_rule *dst, const struct pf_rule *src, int type)
{
	int i;
	/*
	 * To simplify the comparison, we just zero out the fields that are
	 * allowed to be different and then do a simple memcmp()
	 */
	/* Copy src wholesale, then erase every field whose prf_type is
	 * at or above the requested threshold (e.g. DC, NOMERGE, COMBINED). */
	memcpy(dst, src, sizeof(*dst));
	for (i = 0; i < sizeof(pf_rule_desc)/sizeof(*pf_rule_desc); i++)
		if (pf_rule_desc[i].prf_type >= type) {
#ifdef OPT_DEBUG
			assert(pf_rule_desc[i].prf_type != NEVER ||
			    *(((char *)dst) + pf_rule_desc[i].prf_offset) == 0);
#endif /* OPT_DEBUG */
			memset(((char *)dst) + pf_rule_desc[i].prf_offset, 0,
			    pf_rule_desc[i].prf_size);
		}
}
| 1520 | |||
| 1521 | |||
/*
 * Remove superset information from two rules so we can directly compare them
 * with memcmp()
 *
 * Wherever 'super' matches "anything" for a field (empty ifname, INOUT
 * direction, proto 0, no port op, all-zero address mask, af 0), the
 * corresponding field in 'sub' is canonicalized so that a later memcmp()
 * treats the two rules as equal on that field.
 */
void
exclude_supersets(struct pf_rule *super, struct pf_rule *sub)
{
	/* super matches any interface: ignore sub's interface */
	if (super->ifname[0] == '\0')
		memset(sub->ifname, 0, sizeof(sub->ifname));
	/* super matches both directions: fold sub's direction into INOUT */
	if (super->direction == PF_INOUT)
		sub->direction = PF_INOUT;
	/*
	 * super has no TCP-flag constraints (and covers sub's proto):
	 * drop sub's flag constraints so they don't block the memcmp()
	 */
	if ((super->proto == 0 || super->proto == sub->proto) &&
	    super->flags == 0 && super->flagset == 0 && (sub->flags ||
	    sub->flagset)) {
		sub->flags = super->flags;
		sub->flagset = super->flagset;
	}
	/* super matches any protocol: ignore sub's protocol */
	if (super->proto == 0)
		sub->proto = 0;

	/* super matches any source port: ignore sub's port constraints */
	if (super->src.port_op == 0) {
		sub->src.port_op = 0;
		sub->src.port[0] = 0;
		sub->src.port[1] = 0;
	}
	/* super matches any destination port: likewise */
	if (super->dst.port_op == 0) {
		sub->dst.port_op = 0;
		sub->dst.port[0] = 0;
		sub->dst.port[1] = 0;
	}

	/*
	 * Source address.  An all-zero (non-negated) mask in super means
	 * "any": wipe sub's source address entirely.
	 */
	if (super->src.addr.type == PF_ADDR_ADDRMASK && !super->src.neg &&
	    !sub->src.neg && super->src.addr.v.a.mask.addr32[0] == 0 &&
	    super->src.addr.v.a.mask.addr32[1] == 0 &&
	    super->src.addr.v.a.mask.addr32[2] == 0 &&
	    super->src.addr.v.a.mask.addr32[3] == 0)
		memset(&sub->src.addr, 0, sizeof(sub->src.addr));
	/*
	 * Otherwise, if super's network (shorter prefix per unmask())
	 * contains sub's address — checked by masking each addr32 word
	 * of sub with super's mask — canonicalize sub to super's network.
	 */
	else if (super->src.addr.type == PF_ADDR_ADDRMASK &&
	    sub->src.addr.type == PF_ADDR_ADDRMASK &&
	    super->src.neg == sub->src.neg &&
	    super->af == sub->af &&
	    unmask(&super->src.addr.v.a.mask, super->af) <
	    unmask(&sub->src.addr.v.a.mask, sub->af) &&
	    super->src.addr.v.a.addr.addr32[0] ==
	    (sub->src.addr.v.a.addr.addr32[0] &
	    super->src.addr.v.a.mask.addr32[0]) &&
	    super->src.addr.v.a.addr.addr32[1] ==
	    (sub->src.addr.v.a.addr.addr32[1] &
	    super->src.addr.v.a.mask.addr32[1]) &&
	    super->src.addr.v.a.addr.addr32[2] ==
	    (sub->src.addr.v.a.addr.addr32[2] &
	    super->src.addr.v.a.mask.addr32[2]) &&
	    super->src.addr.v.a.addr.addr32[3] ==
	    (sub->src.addr.v.a.addr.addr32[3] &
	    super->src.addr.v.a.mask.addr32[3])) {
		/* sub->src.addr is a subset of super->src.addr/mask */
		memcpy(&sub->src.addr, &super->src.addr, sizeof(sub->src.addr));
	}

	/* Destination address: same two cases as the source address above */
	if (super->dst.addr.type == PF_ADDR_ADDRMASK && !super->dst.neg &&
	    !sub->dst.neg && super->dst.addr.v.a.mask.addr32[0] == 0 &&
	    super->dst.addr.v.a.mask.addr32[1] == 0 &&
	    super->dst.addr.v.a.mask.addr32[2] == 0 &&
	    super->dst.addr.v.a.mask.addr32[3] == 0)
		memset(&sub->dst.addr, 0, sizeof(sub->dst.addr));
	else if (super->dst.addr.type == PF_ADDR_ADDRMASK &&
	    sub->dst.addr.type == PF_ADDR_ADDRMASK &&
	    super->dst.neg == sub->dst.neg &&
	    super->af == sub->af &&
	    unmask(&super->dst.addr.v.a.mask, super->af) <
	    unmask(&sub->dst.addr.v.a.mask, sub->af) &&
	    super->dst.addr.v.a.addr.addr32[0] ==
	    (sub->dst.addr.v.a.addr.addr32[0] &
	    super->dst.addr.v.a.mask.addr32[0]) &&
	    super->dst.addr.v.a.addr.addr32[1] ==
	    (sub->dst.addr.v.a.addr.addr32[1] &
	    super->dst.addr.v.a.mask.addr32[1]) &&
	    super->dst.addr.v.a.addr.addr32[2] ==
	    (sub->dst.addr.v.a.addr.addr32[2] &
	    super->dst.addr.v.a.mask.addr32[2]) &&
	    super->dst.addr.v.a.addr.addr32[3] ==
	    (sub->dst.addr.v.a.addr.addr32[3] &
	    super->dst.addr.v.a.mask.addr32[3])) {
		/* sub->dst.addr is a subset of super->dst.addr/mask */
		memcpy(&sub->dst.addr, &super->dst.addr, sizeof(sub->dst.addr));
	}

	/* super matches any address family: ignore sub's af */
	if (super->af == 0)
		sub->af = 0;
}
| 1612 | |||
| 1613 | |||
| 1614 | void | ||
| 1615 | superblock_free(struct pfctl *pf, struct superblock *block) | ||
| 1616 | { | ||
| 1617 | struct pf_opt_rule *por; | ||
| 1618 | 	while ((por = TAILQ_FIRST(&block->sb_rules))) { | ||
| 1619 | TAILQ_REMOVE(&block->sb_rules, por, por_entry); | ||
| 1620 | 		if (por->por_src_tbl) { | ||
| 1621 | 			if (por->por_src_tbl->pt_buf) { | ||
| 1622 | pfr_buf_clear(por->por_src_tbl->pt_buf); | ||
| 1623 | free(por->por_src_tbl->pt_buf); | ||
| 1624 | } | ||
| 1625 | free(por->por_src_tbl); | ||
| 1626 | } | ||
| 1627 | 		if (por->por_dst_tbl) { | ||
| 1628 | 			if (por->por_dst_tbl->pt_buf) { | ||
| 1629 | pfr_buf_clear(por->por_dst_tbl->pt_buf); | ||
| 1630 | free(por->por_dst_tbl->pt_buf); | ||
| 1631 | } | ||
| 1632 | free(por->por_dst_tbl); | ||
| 1633 | } | ||
| 1634 | free(por); | ||
| 1635 | } | ||
| 1636 | if (block->sb_profiled_block) | ||
| 1637 | superblock_free(pf, block->sb_profiled_block); | ||
| 1638 | free(block); | ||
| 1639 | } | ||
| 1640 | 
| Generated by: GCOVR (Version 3.3) |