Line data Source code
1 : /* $OpenBSD: if_pfsync.c,v 1.259 2018/09/11 07:53:38 sashan Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2002 Michael Shalayeff
5 : * All rights reserved.
6 : *
7 : * Redistribution and use in source and binary forms, with or without
8 : * modification, are permitted provided that the following conditions
9 : * are met:
10 : * 1. Redistributions of source code must retain the above copyright
11 : * notice, this list of conditions and the following disclaimer.
12 : * 2. Redistributions in binary form must reproduce the above copyright
13 : * notice, this list of conditions and the following disclaimer in the
14 : * documentation and/or other materials provided with the distribution.
15 : *
16 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 : * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 : * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 : * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 : * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 : * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 : * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 : * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 : * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 : * THE POSSIBILITY OF SUCH DAMAGE.
27 : */
28 :
29 : /*
30 : * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
31 : *
32 : * Permission to use, copy, modify, and distribute this software for any
33 : * purpose with or without fee is hereby granted, provided that the above
34 : * copyright notice and this permission notice appear in all copies.
35 : *
36 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 : */
44 :
45 : #include <sys/param.h>
46 : #include <sys/systm.h>
47 : #include <sys/time.h>
48 : #include <sys/malloc.h>
49 : #include <sys/mbuf.h>
50 : #include <sys/socket.h>
51 : #include <sys/ioctl.h>
52 : #include <sys/timeout.h>
53 : #include <sys/kernel.h>
54 : #include <sys/sysctl.h>
55 : #include <sys/pool.h>
56 : #include <sys/syslog.h>
57 :
58 : #include <net/if.h>
59 : #include <net/if_types.h>
60 : #include <net/bpf.h>
61 : #include <net/netisr.h>
62 :
63 : #include <netinet/in.h>
64 : #include <netinet/if_ether.h>
65 : #include <netinet/ip.h>
66 : #include <netinet/in_var.h>
67 : #include <netinet/ip_var.h>
68 : #include <netinet/ip_ipsp.h>
69 : #include <netinet/ip_icmp.h>
70 : #include <netinet/icmp6.h>
71 : #include <netinet/tcp.h>
72 : #include <netinet/tcp_seq.h>
73 : #include <netinet/tcp_fsm.h>
74 : #include <netinet/udp.h>
75 :
76 : #ifdef INET6
77 : #include <netinet6/in6_var.h>
78 : #include <netinet/ip6.h>
79 : #include <netinet6/ip6_var.h>
80 : #include <netinet6/nd6.h>
81 : #endif /* INET6 */
82 :
83 : #include "carp.h"
84 : #if NCARP > 0
85 : #include <netinet/ip_carp.h>
86 : #endif
87 :
88 : #define PF_DEBUGNAME "pfsync: "
89 : #include <net/pfvar.h>
90 : #include <net/pfvar_priv.h>
91 : #include <net/if_pfsync.h>
92 :
93 : #include "bpfilter.h"
94 : #include "pfsync.h"
95 :
96 : #define PFSYNC_MINPKT ( \
97 : sizeof(struct ip) + \
98 : sizeof(struct pfsync_header))
99 :
100 : int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
101 : struct pfsync_state_peer *);
102 :
103 : int pfsync_in_clr(caddr_t, int, int, int);
104 : int pfsync_in_iack(caddr_t, int, int, int);
105 : int pfsync_in_upd_c(caddr_t, int, int, int);
106 : int pfsync_in_ureq(caddr_t, int, int, int);
107 : int pfsync_in_del(caddr_t, int, int, int);
108 : int pfsync_in_del_c(caddr_t, int, int, int);
109 : int pfsync_in_bus(caddr_t, int, int, int);
110 : int pfsync_in_tdb(caddr_t, int, int, int);
111 : int pfsync_in_ins(caddr_t, int, int, int);
112 : int pfsync_in_upd(caddr_t, int, int, int);
113 : int pfsync_in_eof(caddr_t, int, int, int);
114 :
115 : int pfsync_in_error(caddr_t, int, int, int);
116 :
117 : void pfsync_update_state_locked(struct pf_state *);
118 :
119 : struct {
120 : int (*in)(caddr_t, int, int, int);
121 : size_t len;
122 : } pfsync_acts[] = {
123 : /* PFSYNC_ACT_CLR */
124 : { pfsync_in_clr, sizeof(struct pfsync_clr) },
125 : /* PFSYNC_ACT_OINS */
126 : { pfsync_in_error, 0 },
127 : /* PFSYNC_ACT_INS_ACK */
128 : { pfsync_in_iack, sizeof(struct pfsync_ins_ack) },
129 : /* PFSYNC_ACT_OUPD */
130 : { pfsync_in_error, 0 },
131 : /* PFSYNC_ACT_UPD_C */
132 : { pfsync_in_upd_c, sizeof(struct pfsync_upd_c) },
133 : /* PFSYNC_ACT_UPD_REQ */
134 : { pfsync_in_ureq, sizeof(struct pfsync_upd_req) },
135 : /* PFSYNC_ACT_DEL */
136 : { pfsync_in_del, sizeof(struct pfsync_state) },
137 : /* PFSYNC_ACT_DEL_C */
138 : { pfsync_in_del_c, sizeof(struct pfsync_del_c) },
139 : /* PFSYNC_ACT_INS_F */
140 : { pfsync_in_error, 0 },
141 : /* PFSYNC_ACT_DEL_F */
142 : { pfsync_in_error, 0 },
143 : /* PFSYNC_ACT_BUS */
144 : { pfsync_in_bus, sizeof(struct pfsync_bus) },
145 : /* PFSYNC_ACT_OTDB */
146 : { pfsync_in_error, 0 },
147 : /* PFSYNC_ACT_EOF */
148 : { pfsync_in_error, 0 },
149 : /* PFSYNC_ACT_INS */
150 : { pfsync_in_ins, sizeof(struct pfsync_state) },
151 : /* PFSYNC_ACT_UPD */
152 : { pfsync_in_upd, sizeof(struct pfsync_state) },
153 : /* PFSYNC_ACT_TDB */
154 : { pfsync_in_tdb, sizeof(struct pfsync_tdb) },
155 : };
156 :
157 : struct pfsync_q {
158 : void (*write)(struct pf_state *, void *);
159 : size_t len;
160 : u_int8_t action;
161 : };
162 :
163 : /* we have one of these for every PFSYNC_S_ */
164 : void pfsync_out_state(struct pf_state *, void *);
165 : void pfsync_out_iack(struct pf_state *, void *);
166 : void pfsync_out_upd_c(struct pf_state *, void *);
167 : void pfsync_out_del(struct pf_state *, void *);
168 :
169 : struct pfsync_q pfsync_qs[] = {
170 : { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
171 : { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C },
172 : { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C },
173 : { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS },
174 : { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD }
175 : };
176 :
177 : void pfsync_q_ins(struct pf_state *, int);
178 : void pfsync_q_del(struct pf_state *);
179 :
180 : struct pfsync_upd_req_item {
181 : TAILQ_ENTRY(pfsync_upd_req_item) ur_entry;
182 : struct pfsync_upd_req ur_msg;
183 : };
184 : TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);
185 :
186 : struct pfsync_deferral {
187 : TAILQ_ENTRY(pfsync_deferral) pd_entry;
188 : struct pf_state *pd_st;
189 : struct mbuf *pd_m;
190 : struct timeout pd_tmo;
191 : };
192 : TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);
193 :
194 : #define PFSYNC_PLSIZE MAX(sizeof(struct pfsync_upd_req_item), \
195 : sizeof(struct pfsync_deferral))
196 :
197 : void pfsync_out_tdb(struct tdb *, void *);
198 :
199 : struct pfsync_softc {
200 : struct ifnet sc_if;
201 : struct ifnet *sc_sync_if;
202 :
203 : struct pool sc_pool;
204 :
205 : struct ip_moptions sc_imo;
206 :
207 : struct in_addr sc_sync_peer;
208 : u_int8_t sc_maxupdates;
209 :
210 : struct ip sc_template;
211 :
212 : struct pf_state_queue sc_qs[PFSYNC_S_COUNT];
213 : size_t sc_len;
214 :
215 : struct pfsync_upd_reqs sc_upd_req_list;
216 :
217 : int sc_initial_bulk;
218 : int sc_link_demoted;
219 :
220 : int sc_defer;
221 : struct pfsync_deferrals sc_deferrals;
222 : u_int sc_deferred;
223 :
224 : void *sc_plus;
225 : size_t sc_pluslen;
226 :
227 : u_int32_t sc_ureq_sent;
228 : int sc_bulk_tries;
229 : struct timeout sc_bulkfail_tmo;
230 :
231 : u_int32_t sc_ureq_received;
232 : struct pf_state *sc_bulk_next;
233 : struct pf_state *sc_bulk_last;
234 : struct timeout sc_bulk_tmo;
235 :
236 : TAILQ_HEAD(, tdb) sc_tdb_q;
237 :
238 : void *sc_lhcookie;
239 : void *sc_dhcookie;
240 :
241 : struct timeout sc_tmo;
242 : };
243 :
244 : struct pfsync_softc *pfsyncif = NULL;
245 : struct cpumem *pfsynccounters;
246 :
247 : void pfsyncattach(int);
248 : int pfsync_clone_create(struct if_clone *, int);
249 : int pfsync_clone_destroy(struct ifnet *);
250 : int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
251 : struct pf_state_peer *);
252 : void pfsync_update_net_tdb(struct pfsync_tdb *);
253 : int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
254 : struct rtentry *);
255 : int pfsyncioctl(struct ifnet *, u_long, caddr_t);
256 : void pfsyncstart(struct ifnet *);
257 : void pfsync_syncdev_state(void *);
258 : void pfsync_ifdetach(void *);
259 :
260 : void pfsync_deferred(struct pf_state *, int);
261 : void pfsync_undefer(struct pfsync_deferral *, int);
262 : void pfsync_defer_tmo(void *);
263 :
264 : void pfsync_cancel_full_update(struct pfsync_softc *);
265 : void pfsync_request_full_update(struct pfsync_softc *);
266 : void pfsync_request_update(u_int32_t, u_int64_t);
267 : void pfsync_update_state_req(struct pf_state *);
268 :
269 : void pfsync_drop(struct pfsync_softc *);
270 : void pfsync_sendout(void);
271 : void pfsync_send_plus(void *, size_t);
272 : void pfsync_timeout(void *);
273 : void pfsync_tdb_timeout(void *);
274 :
275 : void pfsync_bulk_start(void);
276 : void pfsync_bulk_status(u_int8_t);
277 : void pfsync_bulk_update(void *);
278 : void pfsync_bulk_fail(void *);
279 :
280 : #define PFSYNC_MAX_BULKTRIES 12
281 : int pfsync_sync_ok;
282 :
283 : struct if_clone pfsync_cloner =
284 : IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
285 :
286 : void
287 0 : pfsyncattach(int npfsync)
288 : {
289 0 : if_clone_attach(&pfsync_cloner);
290 0 : pfsynccounters = counters_alloc(pfsyncs_ncounters);
291 0 : }
292 :
293 : int
294 0 : pfsync_clone_create(struct if_clone *ifc, int unit)
295 : {
296 : struct pfsync_softc *sc;
297 : struct ifnet *ifp;
298 : int q;
299 :
300 0 : if (unit != 0)
301 0 : return (EINVAL);
302 :
303 0 : pfsync_sync_ok = 1;
304 :
305 0 : sc = malloc(sizeof(*pfsyncif), M_DEVBUF, M_WAITOK|M_ZERO);
306 0 : for (q = 0; q < PFSYNC_S_COUNT; q++)
307 0 : TAILQ_INIT(&sc->sc_qs[q]);
308 :
309 0 : pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, IPL_SOFTNET, 0, "pfsync",
310 : NULL);
311 0 : TAILQ_INIT(&sc->sc_upd_req_list);
312 0 : TAILQ_INIT(&sc->sc_deferrals);
313 0 : sc->sc_deferred = 0;
314 :
315 0 : TAILQ_INIT(&sc->sc_tdb_q);
316 :
317 0 : sc->sc_len = PFSYNC_MINPKT;
318 0 : sc->sc_maxupdates = 128;
319 :
320 0 : sc->sc_imo.imo_membership = (struct in_multi **)malloc(
321 : (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
322 : M_WAITOK | M_ZERO);
323 0 : sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
324 :
325 0 : ifp = &sc->sc_if;
326 0 : snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
327 0 : ifp->if_softc = sc;
328 0 : ifp->if_ioctl = pfsyncioctl;
329 0 : ifp->if_output = pfsyncoutput;
330 0 : ifp->if_start = pfsyncstart;
331 0 : ifp->if_type = IFT_PFSYNC;
332 0 : IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
333 0 : ifp->if_hdrlen = sizeof(struct pfsync_header);
334 0 : ifp->if_mtu = ETHERMTU;
335 0 : ifp->if_xflags = IFXF_CLONED;
336 0 : timeout_set_proc(&sc->sc_tmo, pfsync_timeout, sc);
337 0 : timeout_set_proc(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
338 0 : timeout_set_proc(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
339 :
340 0 : if_attach(ifp);
341 0 : if_alloc_sadl(ifp);
342 :
343 : #if NCARP > 0
344 0 : if_addgroup(ifp, "carp");
345 : #endif
346 :
347 : #if NBPFILTER > 0
348 0 : bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
349 : #endif
350 :
351 0 : pfsyncif = sc;
352 :
353 0 : return (0);
354 0 : }
355 :
356 : int
357 0 : pfsync_clone_destroy(struct ifnet *ifp)
358 : {
359 0 : struct pfsync_softc *sc = ifp->if_softc;
360 : struct pfsync_deferral *pd;
361 :
362 0 : timeout_del(&sc->sc_bulkfail_tmo);
363 0 : timeout_del(&sc->sc_bulk_tmo);
364 0 : timeout_del(&sc->sc_tmo);
365 : #if NCARP > 0
366 0 : if (!pfsync_sync_ok)
367 0 : carp_group_demote_adj(&sc->sc_if, -1, "pfsync destroy");
368 0 : if (sc->sc_link_demoted)
369 0 : carp_group_demote_adj(&sc->sc_if, -1, "pfsync destroy");
370 : #endif
371 0 : if (sc->sc_sync_if) {
372 0 : hook_disestablish(
373 0 : sc->sc_sync_if->if_linkstatehooks,
374 0 : sc->sc_lhcookie);
375 0 : hook_disestablish(sc->sc_sync_if->if_detachhooks,
376 0 : sc->sc_dhcookie);
377 0 : }
378 0 : if_detach(ifp);
379 :
380 0 : pfsync_drop(sc);
381 :
382 0 : while (sc->sc_deferred > 0) {
383 0 : pd = TAILQ_FIRST(&sc->sc_deferrals);
384 0 : timeout_del(&pd->pd_tmo);
385 0 : pfsync_undefer(pd, 0);
386 : }
387 :
388 0 : pool_destroy(&sc->sc_pool);
389 0 : free(sc->sc_imo.imo_membership, M_IPMOPTS, 0);
390 0 : free(sc, M_DEVBUF, sizeof(*sc));
391 :
392 0 : pfsyncif = NULL;
393 :
394 0 : return (0);
395 : }
396 :
397 : /*
398 : * Start output on the pfsync interface.
399 : */
400 : void
401 0 : pfsyncstart(struct ifnet *ifp)
402 : {
403 0 : IFQ_PURGE(&ifp->if_snd);
404 0 : }
405 :
406 : void
407 0 : pfsync_syncdev_state(void *arg)
408 : {
409 0 : struct pfsync_softc *sc = arg;
410 :
411 0 : if (!sc->sc_sync_if || !(sc->sc_if.if_flags & IFF_UP))
412 0 : return;
413 :
414 0 : if (sc->sc_sync_if->if_link_state == LINK_STATE_DOWN) {
415 0 : sc->sc_if.if_flags &= ~IFF_RUNNING;
416 0 : if (!sc->sc_link_demoted) {
417 : #if NCARP > 0
418 0 : carp_group_demote_adj(&sc->sc_if, 1,
419 : "pfsync link state down");
420 : #endif
421 0 : sc->sc_link_demoted = 1;
422 0 : }
423 :
424 : /* drop everything */
425 0 : timeout_del(&sc->sc_tmo);
426 0 : pfsync_drop(sc);
427 :
428 0 : pfsync_cancel_full_update(sc);
429 0 : } else if (sc->sc_link_demoted) {
430 0 : sc->sc_if.if_flags |= IFF_RUNNING;
431 :
432 0 : pfsync_request_full_update(sc);
433 0 : }
434 0 : }
435 :
436 : void
437 0 : pfsync_ifdetach(void *arg)
438 : {
439 0 : struct pfsync_softc *sc = arg;
440 :
441 0 : sc->sc_sync_if = NULL;
442 0 : }
443 :
444 : int
445 0 : pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
446 : struct pf_state_peer *d)
447 : {
448 0 : if (s->scrub.scrub_flag && d->scrub == NULL) {
449 0 : d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
450 0 : if (d->scrub == NULL)
451 0 : return (ENOMEM);
452 : }
453 :
454 0 : return (0);
455 0 : }
456 :
457 : void
458 0 : pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
459 : {
460 0 : pf_state_export(sp, st);
461 0 : }
462 :
463 : int
464 0 : pfsync_state_import(struct pfsync_state *sp, int flags)
465 : {
466 : struct pf_state *st = NULL;
467 0 : struct pf_state_key *skw = NULL, *sks = NULL;
468 : struct pf_rule *r = NULL;
469 : struct pfi_kif *kif;
470 : int pool_flags;
471 : int error;
472 :
473 0 : if (sp->creatorid == 0) {
474 0 : DPFPRINTF(LOG_NOTICE, "pfsync_state_import: "
475 : "invalid creator id: %08x", ntohl(sp->creatorid));
476 0 : return (EINVAL);
477 : }
478 :
479 0 : if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
480 0 : DPFPRINTF(LOG_NOTICE, "pfsync_state_import: "
481 : "unknown interface: %s", sp->ifname);
482 0 : if (flags & PFSYNC_SI_IOCTL)
483 0 : return (EINVAL);
484 0 : return (0); /* skip this state */
485 : }
486 :
487 0 : if (sp->af == 0)
488 0 : return (0); /* skip this state */
489 :
490 : /*
491 : * If the ruleset checksums match or the state is coming from the ioctl,
492 : * it's safe to associate the state with the rule of that number.
493 : */
494 0 : if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
495 0 : (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
496 0 : pf_main_ruleset.rules.active.rcount)
497 0 : r = pf_main_ruleset.rules.active.ptr_array[ntohl(sp->rule)];
498 : else
499 : r = &pf_default_rule;
500 :
501 0 : if ((r->max_states && r->states_cur >= r->max_states))
502 : goto cleanup;
503 :
504 0 : if (flags & PFSYNC_SI_IOCTL)
505 0 : pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
506 : else
507 : pool_flags = PR_NOWAIT | PR_LIMITFAIL | PR_ZERO;
508 :
509 0 : if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
510 : goto cleanup;
511 :
512 0 : if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
513 : goto cleanup;
514 :
515 0 : if ((sp->key[PF_SK_WIRE].af &&
516 0 : (sp->key[PF_SK_WIRE].af != sp->key[PF_SK_STACK].af)) ||
517 0 : PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
518 0 : &sp->key[PF_SK_STACK].addr[0], sp->af) ||
519 0 : PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
520 0 : &sp->key[PF_SK_STACK].addr[1], sp->af) ||
521 0 : sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
522 0 : sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1] ||
523 0 : sp->key[PF_SK_WIRE].rdomain != sp->key[PF_SK_STACK].rdomain) {
524 0 : if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
525 : goto cleanup;
526 : } else
527 0 : sks = skw;
528 :
529 : /* allocate memory for scrub info */
530 0 : if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
531 0 : pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
532 : goto cleanup;
533 :
534 : /* copy to state key(s) */
535 0 : skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
536 0 : skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
537 0 : skw->port[0] = sp->key[PF_SK_WIRE].port[0];
538 0 : skw->port[1] = sp->key[PF_SK_WIRE].port[1];
539 0 : skw->rdomain = ntohs(sp->key[PF_SK_WIRE].rdomain);
540 0 : PF_REF_INIT(skw->refcnt);
541 0 : skw->proto = sp->proto;
542 0 : if (!(skw->af = sp->key[PF_SK_WIRE].af))
543 0 : skw->af = sp->af;
544 0 : if (sks != skw) {
545 0 : sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
546 0 : sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
547 0 : sks->port[0] = sp->key[PF_SK_STACK].port[0];
548 0 : sks->port[1] = sp->key[PF_SK_STACK].port[1];
549 0 : sks->rdomain = ntohs(sp->key[PF_SK_STACK].rdomain);
550 0 : PF_REF_INIT(sks->refcnt);
551 0 : if (!(sks->af = sp->key[PF_SK_STACK].af))
552 0 : sks->af = sp->af;
553 0 : if (sks->af != skw->af) {
554 0 : switch (sp->proto) {
555 : case IPPROTO_ICMP:
556 0 : sks->proto = IPPROTO_ICMPV6;
557 0 : break;
558 : case IPPROTO_ICMPV6:
559 0 : sks->proto = IPPROTO_ICMP;
560 0 : break;
561 : default:
562 0 : sks->proto = sp->proto;
563 0 : }
564 : } else
565 0 : sks->proto = sp->proto;
566 : }
567 0 : st->rtableid[PF_SK_WIRE] = ntohl(sp->rtableid[PF_SK_WIRE]);
568 0 : st->rtableid[PF_SK_STACK] = ntohl(sp->rtableid[PF_SK_STACK]);
569 :
570 : /* copy to state */
571 0 : bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
572 0 : st->creation = time_uptime - ntohl(sp->creation);
573 0 : st->expire = time_uptime;
574 0 : if (ntohl(sp->expire)) {
575 : u_int32_t timeout;
576 :
577 0 : timeout = r->timeout[sp->timeout];
578 0 : if (!timeout)
579 0 : timeout = pf_default_rule.timeout[sp->timeout];
580 :
581 : /* sp->expire may have been adaptively scaled by export. */
582 0 : st->expire -= timeout - ntohl(sp->expire);
583 0 : }
584 :
585 0 : st->direction = sp->direction;
586 0 : st->log = sp->log;
587 0 : st->timeout = sp->timeout;
588 0 : st->state_flags = ntohs(sp->state_flags);
589 0 : st->max_mss = ntohs(sp->max_mss);
590 0 : st->min_ttl = sp->min_ttl;
591 0 : st->set_tos = sp->set_tos;
592 0 : st->set_prio[0] = sp->set_prio[0];
593 0 : st->set_prio[1] = sp->set_prio[1];
594 :
595 0 : st->id = sp->id;
596 0 : st->creatorid = sp->creatorid;
597 0 : pf_state_peer_ntoh(&sp->src, &st->src);
598 0 : pf_state_peer_ntoh(&sp->dst, &st->dst);
599 :
600 0 : st->rule.ptr = r;
601 0 : st->anchor.ptr = NULL;
602 0 : st->rt_kif = NULL;
603 :
604 0 : st->pfsync_time = time_uptime;
605 0 : st->sync_state = PFSYNC_S_NONE;
606 :
607 0 : refcnt_init(&st->refcnt);
608 :
609 : /* XXX when we have anchors, use STATE_INC_COUNTERS */
610 0 : r->states_cur++;
611 0 : r->states_tot++;
612 :
613 0 : if (!ISSET(flags, PFSYNC_SI_IOCTL))
614 0 : SET(st->state_flags, PFSTATE_NOSYNC);
615 :
616 : /*
617 : * We just set PFSTATE_NOSYNC bit, which prevents
618 : * pfsync_insert_state() to insert state to pfsync.
619 : */
620 0 : if (pf_state_insert(kif, &skw, &sks, st) != 0) {
621 : /* XXX when we have anchors, use STATE_DEC_COUNTERS */
622 0 : r->states_cur--;
623 : error = EEXIST;
624 0 : goto cleanup_state;
625 : }
626 :
627 0 : if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
628 0 : CLR(st->state_flags, PFSTATE_NOSYNC);
629 0 : if (ISSET(st->state_flags, PFSTATE_ACK)) {
630 0 : pfsync_q_ins(st, PFSYNC_S_IACK);
631 0 : schednetisr(NETISR_PFSYNC);
632 0 : }
633 : }
634 0 : CLR(st->state_flags, PFSTATE_ACK);
635 :
636 0 : return (0);
637 :
638 : cleanup:
639 : error = ENOMEM;
640 0 : if (skw == sks)
641 0 : sks = NULL;
642 0 : if (skw != NULL)
643 0 : pool_put(&pf_state_key_pl, skw);
644 0 : if (sks != NULL)
645 0 : pool_put(&pf_state_key_pl, sks);
646 :
647 : cleanup_state: /* pf_state_insert frees the state keys */
648 0 : if (st) {
649 0 : if (st->dst.scrub)
650 0 : pool_put(&pf_state_scrub_pl, st->dst.scrub);
651 0 : if (st->src.scrub)
652 0 : pool_put(&pf_state_scrub_pl, st->src.scrub);
653 0 : pool_put(&pf_state_pl, st);
654 0 : }
655 0 : return (error);
656 0 : }
657 :
658 : int
659 0 : pfsync_input(struct mbuf **mp, int *offp, int proto, int af)
660 : {
661 0 : struct mbuf *n, *m = *mp;
662 0 : struct pfsync_softc *sc = pfsyncif;
663 0 : struct ip *ip = mtod(m, struct ip *);
664 : struct pfsync_header *ph;
665 0 : struct pfsync_subheader subh;
666 0 : int offset, noff, len, count, mlen, flags = 0;
667 : int e;
668 :
669 0 : NET_ASSERT_LOCKED();
670 :
671 0 : pfsyncstat_inc(pfsyncs_ipackets);
672 :
673 : /* verify that we have a sync interface configured */
674 0 : if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING) ||
675 0 : sc->sc_sync_if == NULL || !pf_status.running)
676 : goto done;
677 :
678 : /* verify that the packet came in on the right interface */
679 0 : if (sc->sc_sync_if->if_index != m->m_pkthdr.ph_ifidx) {
680 0 : pfsyncstat_inc(pfsyncs_badif);
681 0 : goto done;
682 : }
683 :
684 0 : sc->sc_if.if_ipackets++;
685 0 : sc->sc_if.if_ibytes += m->m_pkthdr.len;
686 :
687 : /* verify that the IP TTL is 255. */
688 0 : if (ip->ip_ttl != PFSYNC_DFLTTL) {
689 0 : pfsyncstat_inc(pfsyncs_badttl);
690 0 : goto done;
691 : }
692 :
693 0 : offset = ip->ip_hl << 2;
694 0 : n = m_pulldown(m, offset, sizeof(*ph), &noff);
695 0 : if (n == NULL) {
696 0 : pfsyncstat_inc(pfsyncs_hdrops);
697 0 : return IPPROTO_DONE;
698 : }
699 0 : ph = (struct pfsync_header *)(n->m_data + noff);
700 :
701 : /* verify the version */
702 0 : if (ph->version != PFSYNC_VERSION) {
703 0 : pfsyncstat_inc(pfsyncs_badver);
704 0 : goto done;
705 : }
706 0 : len = ntohs(ph->len) + offset;
707 0 : if (m->m_pkthdr.len < len) {
708 0 : pfsyncstat_inc(pfsyncs_badlen);
709 0 : goto done;
710 : }
711 :
712 0 : if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
713 0 : flags = PFSYNC_SI_CKSUM;
714 :
715 0 : offset += sizeof(*ph);
716 0 : while (offset <= len - sizeof(subh)) {
717 0 : m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
718 0 : offset += sizeof(subh);
719 :
720 0 : mlen = subh.len << 2;
721 0 : count = ntohs(subh.count);
722 :
723 0 : if (subh.action >= PFSYNC_ACT_MAX ||
724 0 : subh.action >= nitems(pfsync_acts) ||
725 0 : mlen < pfsync_acts[subh.action].len) {
726 : /*
727 : * subheaders are always followed by at least one
728 : * message, so if the peer is new
729 : * enough to tell us how big its messages are then we
730 : * know enough to skip them.
731 : */
732 0 : if (count > 0 && mlen > 0) {
733 0 : offset += count * mlen;
734 0 : continue;
735 : }
736 0 : pfsyncstat_inc(pfsyncs_badact);
737 0 : goto done;
738 : }
739 :
740 0 : n = m_pulldown(m, offset, mlen * count, &noff);
741 0 : if (n == NULL) {
742 0 : pfsyncstat_inc(pfsyncs_badlen);
743 0 : return IPPROTO_DONE;
744 : }
745 :
746 : PF_LOCK();
747 0 : e = pfsync_acts[subh.action].in(n->m_data + noff, mlen, count,
748 : flags);
749 : PF_UNLOCK();
750 0 : if (e != 0)
751 : goto done;
752 :
753 0 : offset += mlen * count;
754 : }
755 :
756 : done:
757 0 : m_freem(m);
758 0 : return IPPROTO_DONE;
759 0 : }
760 :
761 : int
762 0 : pfsync_in_clr(caddr_t buf, int len, int count, int flags)
763 : {
764 : struct pfsync_clr *clr;
765 : struct pf_state *st, *nexts;
766 : struct pfi_kif *kif;
767 : u_int32_t creatorid;
768 : int i;
769 :
770 0 : for (i = 0; i < count; i++) {
771 0 : clr = (struct pfsync_clr *)buf + len * i;
772 : kif = NULL;
773 0 : creatorid = clr->creatorid;
774 0 : if (strlen(clr->ifname) &&
775 0 : (kif = pfi_kif_find(clr->ifname)) == NULL)
776 : continue;
777 :
778 0 : for (st = RB_MIN(pf_state_tree_id, &tree_id); st; st = nexts) {
779 0 : nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
780 0 : if (st->creatorid == creatorid &&
781 0 : ((kif && st->kif == kif) || !kif)) {
782 0 : SET(st->state_flags, PFSTATE_NOSYNC);
783 0 : pf_remove_state(st);
784 0 : }
785 : }
786 : }
787 :
788 0 : return (0);
789 : }
790 :
791 : int
792 0 : pfsync_in_ins(caddr_t buf, int len, int count, int flags)
793 : {
794 : struct pfsync_state *sp;
795 : sa_family_t af1, af2;
796 : int i;
797 :
798 0 : for (i = 0; i < count; i++) {
799 0 : sp = (struct pfsync_state *)(buf + len * i);
800 0 : af1 = sp->key[0].af;
801 0 : af2 = sp->key[1].af;
802 :
803 : /* check for invalid values */
804 0 : if (sp->timeout >= PFTM_MAX ||
805 0 : sp->src.state > PF_TCPS_PROXY_DST ||
806 0 : sp->dst.state > PF_TCPS_PROXY_DST ||
807 0 : sp->direction > PF_OUT ||
808 0 : (((af1 || af2) &&
809 0 : ((af1 != AF_INET && af1 != AF_INET6) ||
810 0 : (af2 != AF_INET && af2 != AF_INET6))) ||
811 0 : (sp->af != AF_INET && sp->af != AF_INET6))) {
812 0 : DPFPRINTF(LOG_NOTICE,
813 : "pfsync_input: PFSYNC5_ACT_INS: invalid value");
814 0 : pfsyncstat_inc(pfsyncs_badval);
815 0 : continue;
816 : }
817 :
818 0 : if (pfsync_state_import(sp, flags) == ENOMEM) {
819 : /* drop out, but process the rest of the actions */
820 : break;
821 : }
822 : }
823 :
824 0 : return (0);
825 : }
826 :
827 : int
828 0 : pfsync_in_iack(caddr_t buf, int len, int count, int flags)
829 : {
830 : struct pfsync_ins_ack *ia;
831 0 : struct pf_state_cmp id_key;
832 : struct pf_state *st;
833 : int i;
834 :
835 0 : for (i = 0; i < count; i++) {
836 0 : ia = (struct pfsync_ins_ack *)(buf + len * i);
837 :
838 0 : id_key.id = ia->id;
839 0 : id_key.creatorid = ia->creatorid;
840 :
841 0 : st = pf_find_state_byid(&id_key);
842 0 : if (st == NULL)
843 : continue;
844 :
845 0 : if (ISSET(st->state_flags, PFSTATE_ACK))
846 0 : pfsync_deferred(st, 0);
847 : }
848 :
849 0 : return (0);
850 0 : }
851 :
852 : int
853 0 : pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
854 : struct pfsync_state_peer *dst)
855 : {
856 : int sync = 0;
857 :
858 : /*
859 : * The state should never go backwards except
860 : * for syn-proxy states. Neither should the
861 : * sequence window slide backwards.
862 : */
863 0 : if ((st->src.state > src->state &&
864 0 : (st->src.state < PF_TCPS_PROXY_SRC ||
865 0 : src->state >= PF_TCPS_PROXY_SRC)) ||
866 :
867 0 : (st->src.state == src->state &&
868 0 : SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
869 0 : sync++;
870 : else
871 0 : pf_state_peer_ntoh(src, &st->src);
872 :
873 0 : if ((st->dst.state > dst->state) ||
874 :
875 0 : (st->dst.state >= TCPS_SYN_SENT &&
876 0 : SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
877 0 : sync++;
878 : else
879 0 : pf_state_peer_ntoh(dst, &st->dst);
880 :
881 0 : return (sync);
882 : }
883 :
884 : int
885 0 : pfsync_in_upd(caddr_t buf, int len, int count, int flags)
886 : {
887 : struct pfsync_state *sp;
888 0 : struct pf_state_cmp id_key;
889 : struct pf_state *st;
890 : int sync;
891 :
892 : int i;
893 :
894 0 : for (i = 0; i < count; i++) {
895 0 : sp = (struct pfsync_state *)(buf + len * i);
896 :
897 : /* check for invalid values */
898 0 : if (sp->timeout >= PFTM_MAX ||
899 0 : sp->src.state > PF_TCPS_PROXY_DST ||
900 0 : sp->dst.state > PF_TCPS_PROXY_DST) {
901 0 : DPFPRINTF(LOG_NOTICE,
902 : "pfsync_input: PFSYNC_ACT_UPD: invalid value");
903 0 : pfsyncstat_inc(pfsyncs_badval);
904 0 : continue;
905 : }
906 :
907 0 : id_key.id = sp->id;
908 0 : id_key.creatorid = sp->creatorid;
909 :
910 0 : st = pf_find_state_byid(&id_key);
911 0 : if (st == NULL) {
912 : /* insert the update */
913 0 : if (pfsync_state_import(sp, flags))
914 0 : pfsyncstat_inc(pfsyncs_badstate);
915 : continue;
916 : }
917 :
918 0 : if (ISSET(st->state_flags, PFSTATE_ACK))
919 0 : pfsync_deferred(st, 1);
920 :
921 0 : if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
922 0 : sync = pfsync_upd_tcp(st, &sp->src, &sp->dst);
923 : else {
924 : sync = 0;
925 :
926 : /*
927 : * Non-TCP protocol state machine always go
928 : * forwards
929 : */
930 0 : if (st->src.state > sp->src.state)
931 0 : sync++;
932 : else
933 0 : pf_state_peer_ntoh(&sp->src, &st->src);
934 :
935 0 : if (st->dst.state > sp->dst.state)
936 0 : sync++;
937 : else
938 0 : pf_state_peer_ntoh(&sp->dst, &st->dst);
939 : }
940 :
941 0 : if (sync < 2) {
942 0 : pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
943 0 : pf_state_peer_ntoh(&sp->dst, &st->dst);
944 0 : st->expire = time_uptime;
945 0 : st->timeout = sp->timeout;
946 0 : }
947 0 : st->pfsync_time = time_uptime;
948 :
949 0 : if (sync) {
950 0 : pfsyncstat_inc(pfsyncs_stale);
951 :
952 0 : pfsync_update_state_locked(st);
953 0 : schednetisr(NETISR_PFSYNC);
954 0 : }
955 : }
956 :
957 0 : return (0);
958 0 : }
959 :
960 : int
961 0 : pfsync_in_upd_c(caddr_t buf, int len, int count, int flags)
962 : {
963 : struct pfsync_upd_c *up;
964 0 : struct pf_state_cmp id_key;
965 : struct pf_state *st;
966 :
967 : int sync;
968 :
969 : int i;
970 :
971 0 : for (i = 0; i < count; i++) {
972 0 : up = (struct pfsync_upd_c *)(buf + len * i);
973 :
974 : /* check for invalid values */
975 0 : if (up->timeout >= PFTM_MAX ||
976 0 : up->src.state > PF_TCPS_PROXY_DST ||
977 0 : up->dst.state > PF_TCPS_PROXY_DST) {
978 0 : DPFPRINTF(LOG_NOTICE,
979 : "pfsync_input: PFSYNC_ACT_UPD_C: invalid value");
980 0 : pfsyncstat_inc(pfsyncs_badval);
981 0 : continue;
982 : }
983 :
984 0 : id_key.id = up->id;
985 0 : id_key.creatorid = up->creatorid;
986 :
987 0 : st = pf_find_state_byid(&id_key);
988 0 : if (st == NULL) {
989 : /* We don't have this state. Ask for it. */
990 0 : pfsync_request_update(id_key.creatorid, id_key.id);
991 0 : continue;
992 : }
993 :
994 0 : if (ISSET(st->state_flags, PFSTATE_ACK))
995 0 : pfsync_deferred(st, 1);
996 :
997 0 : if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
998 0 : sync = pfsync_upd_tcp(st, &up->src, &up->dst);
999 : else {
1000 : sync = 0;
1001 : /*
1002 : * Non-TCP protocol state machine always go
1003 : * forwards
1004 : */
1005 0 : if (st->src.state > up->src.state)
1006 0 : sync++;
1007 : else
1008 0 : pf_state_peer_ntoh(&up->src, &st->src);
1009 :
1010 0 : if (st->dst.state > up->dst.state)
1011 0 : sync++;
1012 : else
1013 0 : pf_state_peer_ntoh(&up->dst, &st->dst);
1014 : }
1015 0 : if (sync < 2) {
1016 0 : pfsync_alloc_scrub_memory(&up->dst, &st->dst);
1017 0 : pf_state_peer_ntoh(&up->dst, &st->dst);
1018 0 : st->expire = time_uptime;
1019 0 : st->timeout = up->timeout;
1020 0 : }
1021 0 : st->pfsync_time = time_uptime;
1022 :
1023 0 : if (sync) {
1024 0 : pfsyncstat_inc(pfsyncs_stale);
1025 :
1026 0 : pfsync_update_state_locked(st);
1027 0 : schednetisr(NETISR_PFSYNC);
1028 0 : }
1029 : }
1030 :
1031 0 : return (0);
1032 0 : }
1033 :
1034 : int
1035 0 : pfsync_in_ureq(caddr_t buf, int len, int count, int flags)
1036 : {
1037 : struct pfsync_upd_req *ur;
1038 : int i;
1039 :
1040 0 : struct pf_state_cmp id_key;
1041 : struct pf_state *st;
1042 :
1043 0 : for (i = 0; i < count; i++) {
1044 0 : ur = (struct pfsync_upd_req *)(buf + len * i);
1045 :
1046 0 : id_key.id = ur->id;
1047 0 : id_key.creatorid = ur->creatorid;
1048 :
1049 0 : if (id_key.id == 0 && id_key.creatorid == 0)
1050 0 : pfsync_bulk_start();
1051 : else {
1052 0 : st = pf_find_state_byid(&id_key);
1053 0 : if (st == NULL) {
1054 0 : pfsyncstat_inc(pfsyncs_badstate);
1055 0 : continue;
1056 : }
1057 0 : if (ISSET(st->state_flags, PFSTATE_NOSYNC))
1058 : continue;
1059 :
1060 0 : pfsync_update_state_req(st);
1061 : }
1062 : }
1063 :
1064 0 : return (0);
1065 0 : }
1066 :
1067 : int
1068 0 : pfsync_in_del(caddr_t buf, int len, int count, int flags)
1069 : {
1070 : struct pfsync_state *sp;
1071 0 : struct pf_state_cmp id_key;
1072 : struct pf_state *st;
1073 : int i;
1074 :
1075 0 : for (i = 0; i < count; i++) {
1076 0 : sp = (struct pfsync_state *)(buf + len * i);
1077 :
1078 0 : id_key.id = sp->id;
1079 0 : id_key.creatorid = sp->creatorid;
1080 :
1081 0 : st = pf_find_state_byid(&id_key);
1082 0 : if (st == NULL) {
1083 0 : pfsyncstat_inc(pfsyncs_badstate);
1084 0 : continue;
1085 : }
1086 0 : SET(st->state_flags, PFSTATE_NOSYNC);
1087 0 : pf_remove_state(st);
1088 0 : }
1089 :
1090 0 : return (0);
1091 0 : }
1092 :
1093 : int
1094 0 : pfsync_in_del_c(caddr_t buf, int len, int count, int flags)
1095 : {
1096 : struct pfsync_del_c *sp;
1097 0 : struct pf_state_cmp id_key;
1098 : struct pf_state *st;
1099 : int i;
1100 :
1101 0 : for (i = 0; i < count; i++) {
1102 0 : sp = (struct pfsync_del_c *)(buf + len * i);
1103 :
1104 0 : id_key.id = sp->id;
1105 0 : id_key.creatorid = sp->creatorid;
1106 :
1107 0 : st = pf_find_state_byid(&id_key);
1108 0 : if (st == NULL) {
1109 0 : pfsyncstat_inc(pfsyncs_badstate);
1110 0 : continue;
1111 : }
1112 :
1113 0 : SET(st->state_flags, PFSTATE_NOSYNC);
1114 0 : pf_remove_state(st);
1115 0 : }
1116 :
1117 0 : return (0);
1118 0 : }
1119 :
1120 : int
1121 0 : pfsync_in_bus(caddr_t buf, int len, int count, int flags)
1122 : {
1123 0 : struct pfsync_softc *sc = pfsyncif;
1124 : struct pfsync_bus *bus;
1125 :
1126 : /* If we're not waiting for a bulk update, who cares. */
1127 0 : if (sc->sc_ureq_sent == 0)
1128 0 : return (0);
1129 :
1130 0 : bus = (struct pfsync_bus *)buf;
1131 :
1132 0 : switch (bus->status) {
1133 : case PFSYNC_BUS_START:
1134 0 : timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
1135 0 : pf_pool_limits[PF_LIMIT_STATES].limit /
1136 0 : ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
1137 : sizeof(struct pfsync_state)));
1138 0 : DPFPRINTF(LOG_INFO, "received bulk update start");
1139 : break;
1140 :
1141 : case PFSYNC_BUS_END:
1142 0 : if (time_uptime - ntohl(bus->endtime) >=
1143 0 : sc->sc_ureq_sent) {
1144 : /* that's it, we're happy */
1145 0 : sc->sc_ureq_sent = 0;
1146 0 : sc->sc_bulk_tries = 0;
1147 0 : timeout_del(&sc->sc_bulkfail_tmo);
1148 : #if NCARP > 0
1149 0 : if (!pfsync_sync_ok)
1150 0 : carp_group_demote_adj(&sc->sc_if, -1,
1151 0 : sc->sc_link_demoted ?
1152 : "pfsync link state up" :
1153 : "pfsync bulk done");
1154 0 : if (sc->sc_initial_bulk) {
1155 0 : carp_group_demote_adj(&sc->sc_if, -32,
1156 : "pfsync init");
1157 0 : sc->sc_initial_bulk = 0;
1158 0 : }
1159 : #endif
1160 0 : pfsync_sync_ok = 1;
1161 0 : sc->sc_link_demoted = 0;
1162 0 : DPFPRINTF(LOG_INFO, "received valid bulk update end");
1163 : } else {
1164 0 : DPFPRINTF(LOG_WARNING, "received invalid "
1165 : "bulk update end: bad timestamp");
1166 : }
1167 : break;
1168 : }
1169 :
1170 0 : return (0);
1171 0 : }
1172 :
1173 : int
1174 0 : pfsync_in_tdb(caddr_t buf, int len, int count, int flags)
1175 : {
1176 : #if defined(IPSEC)
1177 : struct pfsync_tdb *tp;
1178 : int i;
1179 :
1180 0 : for (i = 0; i < count; i++) {
1181 0 : tp = (struct pfsync_tdb *)(buf + len * i);
1182 0 : pfsync_update_net_tdb(tp);
1183 : }
1184 : #endif
1185 :
1186 0 : return (0);
1187 : }
1188 :
1189 : #if defined(IPSEC)
1190 : /* Update an in-kernel tdb. Silently fail if no tdb is found. */
1191 : void
1192 0 : pfsync_update_net_tdb(struct pfsync_tdb *pt)
1193 : {
1194 : struct tdb *tdb;
1195 :
1196 0 : NET_ASSERT_LOCKED();
1197 :
1198 : /* check for invalid values */
1199 0 : if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
1200 0 : (pt->dst.sa.sa_family != AF_INET &&
1201 0 : pt->dst.sa.sa_family != AF_INET6))
1202 : goto bad;
1203 :
1204 0 : tdb = gettdb(ntohs(pt->rdomain), pt->spi,
1205 0 : (union sockaddr_union *)&pt->dst, pt->sproto);
1206 0 : if (tdb) {
1207 0 : pt->rpl = betoh64(pt->rpl);
1208 0 : pt->cur_bytes = betoh64(pt->cur_bytes);
1209 :
1210 : /* Neither replay nor byte counter should ever decrease. */
1211 0 : if (pt->rpl < tdb->tdb_rpl ||
1212 0 : pt->cur_bytes < tdb->tdb_cur_bytes) {
1213 : goto bad;
1214 : }
1215 :
1216 0 : tdb->tdb_rpl = pt->rpl;
1217 0 : tdb->tdb_cur_bytes = pt->cur_bytes;
1218 0 : }
1219 0 : return;
1220 :
1221 : bad:
1222 0 : DPFPRINTF(LOG_WARNING, "pfsync_insert: PFSYNC_ACT_TDB_UPD: "
1223 : "invalid value");
1224 0 : pfsyncstat_inc(pfsyncs_badstate);
1225 0 : return;
1226 0 : }
1227 : #endif
1228 :
1229 :
1230 : int
1231 0 : pfsync_in_eof(caddr_t buf, int len, int count, int flags)
1232 : {
1233 0 : if (len > 0 || count > 0)
1234 0 : pfsyncstat_inc(pfsyncs_badact);
1235 :
1236 : /* we're done. let the caller return */
1237 0 : return (1);
1238 : }
1239 :
1240 : int
1241 0 : pfsync_in_error(caddr_t buf, int len, int count, int flags)
1242 : {
1243 0 : pfsyncstat_inc(pfsyncs_badact);
1244 0 : return (-1);
1245 : }
1246 :
1247 : int
1248 0 : pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
1249 : struct rtentry *rt)
1250 : {
1251 0 : m_freem(m); /* drop packet */
1252 0 : return (EAFNOSUPPORT);
1253 : }
1254 :
1255 : int
1256 0 : pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1257 : {
1258 0 : struct proc *p = curproc;
1259 0 : struct pfsync_softc *sc = ifp->if_softc;
1260 0 : struct ifreq *ifr = (struct ifreq *)data;
1261 0 : struct ip_moptions *imo = &sc->sc_imo;
1262 0 : struct pfsyncreq pfsyncr;
1263 : struct ifnet *sifp;
1264 : struct ip *ip;
1265 : int error;
1266 :
1267 0 : switch (cmd) {
1268 : case SIOCSIFFLAGS:
1269 0 : if ((ifp->if_flags & IFF_RUNNING) == 0 &&
1270 0 : (ifp->if_flags & IFF_UP)) {
1271 0 : ifp->if_flags |= IFF_RUNNING;
1272 :
1273 : #if NCARP > 0
1274 0 : sc->sc_initial_bulk = 1;
1275 0 : carp_group_demote_adj(&sc->sc_if, 32, "pfsync init");
1276 : #endif
1277 :
1278 0 : pfsync_request_full_update(sc);
1279 0 : }
1280 0 : if ((ifp->if_flags & IFF_RUNNING) &&
1281 0 : (ifp->if_flags & IFF_UP) == 0) {
1282 0 : ifp->if_flags &= ~IFF_RUNNING;
1283 :
1284 : /* drop everything */
1285 0 : timeout_del(&sc->sc_tmo);
1286 0 : pfsync_drop(sc);
1287 :
1288 0 : pfsync_cancel_full_update(sc);
1289 0 : }
1290 : break;
1291 : case SIOCSIFMTU:
1292 0 : if (!sc->sc_sync_if ||
1293 0 : ifr->ifr_mtu <= PFSYNC_MINPKT ||
1294 0 : ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
1295 0 : return (EINVAL);
1296 0 : if (ifr->ifr_mtu < ifp->if_mtu)
1297 0 : pfsync_sendout();
1298 0 : ifp->if_mtu = ifr->ifr_mtu;
1299 0 : break;
1300 : case SIOCGETPFSYNC:
1301 0 : bzero(&pfsyncr, sizeof(pfsyncr));
1302 0 : if (sc->sc_sync_if) {
1303 0 : strlcpy(pfsyncr.pfsyncr_syncdev,
1304 0 : sc->sc_sync_if->if_xname, IFNAMSIZ);
1305 0 : }
1306 0 : pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
1307 0 : pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
1308 0 : pfsyncr.pfsyncr_defer = sc->sc_defer;
1309 0 : return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));
1310 :
1311 : case SIOCSETPFSYNC:
1312 0 : if ((error = suser(p)) != 0)
1313 0 : return (error);
1314 0 : if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
1315 0 : return (error);
1316 :
1317 0 : if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
1318 0 : sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
1319 : else
1320 0 : sc->sc_sync_peer.s_addr =
1321 : pfsyncr.pfsyncr_syncpeer.s_addr;
1322 :
1323 0 : if (pfsyncr.pfsyncr_maxupdates > 255)
1324 0 : return (EINVAL);
1325 0 : sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
1326 :
1327 0 : sc->sc_defer = pfsyncr.pfsyncr_defer;
1328 :
1329 0 : if (pfsyncr.pfsyncr_syncdev[0] == 0) {
1330 0 : if (sc->sc_sync_if) {
1331 0 : hook_disestablish(
1332 0 : sc->sc_sync_if->if_linkstatehooks,
1333 0 : sc->sc_lhcookie);
1334 0 : hook_disestablish(
1335 0 : sc->sc_sync_if->if_detachhooks,
1336 0 : sc->sc_dhcookie);
1337 0 : }
1338 0 : sc->sc_sync_if = NULL;
1339 0 : if (imo->imo_num_memberships > 0) {
1340 0 : in_delmulti(imo->imo_membership[
1341 0 : --imo->imo_num_memberships]);
1342 0 : imo->imo_ifidx = 0;
1343 0 : }
1344 : break;
1345 : }
1346 :
1347 0 : if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
1348 0 : return (EINVAL);
1349 :
1350 0 : if (sifp->if_mtu < sc->sc_if.if_mtu ||
1351 0 : (sc->sc_sync_if != NULL &&
1352 0 : sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
1353 0 : sifp->if_mtu < MCLBYTES - sizeof(struct ip))
1354 0 : pfsync_sendout();
1355 :
1356 0 : if (sc->sc_sync_if) {
1357 0 : hook_disestablish(
1358 0 : sc->sc_sync_if->if_linkstatehooks,
1359 0 : sc->sc_lhcookie);
1360 0 : hook_disestablish(
1361 0 : sc->sc_sync_if->if_detachhooks,
1362 0 : sc->sc_dhcookie);
1363 0 : }
1364 0 : sc->sc_sync_if = sifp;
1365 :
1366 0 : if (imo->imo_num_memberships > 0) {
1367 0 : in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
1368 0 : imo->imo_ifidx = 0;
1369 0 : }
1370 :
1371 0 : if (sc->sc_sync_if &&
1372 0 : sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1373 0 : struct in_addr addr;
1374 :
1375 0 : if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
1376 0 : sc->sc_sync_if = NULL;
1377 0 : return (EADDRNOTAVAIL);
1378 : }
1379 :
1380 0 : addr.s_addr = INADDR_PFSYNC_GROUP;
1381 :
1382 0 : if ((imo->imo_membership[0] =
1383 0 : in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
1384 0 : sc->sc_sync_if = NULL;
1385 0 : return (ENOBUFS);
1386 : }
1387 0 : imo->imo_num_memberships++;
1388 0 : imo->imo_ifidx = sc->sc_sync_if->if_index;
1389 0 : imo->imo_ttl = PFSYNC_DFLTTL;
1390 0 : imo->imo_loop = 0;
1391 0 : }
1392 :
1393 0 : ip = &sc->sc_template;
1394 0 : bzero(ip, sizeof(*ip));
1395 0 : ip->ip_v = IPVERSION;
1396 0 : ip->ip_hl = sizeof(sc->sc_template) >> 2;
1397 0 : ip->ip_tos = IPTOS_LOWDELAY;
1398 : /* len and id are set later */
1399 0 : ip->ip_off = htons(IP_DF);
1400 0 : ip->ip_ttl = PFSYNC_DFLTTL;
1401 0 : ip->ip_p = IPPROTO_PFSYNC;
1402 0 : ip->ip_src.s_addr = INADDR_ANY;
1403 0 : ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;
1404 :
1405 0 : sc->sc_lhcookie =
1406 0 : hook_establish(sc->sc_sync_if->if_linkstatehooks, 1,
1407 : pfsync_syncdev_state, sc);
1408 0 : sc->sc_dhcookie = hook_establish(sc->sc_sync_if->if_detachhooks,
1409 : 0, pfsync_ifdetach, sc);
1410 :
1411 0 : pfsync_request_full_update(sc);
1412 :
1413 0 : break;
1414 :
1415 : default:
1416 0 : return (ENOTTY);
1417 : }
1418 :
1419 0 : return (0);
1420 0 : }
1421 :
1422 : void
1423 0 : pfsync_out_state(struct pf_state *st, void *buf)
1424 : {
1425 0 : struct pfsync_state *sp = buf;
1426 :
1427 0 : pfsync_state_export(sp, st);
1428 0 : }
1429 :
1430 : void
1431 0 : pfsync_out_iack(struct pf_state *st, void *buf)
1432 : {
1433 0 : struct pfsync_ins_ack *iack = buf;
1434 :
1435 0 : iack->id = st->id;
1436 0 : iack->creatorid = st->creatorid;
1437 0 : }
1438 :
1439 : void
1440 0 : pfsync_out_upd_c(struct pf_state *st, void *buf)
1441 : {
1442 0 : struct pfsync_upd_c *up = buf;
1443 :
1444 0 : bzero(up, sizeof(*up));
1445 0 : up->id = st->id;
1446 0 : pf_state_peer_hton(&st->src, &up->src);
1447 0 : pf_state_peer_hton(&st->dst, &up->dst);
1448 0 : up->creatorid = st->creatorid;
1449 0 : up->timeout = st->timeout;
1450 0 : }
1451 :
1452 : void
1453 0 : pfsync_out_del(struct pf_state *st, void *buf)
1454 : {
1455 0 : struct pfsync_del_c *dp = buf;
1456 :
1457 0 : dp->id = st->id;
1458 0 : dp->creatorid = st->creatorid;
1459 :
1460 0 : SET(st->state_flags, PFSTATE_NOSYNC);
1461 0 : }
1462 :
1463 : void
1464 0 : pfsync_drop(struct pfsync_softc *sc)
1465 : {
1466 : struct pf_state *st;
1467 : struct pfsync_upd_req_item *ur;
1468 : struct tdb *t;
1469 : int q;
1470 :
1471 0 : for (q = 0; q < PFSYNC_S_COUNT; q++) {
1472 0 : if (TAILQ_EMPTY(&sc->sc_qs[q]))
1473 : continue;
1474 :
1475 0 : while ((st = TAILQ_FIRST(&sc->sc_qs[q])) != NULL) {
1476 0 : TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
1477 : #ifdef PFSYNC_DEBUG
1478 : KASSERT(st->sync_state == q);
1479 : #endif
1480 0 : st->sync_state = PFSYNC_S_NONE;
1481 0 : pf_state_unref(st);
1482 : }
1483 : }
1484 :
1485 0 : while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
1486 0 : TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
1487 0 : pool_put(&sc->sc_pool, ur);
1488 : }
1489 :
1490 0 : sc->sc_plus = NULL;
1491 :
1492 0 : while ((t = TAILQ_FIRST(&sc->sc_tdb_q)) != NULL) {
1493 0 : TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
1494 0 : CLR(t->tdb_flags, TDBF_PFSYNC);
1495 : }
1496 :
1497 0 : sc->sc_len = PFSYNC_MINPKT;
1498 0 : }
1499 :
1500 : void
1501 0 : pfsync_sendout(void)
1502 : {
1503 0 : struct pfsync_softc *sc = pfsyncif;
1504 : #if NBPFILTER > 0
1505 0 : struct ifnet *ifp = &sc->sc_if;
1506 : #endif
1507 : struct mbuf *m;
1508 : struct ip *ip;
1509 : struct pfsync_header *ph;
1510 : struct pfsync_subheader *subh;
1511 : struct pf_state *st;
1512 : struct pfsync_upd_req_item *ur;
1513 : struct tdb *t;
1514 :
1515 : int offset;
1516 : int q, count = 0;
1517 :
1518 0 : if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
1519 0 : return;
1520 :
1521 0 : if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING) ||
1522 : #if NBPFILTER > 0
1523 0 : (ifp->if_bpf == NULL && sc->sc_sync_if == NULL)) {
1524 : #else
1525 : sc->sc_sync_if == NULL) {
1526 : #endif
1527 0 : pfsync_drop(sc);
1528 0 : return;
1529 : }
1530 :
1531 0 : MGETHDR(m, M_DONTWAIT, MT_DATA);
1532 0 : if (m == NULL) {
1533 0 : sc->sc_if.if_oerrors++;
1534 0 : pfsyncstat_inc(pfsyncs_onomem);
1535 0 : pfsync_drop(sc);
1536 0 : return;
1537 : }
1538 :
1539 0 : if (max_linkhdr + sc->sc_len > MHLEN) {
1540 0 : MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
1541 0 : if (!ISSET(m->m_flags, M_EXT)) {
1542 0 : m_free(m);
1543 0 : sc->sc_if.if_oerrors++;
1544 0 : pfsyncstat_inc(pfsyncs_onomem);
1545 0 : pfsync_drop(sc);
1546 0 : return;
1547 : }
1548 : }
1549 0 : m->m_data += max_linkhdr;
1550 0 : m->m_len = m->m_pkthdr.len = sc->sc_len;
1551 :
1552 : /* build the ip header */
1553 0 : ip = mtod(m, struct ip *);
1554 0 : bcopy(&sc->sc_template, ip, sizeof(*ip));
1555 : offset = sizeof(*ip);
1556 :
1557 0 : ip->ip_len = htons(m->m_pkthdr.len);
1558 0 : ip->ip_id = htons(ip_randomid());
1559 :
1560 : /* build the pfsync header */
1561 0 : ph = (struct pfsync_header *)(m->m_data + offset);
1562 0 : bzero(ph, sizeof(*ph));
1563 : offset += sizeof(*ph);
1564 :
1565 0 : ph->version = PFSYNC_VERSION;
1566 0 : ph->len = htons(sc->sc_len - sizeof(*ip));
1567 0 : bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
1568 :
1569 0 : if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
1570 0 : subh = (struct pfsync_subheader *)(m->m_data + offset);
1571 : offset += sizeof(*subh);
1572 :
1573 : count = 0;
1574 0 : while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
1575 0 : TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
1576 :
1577 0 : bcopy(&ur->ur_msg, m->m_data + offset,
1578 : sizeof(ur->ur_msg));
1579 0 : offset += sizeof(ur->ur_msg);
1580 :
1581 0 : pool_put(&sc->sc_pool, ur);
1582 :
1583 0 : count++;
1584 : }
1585 :
1586 0 : bzero(subh, sizeof(*subh));
1587 0 : subh->len = sizeof(ur->ur_msg) >> 2;
1588 0 : subh->action = PFSYNC_ACT_UPD_REQ;
1589 0 : subh->count = htons(count);
1590 0 : }
1591 :
1592 : /* has someone built a custom region for us to add? */
1593 0 : if (sc->sc_plus != NULL) {
1594 0 : bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
1595 0 : offset += sc->sc_pluslen;
1596 :
1597 0 : sc->sc_plus = NULL;
1598 0 : }
1599 :
1600 0 : if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
1601 0 : subh = (struct pfsync_subheader *)(m->m_data + offset);
1602 0 : offset += sizeof(*subh);
1603 :
1604 : count = 0;
1605 0 : while ((t = TAILQ_FIRST(&sc->sc_tdb_q)) != NULL) {
1606 0 : TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
1607 0 : pfsync_out_tdb(t, m->m_data + offset);
1608 0 : offset += sizeof(struct pfsync_tdb);
1609 0 : CLR(t->tdb_flags, TDBF_PFSYNC);
1610 0 : count++;
1611 : }
1612 :
1613 0 : bzero(subh, sizeof(*subh));
1614 0 : subh->action = PFSYNC_ACT_TDB;
1615 0 : subh->len = sizeof(struct pfsync_tdb) >> 2;
1616 0 : subh->count = htons(count);
1617 0 : }
1618 :
1619 : /* walk the queues */
1620 0 : for (q = 0; q < PFSYNC_S_COUNT; q++) {
1621 0 : if (TAILQ_EMPTY(&sc->sc_qs[q]))
1622 : continue;
1623 :
1624 0 : subh = (struct pfsync_subheader *)(m->m_data + offset);
1625 0 : offset += sizeof(*subh);
1626 :
1627 : count = 0;
1628 0 : while ((st = TAILQ_FIRST(&sc->sc_qs[q])) != NULL) {
1629 0 : TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
1630 0 : st->sync_state = PFSYNC_S_NONE;
1631 : #ifdef PFSYNC_DEBUG
1632 : KASSERT(st->sync_state == q);
1633 : #endif
1634 0 : pfsync_qs[q].write(st, m->m_data + offset);
1635 0 : offset += pfsync_qs[q].len;
1636 :
1637 0 : pf_state_unref(st);
1638 0 : count++;
1639 : }
1640 :
1641 0 : bzero(subh, sizeof(*subh));
1642 0 : subh->action = pfsync_qs[q].action;
1643 0 : subh->len = pfsync_qs[q].len >> 2;
1644 0 : subh->count = htons(count);
1645 0 : }
1646 :
1647 : /* we're done, let's put it on the wire */
1648 : #if NBPFILTER > 0
1649 0 : if (ifp->if_bpf) {
1650 0 : m->m_data += sizeof(*ip);
1651 0 : m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
1652 0 : bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1653 0 : m->m_data -= sizeof(*ip);
1654 0 : m->m_len = m->m_pkthdr.len = sc->sc_len;
1655 0 : }
1656 :
1657 0 : if (sc->sc_sync_if == NULL) {
1658 : sc->sc_len = PFSYNC_MINPKT;
1659 0 : m_freem(m);
1660 0 : return;
1661 : }
1662 : #endif
1663 :
1664 : /* start again */
1665 : sc->sc_len = PFSYNC_MINPKT;
1666 :
1667 0 : sc->sc_if.if_opackets++;
1668 0 : sc->sc_if.if_obytes += m->m_pkthdr.len;
1669 :
1670 0 : m->m_pkthdr.ph_rtableid = sc->sc_if.if_rdomain;
1671 :
1672 0 : if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL, 0) == 0)
1673 0 : pfsyncstat_inc(pfsyncs_opackets);
1674 : else
1675 0 : pfsyncstat_inc(pfsyncs_oerrors);
1676 0 : }
1677 :
1678 : void
1679 0 : pfsync_insert_state(struct pf_state *st)
1680 : {
1681 0 : struct pfsync_softc *sc = pfsyncif;
1682 :
1683 0 : NET_ASSERT_LOCKED();
1684 :
1685 0 : if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
1686 0 : st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
1687 0 : SET(st->state_flags, PFSTATE_NOSYNC);
1688 0 : return;
1689 : }
1690 :
1691 0 : if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING) ||
1692 0 : ISSET(st->state_flags, PFSTATE_NOSYNC))
1693 0 : return;
1694 :
1695 : #ifdef PFSYNC_DEBUG
1696 : KASSERT(st->sync_state == PFSYNC_S_NONE);
1697 : #endif
1698 :
1699 0 : if (sc->sc_len == PFSYNC_MINPKT)
1700 0 : timeout_add_sec(&sc->sc_tmo, 1);
1701 :
1702 0 : pfsync_q_ins(st, PFSYNC_S_INS);
1703 :
1704 0 : st->sync_updates = 0;
1705 0 : }
1706 :
1707 : int
1708 0 : pfsync_defer(struct pf_state *st, struct mbuf *m)
1709 : {
1710 0 : struct pfsync_softc *sc = pfsyncif;
1711 : struct pfsync_deferral *pd;
1712 :
1713 0 : NET_ASSERT_LOCKED();
1714 :
1715 0 : if (!sc->sc_defer ||
1716 0 : ISSET(st->state_flags, PFSTATE_NOSYNC) ||
1717 0 : m->m_flags & (M_BCAST|M_MCAST))
1718 0 : return (0);
1719 :
1720 0 : if (sc->sc_deferred >= 128) {
1721 0 : pd = TAILQ_FIRST(&sc->sc_deferrals);
1722 0 : if (timeout_del(&pd->pd_tmo))
1723 0 : pfsync_undefer(pd, 0);
1724 : }
1725 :
1726 0 : pd = pool_get(&sc->sc_pool, M_NOWAIT);
1727 0 : if (pd == NULL)
1728 0 : return (0);
1729 :
1730 0 : m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
1731 0 : SET(st->state_flags, PFSTATE_ACK);
1732 :
1733 0 : pd->pd_st = pf_state_ref(st);
1734 0 : pd->pd_m = m;
1735 :
1736 0 : sc->sc_deferred++;
1737 0 : TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
1738 :
1739 0 : timeout_set_proc(&pd->pd_tmo, pfsync_defer_tmo, pd);
1740 0 : timeout_add_msec(&pd->pd_tmo, 20);
1741 :
1742 0 : schednetisr(NETISR_PFSYNC);
1743 :
1744 0 : return (1);
1745 0 : }
1746 :
1747 : void
1748 0 : pfsync_undefer(struct pfsync_deferral *pd, int drop)
1749 : {
1750 0 : struct pfsync_softc *sc = pfsyncif;
1751 0 : struct pf_pdesc pdesc;
1752 :
1753 0 : NET_ASSERT_LOCKED();
1754 :
1755 0 : TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
1756 0 : sc->sc_deferred--;
1757 :
1758 0 : CLR(pd->pd_st->state_flags, PFSTATE_ACK);
1759 0 : if (drop)
1760 0 : m_freem(pd->pd_m);
1761 : else {
1762 0 : if (pd->pd_st->rule.ptr->rt == PF_ROUTETO) {
1763 0 : if (pf_setup_pdesc(&pdesc,
1764 : pd->pd_st->key[PF_SK_WIRE]->af,
1765 0 : pd->pd_st->direction, pd->pd_st->rt_kif,
1766 0 : pd->pd_m, NULL) != PF_PASS) {
1767 0 : m_freem(pd->pd_m);
1768 0 : goto out;
1769 : }
1770 0 : switch (pd->pd_st->key[PF_SK_WIRE]->af) {
1771 : case AF_INET:
1772 0 : pf_route(&pdesc,
1773 0 : pd->pd_st->rule.ptr, pd->pd_st);
1774 0 : break;
1775 : #ifdef INET6
1776 : case AF_INET6:
1777 0 : pf_route6(&pdesc,
1778 0 : pd->pd_st->rule.ptr, pd->pd_st);
1779 0 : break;
1780 : #endif /* INET6 */
1781 : default:
1782 0 : unhandled_af(pd->pd_st->key[PF_SK_WIRE]->af);
1783 : }
1784 0 : pd->pd_m = pdesc.m;
1785 0 : } else {
1786 0 : switch (pd->pd_st->key[PF_SK_WIRE]->af) {
1787 : case AF_INET:
1788 0 : ip_output(pd->pd_m, NULL, NULL, 0, NULL, NULL,
1789 : 0);
1790 0 : break;
1791 : #ifdef INET6
1792 : case AF_INET6:
1793 0 : ip6_output(pd->pd_m, NULL, NULL, 0,
1794 : NULL, NULL);
1795 0 : break;
1796 : #endif /* INET6 */
1797 : default:
1798 0 : unhandled_af(pd->pd_st->key[PF_SK_WIRE]->af);
1799 : }
1800 : }
1801 : }
1802 : out:
1803 0 : pf_state_unref(pd->pd_st);
1804 0 : pool_put(&sc->sc_pool, pd);
1805 0 : }
1806 :
1807 : void
1808 0 : pfsync_defer_tmo(void *arg)
1809 : {
1810 0 : NET_LOCK();
1811 0 : pfsync_undefer(arg, 0);
1812 0 : NET_UNLOCK();
1813 0 : }
1814 :
1815 : void
1816 0 : pfsync_deferred(struct pf_state *st, int drop)
1817 : {
1818 0 : struct pfsync_softc *sc = pfsyncif;
1819 : struct pfsync_deferral *pd;
1820 :
1821 0 : NET_ASSERT_LOCKED();
1822 :
1823 0 : TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
1824 0 : if (pd->pd_st == st) {
1825 0 : if (timeout_del(&pd->pd_tmo))
1826 0 : pfsync_undefer(pd, drop);
1827 : return;
1828 : }
1829 : }
1830 :
1831 0 : panic("pfsync_deferred: unable to find deferred state");
1832 0 : }
1833 :
1834 : void
1835 0 : pfsync_update_state_locked(struct pf_state *st)
1836 : {
1837 0 : struct pfsync_softc *sc = pfsyncif;
1838 : int sync = 0;
1839 :
1840 0 : NET_ASSERT_LOCKED();
1841 : PF_ASSERT_LOCKED();
1842 :
1843 0 : if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
1844 0 : return;
1845 :
1846 0 : if (ISSET(st->state_flags, PFSTATE_ACK))
1847 0 : pfsync_deferred(st, 0);
1848 0 : if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
1849 0 : if (st->sync_state != PFSYNC_S_NONE)
1850 0 : pfsync_q_del(st);
1851 0 : return;
1852 : }
1853 :
1854 0 : if (sc->sc_len == PFSYNC_MINPKT)
1855 0 : timeout_add_sec(&sc->sc_tmo, 1);
1856 :
1857 0 : switch (st->sync_state) {
1858 : case PFSYNC_S_UPD_C:
1859 : case PFSYNC_S_UPD:
1860 : case PFSYNC_S_INS:
1861 : /* we're already handling it */
1862 :
1863 0 : if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
1864 0 : st->sync_updates++;
1865 0 : if (st->sync_updates >= sc->sc_maxupdates)
1866 0 : sync = 1;
1867 : }
1868 : break;
1869 :
1870 : case PFSYNC_S_IACK:
1871 0 : pfsync_q_del(st);
1872 : case PFSYNC_S_NONE:
1873 0 : pfsync_q_ins(st, PFSYNC_S_UPD_C);
1874 0 : st->sync_updates = 0;
1875 0 : break;
1876 :
1877 : default:
1878 0 : panic("pfsync_update_state: unexpected sync state %d",
1879 : st->sync_state);
1880 : }
1881 :
1882 0 : if (sync || (time_uptime - st->pfsync_time) < 2)
1883 0 : schednetisr(NETISR_PFSYNC);
1884 0 : }
1885 :
1886 : void
1887 0 : pfsync_update_state(struct pf_state *st, int *have_pf_lock)
1888 : {
1889 0 : struct pfsync_softc *sc = pfsyncif;
1890 :
1891 0 : if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
1892 0 : return;
1893 :
1894 0 : if (*have_pf_lock == 0) {
1895 : PF_LOCK();
1896 0 : *have_pf_lock = 1;
1897 0 : }
1898 :
1899 0 : pfsync_update_state_locked(st);
1900 0 : }
1901 :
1902 : void
1903 0 : pfsync_cancel_full_update(struct pfsync_softc *sc)
1904 : {
1905 0 : if (timeout_pending(&sc->sc_bulkfail_tmo) ||
1906 0 : timeout_pending(&sc->sc_bulk_tmo)) {
1907 : #if NCARP > 0
1908 0 : if (!pfsync_sync_ok)
1909 0 : carp_group_demote_adj(&sc->sc_if, -1,
1910 : "pfsync bulk cancelled");
1911 0 : if (sc->sc_initial_bulk) {
1912 0 : carp_group_demote_adj(&sc->sc_if, -32,
1913 : "pfsync init");
1914 0 : sc->sc_initial_bulk = 0;
1915 0 : }
1916 : #endif
1917 0 : pfsync_sync_ok = 1;
1918 0 : DPFPRINTF(LOG_INFO, "cancelling bulk update");
1919 : }
1920 0 : timeout_del(&sc->sc_bulkfail_tmo);
1921 0 : timeout_del(&sc->sc_bulk_tmo);
1922 0 : sc->sc_bulk_next = NULL;
1923 0 : sc->sc_bulk_last = NULL;
1924 0 : sc->sc_ureq_sent = 0;
1925 0 : sc->sc_bulk_tries = 0;
1926 0 : }
1927 :
1928 : void
1929 0 : pfsync_request_full_update(struct pfsync_softc *sc)
1930 : {
1931 0 : if (sc->sc_sync_if && ISSET(sc->sc_if.if_flags, IFF_RUNNING)) {
1932 : /* Request a full state table update. */
1933 0 : sc->sc_ureq_sent = time_uptime;
1934 : #if NCARP > 0
1935 0 : if (!sc->sc_link_demoted && pfsync_sync_ok)
1936 0 : carp_group_demote_adj(&sc->sc_if, 1,
1937 : "pfsync bulk start");
1938 : #endif
1939 0 : pfsync_sync_ok = 0;
1940 0 : DPFPRINTF(LOG_INFO, "requesting bulk update");
1941 0 : timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
1942 0 : pf_pool_limits[PF_LIMIT_STATES].limit /
1943 0 : ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
1944 : sizeof(struct pfsync_state)));
1945 0 : pfsync_request_update(0, 0);
1946 0 : }
1947 0 : }
1948 :
1949 : void
1950 0 : pfsync_request_update(u_int32_t creatorid, u_int64_t id)
1951 : {
1952 0 : struct pfsync_softc *sc = pfsyncif;
1953 : struct pfsync_upd_req_item *item;
1954 : size_t nlen = sizeof(struct pfsync_upd_req);
1955 :
1956 : /*
1957 : * this code does nothing to prevent multiple update requests for the
1958 : * same state being generated.
1959 : */
1960 :
1961 0 : item = pool_get(&sc->sc_pool, PR_NOWAIT);
1962 0 : if (item == NULL) {
1963 : /* XXX stats */
1964 0 : return;
1965 : }
1966 :
1967 0 : item->ur_msg.id = id;
1968 0 : item->ur_msg.creatorid = creatorid;
1969 :
1970 0 : if (TAILQ_EMPTY(&sc->sc_upd_req_list))
1971 0 : nlen += sizeof(struct pfsync_subheader);
1972 :
1973 0 : if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
1974 0 : pfsync_sendout();
1975 :
1976 : nlen = sizeof(struct pfsync_subheader) +
1977 : sizeof(struct pfsync_upd_req);
1978 0 : }
1979 :
1980 0 : TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
1981 0 : sc->sc_len += nlen;
1982 :
1983 0 : schednetisr(NETISR_PFSYNC);
1984 0 : }
1985 :
1986 : void
1987 0 : pfsync_update_state_req(struct pf_state *st)
1988 : {
1989 0 : struct pfsync_softc *sc = pfsyncif;
1990 :
1991 0 : if (sc == NULL)
1992 0 : panic("pfsync_update_state_req: nonexistant instance");
1993 :
1994 0 : if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
1995 0 : if (st->sync_state != PFSYNC_S_NONE)
1996 0 : pfsync_q_del(st);
1997 0 : return;
1998 : }
1999 :
2000 0 : switch (st->sync_state) {
2001 : case PFSYNC_S_UPD_C:
2002 : case PFSYNC_S_IACK:
2003 0 : pfsync_q_del(st);
2004 : case PFSYNC_S_NONE:
2005 0 : pfsync_q_ins(st, PFSYNC_S_UPD);
2006 0 : schednetisr(NETISR_PFSYNC);
2007 0 : return;
2008 :
2009 : case PFSYNC_S_INS:
2010 : case PFSYNC_S_UPD:
2011 : case PFSYNC_S_DEL:
2012 : /* we're already handling it */
2013 0 : return;
2014 :
2015 : default:
2016 0 : panic("pfsync_update_state_req: unexpected sync state %d",
2017 : st->sync_state);
2018 : }
2019 0 : }
2020 :
2021 : void
2022 0 : pfsync_delete_state(struct pf_state *st)
2023 : {
2024 0 : struct pfsync_softc *sc = pfsyncif;
2025 :
2026 0 : NET_ASSERT_LOCKED();
2027 :
2028 0 : if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
2029 0 : return;
2030 :
2031 0 : if (ISSET(st->state_flags, PFSTATE_ACK))
2032 0 : pfsync_deferred(st, 1);
2033 0 : if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2034 0 : if (st->sync_state != PFSYNC_S_NONE)
2035 0 : pfsync_q_del(st);
2036 0 : return;
2037 : }
2038 :
2039 0 : if (sc->sc_len == PFSYNC_MINPKT)
2040 0 : timeout_add_sec(&sc->sc_tmo, 1);
2041 :
2042 0 : switch (st->sync_state) {
2043 : case PFSYNC_S_INS:
2044 : /* we never got to tell the world so just forget about it */
2045 0 : pfsync_q_del(st);
2046 0 : return;
2047 :
2048 : case PFSYNC_S_UPD_C:
2049 : case PFSYNC_S_UPD:
2050 : case PFSYNC_S_IACK:
2051 0 : pfsync_q_del(st);
2052 : /*
2053 : * FALLTHROUGH to putting it on the del list
2054 : * Note on refence count bookeeping:
2055 : * pfsync_q_del() drops reference for queue
2056 : * ownership. But the st entry survives, because
2057 : * our caller still holds a reference.
2058 : */
2059 :
2060 : case PFSYNC_S_NONE:
2061 : /*
2062 : * We either fall through here, or there is no reference to
2063 : * st owned by pfsync queues at this point.
2064 : *
2065 : * Calling pfsync_q_ins() puts st to del queue. The pfsync_q_ins()
2066 : * grabs a reference for delete queue.
2067 : */
2068 0 : pfsync_q_ins(st, PFSYNC_S_DEL);
2069 0 : return;
2070 :
2071 : default:
2072 0 : panic("pfsync_delete_state: unexpected sync state %d",
2073 : st->sync_state);
2074 : }
2075 0 : }
2076 :
2077 : void
2078 0 : pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2079 : {
2080 0 : struct pfsync_softc *sc = pfsyncif;
2081 0 : struct {
2082 : struct pfsync_subheader subh;
2083 : struct pfsync_clr clr;
2084 : } __packed r;
2085 :
2086 0 : NET_ASSERT_LOCKED();
2087 :
2088 0 : if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
2089 0 : return;
2090 :
2091 0 : bzero(&r, sizeof(r));
2092 :
2093 0 : r.subh.action = PFSYNC_ACT_CLR;
2094 0 : r.subh.len = sizeof(struct pfsync_clr) >> 2;
2095 0 : r.subh.count = htons(1);
2096 :
2097 0 : strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2098 0 : r.clr.creatorid = creatorid;
2099 :
2100 0 : pfsync_send_plus(&r, sizeof(r));
2101 0 : }
2102 :
2103 : void
2104 0 : pfsync_q_ins(struct pf_state *st, int q)
2105 : {
2106 0 : struct pfsync_softc *sc = pfsyncif;
2107 0 : size_t nlen = pfsync_qs[q].len;
2108 :
2109 0 : KASSERT(st->sync_state == PFSYNC_S_NONE);
2110 :
2111 : #if defined(PFSYNC_DEBUG)
2112 : if (sc->sc_len < PFSYNC_MINPKT)
2113 : panic("pfsync pkt len is too low %d", sc->sc_len);
2114 : #endif
2115 0 : if (TAILQ_EMPTY(&sc->sc_qs[q]))
2116 0 : nlen += sizeof(struct pfsync_subheader);
2117 :
2118 0 : if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2119 0 : pfsync_sendout();
2120 :
2121 0 : nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
2122 0 : }
2123 :
2124 0 : sc->sc_len += nlen;
2125 0 : pf_state_ref(st);
2126 0 : TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
2127 0 : st->sync_state = q;
2128 0 : }
2129 :
2130 : void
2131 0 : pfsync_q_del(struct pf_state *st)
2132 : {
2133 0 : struct pfsync_softc *sc = pfsyncif;
2134 0 : int q = st->sync_state;
2135 :
2136 0 : KASSERT(st->sync_state != PFSYNC_S_NONE);
2137 :
2138 0 : sc->sc_len -= pfsync_qs[q].len;
2139 0 : TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
2140 0 : st->sync_state = PFSYNC_S_NONE;
2141 0 : pf_state_unref(st);
2142 :
2143 0 : if (TAILQ_EMPTY(&sc->sc_qs[q]))
2144 0 : sc->sc_len -= sizeof(struct pfsync_subheader);
2145 0 : }
2146 :
2147 : void
2148 0 : pfsync_update_tdb(struct tdb *t, int output)
2149 : {
2150 0 : struct pfsync_softc *sc = pfsyncif;
2151 : size_t nlen = sizeof(struct pfsync_tdb);
2152 :
2153 0 : if (sc == NULL)
2154 0 : return;
2155 :
2156 0 : if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
2157 0 : if (TAILQ_EMPTY(&sc->sc_tdb_q))
2158 0 : nlen += sizeof(struct pfsync_subheader);
2159 :
2160 0 : if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2161 0 : pfsync_sendout();
2162 :
2163 : nlen = sizeof(struct pfsync_subheader) +
2164 : sizeof(struct pfsync_tdb);
2165 0 : }
2166 :
2167 0 : sc->sc_len += nlen;
2168 0 : TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
2169 0 : SET(t->tdb_flags, TDBF_PFSYNC);
2170 0 : t->tdb_updates = 0;
2171 0 : } else {
2172 0 : if (++t->tdb_updates >= sc->sc_maxupdates)
2173 0 : schednetisr(NETISR_PFSYNC);
2174 : }
2175 :
2176 0 : if (output)
2177 0 : SET(t->tdb_flags, TDBF_PFSYNC_RPL);
2178 : else
2179 0 : CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
2180 0 : }
2181 :
2182 : void
2183 0 : pfsync_delete_tdb(struct tdb *t)
2184 : {
2185 0 : struct pfsync_softc *sc = pfsyncif;
2186 :
2187 0 : if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
2188 0 : return;
2189 :
2190 0 : sc->sc_len -= sizeof(struct pfsync_tdb);
2191 0 : TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
2192 0 : CLR(t->tdb_flags, TDBF_PFSYNC);
2193 :
2194 0 : if (TAILQ_EMPTY(&sc->sc_tdb_q))
2195 0 : sc->sc_len -= sizeof(struct pfsync_subheader);
2196 0 : }
2197 :
2198 : void
2199 0 : pfsync_out_tdb(struct tdb *t, void *buf)
2200 : {
2201 0 : struct pfsync_tdb *ut = buf;
2202 :
2203 0 : bzero(ut, sizeof(*ut));
2204 0 : ut->spi = t->tdb_spi;
2205 0 : bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
2206 : /*
2207 : * When a failover happens, the master's rpl is probably above
2208 : * what we see here (we may be up to a second late), so
2209 : * increase it a bit for outbound tdbs to manage most such
2210 : * situations.
2211 : *
2212 : * For now, just add an offset that is likely to be larger
2213 : * than the number of packets we can see in one second. The RFC
2214 : * just says the next packet must have a higher seq value.
2215 : *
2216 : * XXX What is a good algorithm for this? We could use
2217 : * a rate-determined increase, but to know it, we would have
2218 : * to extend struct tdb.
2219 : * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
2220 : * will soon be replaced anyway. For now, just don't handle
2221 : * this edge case.
2222 : */
2223 : #define RPL_INCR 16384
2224 0 : ut->rpl = htobe64(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
2225 : RPL_INCR : 0));
2226 0 : ut->cur_bytes = htobe64(t->tdb_cur_bytes);
2227 0 : ut->sproto = t->tdb_sproto;
2228 0 : ut->rdomain = htons(t->tdb_rdomain);
2229 0 : }
2230 :
2231 : void
2232 0 : pfsync_bulk_start(void)
2233 : {
2234 0 : struct pfsync_softc *sc = pfsyncif;
2235 :
2236 0 : DPFPRINTF(LOG_INFO, "received bulk update request");
2237 :
2238 0 : if (TAILQ_EMPTY(&state_list))
2239 0 : pfsync_bulk_status(PFSYNC_BUS_END);
2240 : else {
2241 0 : sc->sc_ureq_received = time_uptime;
2242 :
2243 0 : if (sc->sc_bulk_next == NULL)
2244 0 : sc->sc_bulk_next = TAILQ_FIRST(&state_list);
2245 0 : sc->sc_bulk_last = sc->sc_bulk_next;
2246 :
2247 0 : pfsync_bulk_status(PFSYNC_BUS_START);
2248 0 : timeout_add(&sc->sc_bulk_tmo, 0);
2249 : }
2250 0 : }
2251 :
2252 : void
2253 0 : pfsync_bulk_update(void *arg)
2254 : {
2255 0 : struct pfsync_softc *sc = arg;
2256 : struct pf_state *st;
2257 : int i = 0;
2258 :
2259 0 : NET_LOCK();
2260 0 : st = sc->sc_bulk_next;
2261 :
2262 0 : for (;;) {
2263 0 : if (st->sync_state == PFSYNC_S_NONE &&
2264 0 : st->timeout < PFTM_MAX &&
2265 0 : st->pfsync_time <= sc->sc_ureq_received) {
2266 0 : pfsync_update_state_req(st);
2267 0 : i++;
2268 0 : }
2269 :
2270 0 : st = TAILQ_NEXT(st, entry_list);
2271 0 : if (st == NULL)
2272 0 : st = TAILQ_FIRST(&state_list);
2273 :
2274 0 : if (st == sc->sc_bulk_last) {
2275 : /* we're done */
2276 0 : sc->sc_bulk_next = NULL;
2277 0 : sc->sc_bulk_last = NULL;
2278 0 : pfsync_bulk_status(PFSYNC_BUS_END);
2279 0 : break;
2280 : }
2281 :
2282 0 : if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
2283 : sizeof(struct pfsync_state)) {
2284 : /* we've filled a packet */
2285 0 : sc->sc_bulk_next = st;
2286 0 : timeout_add(&sc->sc_bulk_tmo, 1);
2287 0 : break;
2288 : }
2289 : }
2290 0 : NET_UNLOCK();
2291 0 : }
2292 :
2293 : void
2294 0 : pfsync_bulk_status(u_int8_t status)
2295 : {
2296 0 : struct {
2297 : struct pfsync_subheader subh;
2298 : struct pfsync_bus bus;
2299 : } __packed r;
2300 :
2301 0 : struct pfsync_softc *sc = pfsyncif;
2302 :
2303 0 : bzero(&r, sizeof(r));
2304 :
2305 0 : r.subh.action = PFSYNC_ACT_BUS;
2306 0 : r.subh.len = sizeof(struct pfsync_bus) >> 2;
2307 0 : r.subh.count = htons(1);
2308 :
2309 0 : r.bus.creatorid = pf_status.hostid;
2310 0 : r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
2311 0 : r.bus.status = status;
2312 :
2313 0 : pfsync_send_plus(&r, sizeof(r));
2314 0 : }
2315 :
2316 : void
2317 0 : pfsync_bulk_fail(void *arg)
2318 : {
2319 0 : struct pfsync_softc *sc = arg;
2320 :
2321 0 : NET_LOCK();
2322 0 : if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
2323 : /* Try again */
2324 0 : timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
2325 0 : pfsync_request_update(0, 0);
2326 0 : } else {
2327 : /* Pretend like the transfer was ok */
2328 0 : sc->sc_ureq_sent = 0;
2329 0 : sc->sc_bulk_tries = 0;
2330 : #if NCARP > 0
2331 0 : if (!pfsync_sync_ok)
2332 0 : carp_group_demote_adj(&sc->sc_if, -1,
2333 0 : sc->sc_link_demoted ?
2334 : "pfsync link state up" :
2335 : "pfsync bulk fail");
2336 0 : if (sc->sc_initial_bulk) {
2337 0 : carp_group_demote_adj(&sc->sc_if, -32,
2338 : "pfsync init");
2339 0 : sc->sc_initial_bulk = 0;
2340 0 : }
2341 : #endif
2342 0 : pfsync_sync_ok = 1;
2343 0 : sc->sc_link_demoted = 0;
2344 0 : DPFPRINTF(LOG_ERR, "failed to receive bulk update");
2345 : }
2346 0 : NET_UNLOCK();
2347 0 : }
2348 :
2349 : void
2350 0 : pfsync_send_plus(void *plus, size_t pluslen)
2351 : {
2352 0 : struct pfsync_softc *sc = pfsyncif;
2353 :
2354 0 : if (sc->sc_len + pluslen > sc->sc_if.if_mtu)
2355 0 : pfsync_sendout();
2356 :
2357 0 : sc->sc_plus = plus;
2358 0 : sc->sc_len += (sc->sc_pluslen = pluslen);
2359 :
2360 0 : pfsync_sendout();
2361 0 : }
2362 :
2363 : int
2364 0 : pfsync_up(void)
2365 : {
2366 0 : struct pfsync_softc *sc = pfsyncif;
2367 :
2368 0 : if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
2369 0 : return (0);
2370 :
2371 0 : return (1);
2372 0 : }
2373 :
2374 : int
2375 0 : pfsync_state_in_use(struct pf_state *st)
2376 : {
2377 0 : struct pfsync_softc *sc = pfsyncif;
2378 :
2379 0 : if (sc == NULL)
2380 0 : return (0);
2381 :
2382 0 : if (st->sync_state != PFSYNC_S_NONE ||
2383 0 : st == sc->sc_bulk_next ||
2384 0 : st == sc->sc_bulk_last)
2385 0 : return (1);
2386 :
2387 0 : return (0);
2388 0 : }
2389 :
2390 : void
2391 0 : pfsync_timeout(void *arg)
2392 : {
2393 0 : NET_LOCK();
2394 0 : pfsync_sendout();
2395 0 : NET_UNLOCK();
2396 0 : }
2397 :
2398 : /* this is a softnet/netisr handler */
2399 : void
2400 0 : pfsyncintr(void)
2401 : {
2402 0 : pfsync_sendout();
2403 0 : }
2404 :
2405 : int
2406 0 : pfsync_sysctl_pfsyncstat(void *oldp, size_t *oldlenp, void *newp)
2407 : {
2408 0 : struct pfsyncstats pfsyncstat;
2409 :
2410 : CTASSERT(sizeof(pfsyncstat) == (pfsyncs_ncounters * sizeof(uint64_t)));
2411 0 : memset(&pfsyncstat, 0, sizeof pfsyncstat);
2412 0 : counters_read(pfsynccounters, (uint64_t *)&pfsyncstat,
2413 : pfsyncs_ncounters);
2414 0 : return (sysctl_rdstruct(oldp, oldlenp, newp,
2415 : &pfsyncstat, sizeof(pfsyncstat)));
2416 0 : }
2417 :
2418 : int
2419 0 : pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
2420 : size_t newlen)
2421 : {
2422 : /* All sysctl names at this level are terminal. */
2423 0 : if (namelen != 1)
2424 0 : return (ENOTDIR);
2425 :
2426 0 : switch (name[0]) {
2427 : case PFSYNCCTL_STATS:
2428 0 : return (pfsync_sysctl_pfsyncstat(oldp, oldlenp, newp));
2429 : default:
2430 0 : return (ENOPROTOOPT);
2431 : }
2432 0 : }
|