Line data Source code
1 : /* $OpenBSD: pgt.c,v 1.93 2018/04/28 16:05:56 phessler Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
5 : * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
6 : *
7 : * Permission to use, copy, modify, and distribute this software for any
8 : * purpose with or without fee is hereby granted, provided that the above
9 : * copyright notice and this permission notice appear in all copies.
10 : *
11 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 : */
19 :
20 : /*
21 : * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
22 : * Copyright (c) 2004 Brian Fundakowski Feldman
23 : * All rights reserved.
24 : *
25 : * Redistribution and use in source and binary forms, with or without
26 : * modification, are permitted provided that the following conditions
27 : * are met:
28 : * 1. Redistributions of source code must retain the above copyright
29 : * notice, this list of conditions and the following disclaimer.
30 : * 2. Redistributions in binary form must reproduce the above copyright
31 : * notice, this list of conditions and the following disclaimer in the
32 : * documentation and/or other materials provided with the distribution.
33 : *
34 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
35 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37 : * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
38 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44 : * SUCH DAMAGE.
45 : */
46 :
47 : #include "bpfilter.h"
48 :
49 : #include <sys/param.h>
50 : #include <sys/systm.h>
51 : #include <sys/kernel.h>
52 : #include <sys/malloc.h>
53 : #include <sys/socket.h>
54 : #include <sys/mbuf.h>
55 : #include <sys/endian.h>
56 : #include <sys/sockio.h>
57 : #include <sys/kthread.h>
58 : #include <sys/time.h>
59 : #include <sys/ioctl.h>
60 : #include <sys/device.h>
61 :
62 : #include <machine/bus.h>
63 : #include <machine/intr.h>
64 :
65 : #include <net/if.h>
66 : #include <net/if_llc.h>
67 : #include <net/if_media.h>
68 :
69 : #if NBPFILTER > 0
70 : #include <net/bpf.h>
71 : #endif
72 :
73 : #include <netinet/in.h>
74 : #include <netinet/if_ether.h>
75 :
76 : #include <net80211/ieee80211_var.h>
77 : #include <net80211/ieee80211_radiotap.h>
78 :
79 : #include <dev/ic/pgtreg.h>
80 : #include <dev/ic/pgtvar.h>
81 :
82 : #include <dev/ic/if_wireg.h>
83 : #include <dev/ic/if_wi_ieee.h>
84 : #include <dev/ic/if_wivar.h>
85 :
#ifdef PGT_DEBUG
/* Debug printf; compiled in only when the kernel is built with PGT_DEBUG. */
#define DPRINTF(x)	do { printf x; } while (0)
#else
#define DPRINTF(x)
#endif

/*
 * Set an OID on the device and `break' out of the enclosing loop or
 * switch statement on failure; only usable inside a breakable construct.
 */
#define SETOID(oid, var, size) {					\
	if (pgt_oid_set(sc, oid, var, size) != 0)			\
		break;							\
}
96 :
97 : /*
98 : * This is a driver for the Intersil Prism family of 802.11g network cards,
99 : * based upon version 1.2 of the Linux driver and firmware found at
100 : * http://www.prism54.org/.
101 : */
102 :
#define SCAN_TIMEOUT			5	/* 5 seconds */

/* autoconf(9) glue: device class definition for "pgt" network devices. */
struct cfdriver pgt_cd = {
	NULL, "pgt", DV_IFNET
};
108 :
109 : void pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
110 : int pgt_media_change(struct ifnet *ifp);
111 : void pgt_write_memory_barrier(struct pgt_softc *);
112 : uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
113 : void pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
114 : void pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
115 : void pgt_debug_events(struct pgt_softc *, const char *);
116 : uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
117 : void pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
118 : int pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
119 : struct pgt_desc *);
120 : void pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
121 : int pgt_load_firmware(struct pgt_softc *);
122 : void pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
123 : struct pgt_frag *);
124 : int pgt_reset(struct pgt_softc *);
125 : void pgt_stop(struct pgt_softc *, unsigned int);
126 : void pgt_reboot(struct pgt_softc *);
127 : void pgt_init_intr(struct pgt_softc *);
128 : void pgt_update_intr(struct pgt_softc *, int);
129 : struct mbuf
130 : *pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
131 : struct mbuf *, struct ieee80211_node **);
132 : void pgt_input_frames(struct pgt_softc *, struct mbuf *);
133 : void pgt_wakeup_intr(struct pgt_softc *);
134 : void pgt_sleep_intr(struct pgt_softc *);
135 : void pgt_empty_traps(struct pgt_softc_kthread *);
136 : void pgt_per_device_kthread(void *);
137 : void pgt_async_reset(struct pgt_softc *);
138 : void pgt_async_update(struct pgt_softc *);
139 : void pgt_txdone(struct pgt_softc *, enum pgt_queue);
140 : void pgt_rxdone(struct pgt_softc *, enum pgt_queue);
141 : void pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
142 : void pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
143 : struct mbuf
144 : *pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
145 : int pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t);
146 : int pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
147 : int pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
148 : void pgt_state_dump(struct pgt_softc *);
149 : int pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
150 : void pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
151 : struct pgt_desc *, uint16_t, int);
152 : void pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
153 : struct ieee80211_node
154 : *pgt_ieee80211_node_alloc(struct ieee80211com *);
155 : void pgt_ieee80211_newassoc(struct ieee80211com *,
156 : struct ieee80211_node *, int);
157 : void pgt_ieee80211_node_free(struct ieee80211com *,
158 : struct ieee80211_node *);
159 : void pgt_ieee80211_node_copy(struct ieee80211com *,
160 : struct ieee80211_node *,
161 : const struct ieee80211_node *);
162 : int pgt_ieee80211_send_mgmt(struct ieee80211com *,
163 : struct ieee80211_node *, int, int, int);
164 : int pgt_net_attach(struct pgt_softc *);
165 : void pgt_start(struct ifnet *);
166 : int pgt_ioctl(struct ifnet *, u_long, caddr_t);
167 : void pgt_obj_bss2scanres(struct pgt_softc *,
168 : struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
169 : void node_mark_active_ap(void *, struct ieee80211_node *);
170 : void node_mark_active_adhoc(void *, struct ieee80211_node *);
171 : void pgt_watchdog(struct ifnet *);
172 : int pgt_init(struct ifnet *);
173 : void pgt_update_hw_from_sw(struct pgt_softc *, int, int);
174 : void pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
175 : struct pgt_obj_mlme *);
176 : void pgt_update_sw_from_hw(struct pgt_softc *,
177 : struct pgt_async_trap *, struct mbuf *);
178 : int pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
179 : int pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
180 : int pgt_dma_alloc(struct pgt_softc *);
181 : int pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
182 : void pgt_dma_free(struct pgt_softc *);
183 : void pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
184 : void pgt_wakeup(struct pgt_softc *);
185 :
/*
 * Issue a bus-space write barrier so preceding register writes are
 * ordered before any subsequent accesses.
 */
void
pgt_write_memory_barrier(struct pgt_softc *sc)
{
	bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
	    BUS_SPACE_BARRIER_WRITE);
}
192 :
193 : u_int32_t
194 0 : pgt_read_4(struct pgt_softc *sc, uint16_t offset)
195 : {
196 0 : return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
197 : }
198 :
/* Write a 32-bit value to a device register (no readback/flush). */
void
pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
}
204 :
/*
 * Write out 4 bytes and cause a PCI flush by reading back in on a
 * harmless register (PGT_REG_INT_EN); posts the write to the device
 * immediately instead of letting it sit in a PCI write buffer.
 */
void
pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
	(void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
}
215 :
/*
 * Print the state of events in the queues from an interrupt or a trigger.
 * COUNT(i) is the driver-minus-device ring-index difference for queue i,
 * i.e. the number of fragments outstanding on that queue; only emitted
 * when SC_DEBUG_EVENTS is set (and DPRINTF is compiled in).
 */
void
pgt_debug_events(struct pgt_softc *sc, const char *when)
{
#define COUNT(i)	\
	letoh32(sc->sc_cb->pcb_driver_curfrag[i]) -	\
	letoh32(sc->sc_cb->pcb_device_curfrag[i])
	if (sc->sc_debug & SC_DEBUG_EVENTS)
		DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
		    sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
		    COUNT(3), COUNT(4), COUNT(5)));
#undef COUNT
}
231 :
232 : uint32_t
233 0 : pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
234 : {
235 0 : return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
236 0 : letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
237 : }
238 :
/*
 * Re-arm an rx descriptor: republish its DMA buffer address and full
 * PGT_FRAG_SIZE length in the shared fragment entry so the device can
 * fill it again.
 */
void
pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = 0;

	/* NOTE(review): POSTWRITE after updating the fragment entry looks
	 * unusual for publishing data to the device — confirm intent. */
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
}
249 :
/*
 * Map a tx descriptor's PGT_FRAG_SIZE buffer for DMA and publish its
 * address/size in the shared fragment entry.  Returns 0 on success or
 * the bus_dmamap_load(9) error (already logged under PGT_DEBUG).
 */
int
pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_desc *pd)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
	    PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("%s: unable to load %s tx DMA: %d\n",
		    sc->sc_dev.dv_xname,
		    pgt_queue_is_data(pq) ? "data" : "mgmt", error));
		return (error);
	}
	pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
	/* Expose the buffer to the device via the shared fragment entry. */
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = htole16(0);

	/* NOTE(review): POSTWRITE mirrors pgt_reinit_rx_desc_frag — confirm
	 * the intended sync direction here as well. */
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	return (0);
}
274 :
/*
 * Undo pgt_load_tx_desc_frag: unmap the descriptor's DMA buffer and
 * clear its cached bus address.
 */
void
pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
	pd->pd_dmaaddr = 0;
}
281 :
282 : int
283 0 : pgt_load_firmware(struct pgt_softc *sc)
284 : {
285 : int error, reg, dirreg, fwoff, ucodeoff, fwlen;
286 0 : uint8_t *ucode;
287 : uint32_t *uc;
288 0 : size_t size;
289 : char *name;
290 :
291 0 : if (sc->sc_flags & SC_ISL3877)
292 0 : name = "pgt-isl3877";
293 : else
294 : name = "pgt-isl3890"; /* includes isl3880 */
295 :
296 0 : error = loadfirmware(name, &ucode, &size);
297 :
298 0 : if (error != 0) {
299 : DPRINTF(("%s: error %d, could not read firmware %s\n",
300 : sc->sc_dev.dv_xname, error, name));
301 0 : return (EIO);
302 : }
303 :
304 0 : if (size & 3) {
305 : DPRINTF(("%s: bad firmware size %u\n",
306 : sc->sc_dev.dv_xname, size));
307 0 : free(ucode, M_DEVBUF, 0);
308 0 : return (EINVAL);
309 : }
310 :
311 0 : pgt_reboot(sc);
312 :
313 : fwoff = 0;
314 : ucodeoff = 0;
315 0 : uc = (uint32_t *)ucode;
316 : reg = PGT_FIRMWARE_INTERNAL_OFFSET;
317 0 : while (fwoff < size) {
318 0 : pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);
319 :
320 0 : if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
321 0 : fwlen = PGT_DIRECT_MEMORY_SIZE;
322 : else
323 0 : fwlen = size - fwoff;
324 :
325 : dirreg = PGT_DIRECT_MEMORY_OFFSET;
326 0 : while (fwlen > 4) {
327 0 : pgt_write_4(sc, dirreg, uc[ucodeoff]);
328 0 : fwoff += 4;
329 0 : dirreg += 4;
330 0 : reg += 4;
331 0 : fwlen -= 4;
332 0 : ucodeoff++;
333 : }
334 0 : pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
335 0 : fwoff += 4;
336 : dirreg += 4;
337 0 : reg += 4;
338 : fwlen -= 4;
339 0 : ucodeoff++;
340 : }
341 : DPRINTF(("%s: %d bytes microcode loaded from %s\n",
342 : sc->sc_dev.dv_xname, fwoff, name));
343 :
344 0 : reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
345 0 : reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
346 0 : reg |= PGT_CTRL_STAT_RAMBOOT;
347 0 : pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
348 0 : pgt_write_memory_barrier(sc);
349 0 : DELAY(PGT_WRITEIO_DELAY);
350 :
351 0 : reg |= PGT_CTRL_STAT_RESET;
352 0 : pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
353 0 : pgt_write_memory_barrier(sc);
354 0 : DELAY(PGT_WRITEIO_DELAY);
355 :
356 : reg &= ~PGT_CTRL_STAT_RESET;
357 0 : pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
358 0 : pgt_write_memory_barrier(sc);
359 0 : DELAY(PGT_WRITEIO_DELAY);
360 :
361 0 : free(ucode, M_DEVBUF, 0);
362 :
363 0 : return (0);
364 0 : }
365 :
/*
 * Reset one queue's shared-memory state after a (re)boot: zero the
 * device ring index, renumber the free descriptors and bind each to its
 * fragment entry in pqfrags, re-arming rx buffers as we go, and set the
 * driver ring index to the free count (rx) or zero (tx).
 */
void
pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_frag *pqfrags)
{
	struct pgt_desc *pd;
	unsigned int i;

	sc->sc_cb->pcb_device_curfrag[pq] = 0;
	i = 0;
	/* XXX why only freeq ??? */
	TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
		pd->pd_fragnum = i;
		pd->pd_fragp = &pqfrags[i];
		if (pgt_queue_is_rx(pq))
			pgt_reinit_rx_desc_frag(sc, pd);
		i++;
	}
	sc->sc_freeq_count[pq] = i;
	/*
	 * The ring buffer describes how many free buffers are available from
	 * the host (for receive queues) or how many are pending (for
	 * transmit queues).
	 */
	if (pgt_queue_is_rx(pq))
		sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
	else
		sc->sc_cb->pcb_driver_curfrag[pq] = 0;
}
394 :
/*
 * Turn off interrupts, reset the device (possibly loading firmware),
 * and put everything in a known state.  On success the card has been
 * told where the control block lives and will raise only the
 * initialization interrupt; the caller waits for that separately.
 * Returns 0 or the pgt_load_firmware() error.
 */
int
pgt_reset(struct pgt_softc *sc)
{
	int error;

	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/*
	 * Set up the management receive queue, assuming there are no
	 * requests in progress.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
	    &sc->sc_cb->pcb_data_low_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
	    &sc->sc_cb->pcb_data_low_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
	    &sc->sc_cb->pcb_data_high_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
	    &sc->sc_cb->pcb_data_high_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
	    &sc->sc_cb->pcb_mgmt_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
	    &sc->sc_cb->pcb_mgmt_tx[0]);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);

	/* load firmware */
	if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
		error = pgt_load_firmware(sc);
		if (error) {
			printf("%s: firmware load failed\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
		sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
		DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
	}

	/* upload the control block's DMA address */
	pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
	    htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
	DELAY(PGT_WRITEIO_DELAY);

	/* send a reset event */
	pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
	DELAY(PGT_WRITEIO_DELAY);

	/* await only the initialization interrupt */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);
	DELAY(PGT_WRITEIO_DELAY);

	return (0);
}
458 :
/*
 * If we're trying to reset and the device has seemingly not been detached,
 * we'll spend a minute seeing if we can't do the reset.
 *
 * Stop the interface: mark it down, drain all tx queues, reboot the
 * card, and fail any in-progress management requests with ENETRESET.
 * When called with flag == SC_NEEDS_RESET, additionally attempt a full
 * reset/firmware reload; one silent retry is allowed, a second failure
 * panics (XXX).  Always leaves the interface !IFF_RUNNING and in
 * IEEE80211_S_INIT, except for the early return on SC_DYING.
 */
void
pgt_stop(struct pgt_softc *sc, unsigned int flag)
{
	struct ieee80211com *ic;
	unsigned int wokeup;
	int tryagain = 0;

	ic = &sc->sc_ic;

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	sc->sc_flags |= SC_UNINITIALIZED;
	sc->sc_flags |= flag;

	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);

trying_again:
	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/* reboot card */
	pgt_reboot(sc);

	do {
		wokeup = 0;
		/*
		 * We don't expect to be woken up, just to drop the lock
		 * and time out.  Only tx queues can have anything valid
		 * on them outside of an interrupt.
		 */
		while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
			struct pgt_mgmt_desc *pmd;

			pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			/* Waiter sees ENETRESET rather than hanging. */
			pmd->pmd_error = ENETRESET;
			wakeup_one(pmd);
			if (sc->sc_debug & SC_DEBUG_MGMT)
				DPRINTF(("%s: queue: mgmt %p <- %#x "
				    "(drained)\n", sc->sc_dev.dv_xname,
				    pmd, pmd->pmd_oid));
			wokeup++;
		}
		if (wokeup > 0) {
			/* Device is going away entirely: give up early. */
			if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
				sc->sc_flags &= ~flag;
				return;
			}
		}
	} while (wokeup > 0);

	if (flag == SC_NEEDS_RESET) {
		int error;

		DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
		sc->sc_flags &= ~SC_POWERSAVE;
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		error = pgt_reset(sc);
		if (error == 0) {
			/* pgt_init_intr() wakes us when the card answers. */
			tsleep(&sc->sc_flags, 0, "pgtres", hz);
			if (sc->sc_flags & SC_UNINITIALIZED) {
				printf("%s: not responding\n",
				    sc->sc_dev.dv_xname);
				/* Thud. It was probably removed. */
				if (tryagain)
					panic("pgt went for lunch"); /* XXX */
				tryagain = 1;
			} else {
				/* await all interrupts */
				pgt_write_4_flush(sc, PGT_REG_INT_EN,
				    PGT_INT_STAT_SOURCES);
				DELAY(PGT_WRITEIO_DELAY);
				ic->ic_if.if_flags |= IFF_RUNNING;
			}
		}

		if (tryagain)
			goto trying_again;

		sc->sc_flags &= ~flag;
		if (ic->ic_if.if_flags & IFF_RUNNING)
			pgt_update_hw_from_sw(sc,
			    ic->ic_state != IEEE80211_S_INIT,
			    ic->ic_opmode != IEEE80211_M_MONITOR);
	}

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ic->ic_if.if_snd);
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}
555 :
/*
 * Bus-independent attach: set default debug flags, power up the card if
 * the bus front-end supplied an enable hook, allocate DMA resources,
 * reset/boot the device, wait up to one second for its initialization
 * interrupt, attach the network interface and start the per-device
 * kthread.  Errors simply return early, leaving the device inert.
 */
void
pgt_attach(struct device *self)
{
	struct pgt_softc *sc = (struct pgt_softc *)self;
	int error;

	/* debug flags */
	//sc->sc_debug |= SC_DEBUG_QUEUES;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_MGMT;
	sc->sc_debug |= SC_DEBUG_UNEXPECTED;
	//sc->sc_debug |= SC_DEBUG_TRIGGER;	/* verbose */
	//sc->sc_debug |= SC_DEBUG_EVENTS;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_POWER;
	sc->sc_debug |= SC_DEBUG_TRAP;
	sc->sc_debug |= SC_DEBUG_LINK;
	//sc->sc_debug |= SC_DEBUG_RXANNEX;
	//sc->sc_debug |= SC_DEBUG_RXFRAG;
	//sc->sc_debug |= SC_DEBUG_RXETHER;

	/* enable card if possible */
	if (sc->sc_enable != NULL)
		(*sc->sc_enable)(sc);

	error = pgt_dma_alloc(sc);
	if (error)
		return;

	sc->sc_ic.ic_if.if_softc = sc;
	TAILQ_INIT(&sc->sc_mgmtinprog);
	TAILQ_INIT(&sc->sc_kthread.sck_traps);
	sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
	sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;

	error = pgt_reset(sc);
	if (error)
		return;

	/* pgt_init_intr() clears SC_UNINITIALIZED and wakes us. */
	tsleep(&sc->sc_flags, 0, "pgtres", hz);
	if (sc->sc_flags & SC_UNINITIALIZED) {
		printf("%s: not responding\n", sc->sc_dev.dv_xname);
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		return;
	} else {
		/* await all interrupts */
		pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
		DELAY(PGT_WRITEIO_DELAY);
	}

	error = pgt_net_attach(sc);
	if (error)
		return;

	if (kthread_create(pgt_per_device_kthread, sc, NULL,
	    sc->sc_dev.dv_xname) != 0)
		return;

	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}
614 :
/*
 * Bus-independent detach: stop and reboot the card and tear down the
 * network interface, unless attach never completed (in which case only
 * the disable hook and DMA resources need cleaning up).  Returns 0.
 */
int
pgt_detach(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
		/* device was not initialized correctly, so leave early */
		goto out;

	/* stop card */
	pgt_stop(sc, SC_DYING);
	pgt_reboot(sc);

	ieee80211_ifdetach(&sc->sc_ic.ic_if);
	if_detach(&sc->sc_ic.ic_if);

out:
	/* disable card if possible */
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);

	pgt_dma_free(sc);

	return (0);
}
638 :
/*
 * Hard-reboot the card via PGT_REG_CTRL_STAT: clear RESET and RAMBOOT,
 * then pulse RESET high and low again, with write barriers and delays
 * between each step.  The final PGT_RESET_DELAY gives the card time to
 * come back up.
 */
void
pgt_reboot(struct pgt_softc *sc)
{
	uint32_t reg;

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_RESET_DELAY);
}
660 :
661 : void
662 0 : pgt_init_intr(struct pgt_softc *sc)
663 : {
664 0 : if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
665 0 : if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
666 : DPRINTF(("%s: spurious initialization\n",
667 : sc->sc_dev.dv_xname));
668 0 : } else {
669 0 : sc->sc_flags &= ~SC_UNINITIALIZED;
670 0 : wakeup(&sc->sc_flags);
671 : }
672 0 : }
673 :
/*
 * Service an update interrupt (or polled trigger): reconcile the
 * per-queue ring indices in the shared control block with the driver's
 * free/dirty descriptor counts, completing finished tx and rx, then do
 * deferred delivery of received management and data frames.
 *
 * NOTE(review): the previous comment here mentioned a "NULL
 * last_nextpkt" parameter that this function no longer takes; `hack` is
 * only consulted under PGT_BUGGY_INTERRUPT_RECOVERY.
 */
void
pgt_update_intr(struct pgt_softc *sc, int hack)
{
	/* priority order */
	enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
	    PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX,
	    PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX,
	    PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
	};
	struct mbuf *m;
	uint32_t npend;
	unsigned int dirtycount;
	int i;

	/* Pull the device's latest ring indices into host view. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_debug_events(sc, "intr");
	/*
	 * Check for completion of tx in their dirty queues.
	 * Check completion of rx into their dirty queues.
	 */
	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
		size_t qdirty, qfree;

		qdirty = sc->sc_dirtyq_count[pqs[i]];
		qfree = sc->sc_freeq_count[pqs[i]];
		/*
		 * We want the wrap-around here.
		 */
		if (pgt_queue_is_rx(pqs[i])) {
			int data;

			data = pgt_queue_is_data(pqs[i]);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
			if (hack && data)
				continue;
#endif
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			/*
			 * Receive queues clean up below, so qdirty must
			 * always be 0.
			 */
			if (npend > qfree) {
				/* Inconsistent rings: schedule a reset. */
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: rx queue [%u] "
					    "overflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qfree));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			while (qfree-- > npend)
				pgt_rxdone(sc, pqs[i]);
		} else {
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			if (npend > qdirty) {
				/* Inconsistent rings: schedule a reset. */
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: tx queue [%u] "
					    "underflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qdirty));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			/*
			 * If the free queue was empty, or the data transmit
			 * queue just became empty, wake up any waiters.
			 */
			if (qdirty > npend) {
				if (pgt_queue_is_data(pqs[i])) {
					sc->sc_ic.ic_if.if_timer = 0;
					ifq_clr_oactive(
					    &sc->sc_ic.ic_if.if_snd);
				}
				while (qdirty-- > npend)
					pgt_txdone(sc, pqs[i]);
			}
		}
	}

	/*
	 * This is the deferred completion for received management frames
	 * and where we queue network frames for stack input.
	 */
	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
		struct pgt_mgmt_desc *pmd;

		pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
		/*
		 * If there is no mgmt request in progress or the operation
		 * returned is explicitly a trap, this pmd will essentially
		 * be ignored.
		 */
		pgt_mgmtrx_completion(sc, pmd);
	}
	/* Return the consumed mgmt rx fragments to the device. */
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
	    htole32(dirtycount +
	    letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
	    htole32(dirtycount +
	    letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
	    htole32(dirtycount +
	    letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));

	/*
	 * Write out what we've finished with.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
}
804 :
/*
 * Turn a firmware-style (Ethernet-framed) packet back into something
 * that looks like 802.11: prepend an ieee80211_frame plus LLC/SNAP
 * header and fill in fc/addr fields from the saved Ethernet header and
 * the current operating mode.  If ni is non-NULL it also resolves (and
 * references or fabricates, for ad-hoc) the peer node.  Returns the
 * (possibly reallocated) mbuf, or NULL if it was consumed on error.
 */
struct mbuf *
pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
    struct mbuf *m, struct ieee80211_node **ni)
{
	struct ieee80211com *ic;
	struct ieee80211_frame *frame;
	struct llc *snap;

	ic = &sc->sc_ic;
	/* Monitor mode: no rewrite needed, just reference the bss node. */
	if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
		*ni = ieee80211_ref_node(ic->ic_bss);
		(*ni)->ni_inact = 0;
		return (m);
	}

	M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
	if (m == NULL)
		return (m);
	if (m->m_len < sizeof(*frame) + sizeof(*snap)) {
		m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
		if (m == NULL)
			return (m);
	}
	frame = mtod(m, struct ieee80211_frame *);
	snap = (struct llc *)&frame[1];
	if (ni != NULL) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			*ni = ieee80211_ref_node(ic->ic_bss);
		}
#ifndef IEEE80211_STA_ONLY
		else {
			*ni = ieee80211_find_node(ic, eh->ether_shost);
			/*
			 * Make up associations for ad-hoc mode. To support
			 * ad-hoc WPA, we'll need to maintain a bounded
			 * pool of ad-hoc stations.
			 */
			if (*ni == NULL &&
			    ic->ic_opmode != IEEE80211_M_HOSTAP) {
				*ni = ieee80211_dup_bss(ic, eh->ether_shost);
				if (*ni != NULL) {
					(*ni)->ni_associd = 1;
					ic->ic_newassoc(ic, *ni, 1);
				}
			}
			if (*ni == NULL) {
				m_freem(m);
				return (NULL);
			}
		}
#endif
		(*ni)->ni_inact = 0;
	}
	/* RFC 1042 LLC/SNAP encapsulation of the original ethertype. */
	snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
	snap->llc_control = LLC_UI;
	snap->llc_snap.org_code[0] = 0;
	snap->llc_snap.org_code[1] = 0;
	snap->llc_snap.org_code[2] = 0;
	snap->llc_snap.ether_type = eh->ether_type;
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
	/* Doesn't look like much of the 802.11 header is available. */
	*(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
	/*
	 * Translate the addresses; WDS is not handled.
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
		break;
	case IEEE80211_M_HOSTAP:
		/* HostAP forwarding defaults to being done on firmware. */
		frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
		break;
#endif
	default:
		break;
	}
	return (m);
}
898 :
/*
 * Hand a chain of received frames (linked through m_nextpkt) up to
 * the 802.11 stack.  In monitor mode each mbuf begins with a bare
 * pgt_rx_header; otherwise it begins with a full pgt_rx_annex that
 * also carries the 802.3 addresses the firmware decapsulated.  The
 * header/annex is stripped here and the payload re-encapsulated as
 * an 802.11 frame via pgt_ieee80211_encap() before input.
 */
void
pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
{
	struct ether_header eh;
	struct ifnet *ifp;
	struct ieee80211_channel *chan;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct ieee80211com *ic;
	struct pgt_rx_annex *pra;
	struct pgt_rx_header *pha;
	struct mbuf *next;
	unsigned int n;
	uint32_t rstamp;
	uint8_t rssi;

	ic = &sc->sc_ic;
	ifp = &ic->ic_if;
	/* Detach each packet from the chain before processing it. */
	for (next = m; m != NULL; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
			/* Monitor mode: only the bare rx header is present. */
			if (m->m_len < sizeof(*pha)) {
				m = m_pullup(m, sizeof(*pha));
				if (m == NULL) {
					if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
						DPRINTF(("%s: m_pullup "
						    "failure\n",
						    sc->sc_dev.dv_xname));
					ifp->if_ierrors++;
					continue;
				}
			}
			pha = mtod(m, struct pgt_rx_header *);
			pra = NULL;
			goto input;
		}

		/* Normal operation: make the whole annex contiguous. */
		if (m->m_len < sizeof(*pra)) {
			m = m_pullup(m, sizeof(*pra));
			if (m == NULL) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: m_pullup failure\n",
					    sc->sc_dev.dv_xname));
				ifp->if_ierrors++;
				continue;
			}
		}
		pra = mtod(m, struct pgt_rx_annex *);
		pha = &pra->pra_header;
		if (sc->sc_debug & SC_DEBUG_RXANNEX)
			DPRINTF(("%s: rx annex: ? %04x "
			    "len %u clock %u flags %02x ? %02x rate %u ? %02x "
			    "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pha->pra_unknown0),
			    letoh16(pha->pra_length),
			    letoh32(pha->pra_clock), pha->pra_flags,
			    pha->pra_unknown1, pha->pra_rate,
			    pha->pra_unknown2, letoh32(pha->pra_frequency),
			    pha->pra_unknown3, pha->pra_rssi,
			    pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
		if (sc->sc_debug & SC_DEBUG_RXETHER)
			DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    ether_sprintf(pra->pra_ether_dhost),
			    ether_sprintf(pra->pra_ether_shost),
			    ntohs(pra->pra_ether_type)));

		/* Save the 802.3 header; the annex is chopped off below. */
		memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
		memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
		eh.ether_type = pra->pra_ether_type;

input:
		/*
		 * This flag is set if e.g. packet could not be decrypted.
		 */
		if (pha->pra_flags & PRA_FLAG_BAD) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		/*
		 * After getting what we want, chop off the annex, then
		 * turn into something that looks like it really was
		 * 802.11.
		 */
		rssi = pha->pra_rssi;
		rstamp = letoh32(pha->pra_clock);
		n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
		if (n <= IEEE80211_CHAN_MAX)
			chan = &ic->ic_channels[n];
		else
			chan = ic->ic_bss->ni_chan;
		/* Send to 802.3 listeners. */
		if (pra) {
			m_adj(m, sizeof(*pra));
		} else
			m_adj(m, sizeof(*pha));

		m = pgt_ieee80211_encap(sc, &eh, m, &ni);
		if (m != NULL) {
#if NBPFILTER > 0
			/* Feed the radiotap tap before normal input. */
			if (sc->sc_drvbpf != NULL) {
				struct mbuf mb;
				struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;

				tap->wr_flags = 0;
				tap->wr_chan_freq = htole16(chan->ic_freq);
				tap->wr_chan_flags = htole16(chan->ic_flags);
				tap->wr_rssi = rssi;
				tap->wr_max_rssi = ic->ic_max_rssi;

				mb.m_data = (caddr_t)tap;
				mb.m_len = sc->sc_rxtap_len;
				mb.m_next = m;
				mb.m_nextpkt = NULL;
				mb.m_type = 0;
				mb.m_flags = 0;
				bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
			}
#endif
			rxi.rxi_flags = 0;
			ni->ni_rssi = rxi.rxi_rssi = rssi;
			ni->ni_rstamp = rxi.rxi_tstamp = rstamp;
			ieee80211_input(ifp, m, ni, &rxi);
			/*
			 * The frame may have caused the node to be marked for
			 * reclamation (e.g. in response to a DEAUTH message)
			 * so use free_node here instead of unref_node.
			 */
			if (ni == ic->ic_bss)
				ieee80211_unref_node(&ni);
			else
				ieee80211_release_node(&sc->sc_ic, ni);
		} else {
			ifp->if_ierrors++;
		}
	}
}
1041 :
1042 : void
1043 0 : pgt_wakeup_intr(struct pgt_softc *sc)
1044 : {
1045 : int shouldupdate;
1046 : int i;
1047 :
1048 : shouldupdate = 0;
1049 : /* Check for any queues being empty before updating. */
1050 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1051 : sc->sc_cbdmam->dm_mapsize,
1052 : BUS_DMASYNC_POSTREAD);
1053 0 : for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
1054 0 : if (pgt_queue_is_tx(i))
1055 0 : shouldupdate = pgt_queue_frags_pending(sc, i);
1056 : else
1057 0 : shouldupdate = pgt_queue_frags_pending(sc, i) <
1058 0 : sc->sc_freeq_count[i];
1059 : }
1060 0 : if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
1061 0 : shouldupdate = 1;
1062 0 : if (sc->sc_debug & SC_DEBUG_POWER)
1063 : DPRINTF(("%s: wakeup interrupt (update = %d)\n",
1064 : sc->sc_dev.dv_xname, shouldupdate));
1065 0 : sc->sc_flags &= ~SC_POWERSAVE;
1066 0 : if (shouldupdate) {
1067 0 : pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1068 0 : DELAY(PGT_WRITEIO_DELAY);
1069 0 : }
1070 0 : }
1071 :
1072 : void
1073 0 : pgt_sleep_intr(struct pgt_softc *sc)
1074 : {
1075 : int allowed;
1076 : int i;
1077 :
1078 : allowed = 1;
1079 : /* Check for any queues not being empty before allowing. */
1080 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1081 : sc->sc_cbdmam->dm_mapsize,
1082 : BUS_DMASYNC_POSTREAD);
1083 0 : for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
1084 0 : if (pgt_queue_is_tx(i))
1085 0 : allowed = pgt_queue_frags_pending(sc, i) == 0;
1086 : else
1087 0 : allowed = pgt_queue_frags_pending(sc, i) >=
1088 0 : sc->sc_freeq_count[i];
1089 : }
1090 0 : if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
1091 0 : allowed = 0;
1092 0 : if (sc->sc_debug & SC_DEBUG_POWER)
1093 : DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
1094 : sc->sc_dev.dv_xname, allowed));
1095 0 : if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
1096 0 : sc->sc_flags |= SC_POWERSAVE;
1097 0 : pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
1098 0 : DELAY(PGT_WRITEIO_DELAY);
1099 0 : }
1100 0 : }
1101 :
1102 : void
1103 0 : pgt_empty_traps(struct pgt_softc_kthread *sck)
1104 : {
1105 : struct pgt_async_trap *pa;
1106 : struct mbuf *m;
1107 :
1108 0 : while (!TAILQ_EMPTY(&sck->sck_traps)) {
1109 : pa = TAILQ_FIRST(&sck->sck_traps);
1110 0 : TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
1111 0 : m = pa->pa_mbuf;
1112 0 : m_freem(m);
1113 : }
1114 0 : }
1115 :
/*
 * Per-device worker thread.  Sleeps until an async reset, a firmware
 * trap, or an update request is posted (via wakeup on &sc->sc_kthread),
 * then services them in that priority order.  Drains leftover traps
 * and exits when sck_exit is set.
 */
void
pgt_per_device_kthread(void *argp)
{
	struct pgt_softc *sc;
	struct pgt_softc_kthread *sck;
	struct pgt_async_trap *pa;
	struct mbuf *m;
	int s;

	sc = argp;
	sck = &sc->sc_kthread;
	while (!sck->sck_exit) {
		/* Sleep only when no work of any kind is queued. */
		if (!sck->sck_update && !sck->sck_reset &&
		    TAILQ_EMPTY(&sck->sck_traps))
			tsleep(&sc->sc_kthread, 0, "pgtkth", 0);
		if (sck->sck_reset) {
			DPRINTF(("%s: [thread] async reset\n",
			    sc->sc_dev.dv_xname));
			/* A reset supersedes pending updates and traps. */
			sck->sck_reset = 0;
			sck->sck_update = 0;
			pgt_empty_traps(sck);
			s = splnet();
			pgt_stop(sc, SC_NEEDS_RESET);
			splx(s);
		} else if (!TAILQ_EMPTY(&sck->sck_traps)) {
			DPRINTF(("%s: [thread] got a trap\n",
			    sc->sc_dev.dv_xname));
			pa = TAILQ_FIRST(&sck->sck_traps);
			TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
			m = pa->pa_mbuf;
			/* Strip the pgt_async_trap linkage header. */
			m_adj(m, sizeof(*pa));
			pgt_update_sw_from_hw(sc, pa, m);
			m_freem(m);
		} else if (sck->sck_update) {
			sck->sck_update = 0;
			pgt_update_sw_from_hw(sc, NULL, NULL);
		}
	}
	pgt_empty_traps(sck);
	kthread_exit(0);
}
1157 :
/*
 * Ask the kthread to perform a device reset.  A no-op while the
 * device is detaching or a reset is already pending.
 */
void
pgt_async_reset(struct pgt_softc *sc)
{
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	sc->sc_kthread.sck_reset = 1;
	wakeup(&sc->sc_kthread);
}
1166 :
/*
 * Ask the kthread to resynchronize software state with the hardware
 * (pgt_update_sw_from_hw).  A no-op while the device is detaching.
 */
void
pgt_async_update(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_DYING)
		return;
	sc->sc_kthread.sck_update = 1;
	wakeup(&sc->sc_kthread);
}
1175 :
/*
 * Hardware interrupt handler.  Acknowledges and dispatches all
 * pending interrupt sources, then kicks the transmit path if frames
 * are queued.  Returns 0 when the interrupt was not ours (or the
 * device is asleep), 1 once serviced.
 */
int
pgt_intr(void *arg)
{
	struct pgt_softc *sc;
	struct ifnet *ifp;
	u_int32_t reg;

	sc = arg;
	ifp = &sc->sc_ic.ic_if;

	/*
	 * Here the Linux driver ands in the value of the INT_EN register,
	 * and masks off everything but the documented interrupt bits. Why?
	 *
	 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
	 * other times.
	 */
	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
	    sc->sc_flags & SC_POWERSAVE) {
		/*
		 * Don't try handling the interrupt in sleep mode.
		 */
		reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
		if (reg & PGT_CTRL_STAT_SLEEPMODE)
			return (0);
	}
	reg = pgt_read_4(sc, PGT_REG_INT_STAT);
	if (reg == 0)
		return (0); /* This interrupt is not from us */

	/* Acknowledge every source we saw in one write. */
	pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
	if (reg & PGT_INT_STAT_INIT)
		pgt_init_intr(sc);
	if (reg & PGT_INT_STAT_UPDATE) {
		pgt_update_intr(sc, 0);
		/*
		 * If we got an update, it's not really asleep.
		 */
		sc->sc_flags &= ~SC_POWERSAVE;
		/*
		 * Pretend I have any idea what the documentation
		 * would say, and just give it a shot sending an
		 * "update" after acknowledging the interrupt
		 * bits and writing out the new control block.
		 */
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
	if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
		pgt_sleep_intr(sc);
	if (reg & PGT_INT_STAT_WAKEUP)
		pgt_wakeup_intr(sc);

	/* A servicing path flagged SC_INTR_RESET; hand off to the kthread. */
	if (sc->sc_flags & SC_INTR_RESET) {
		sc->sc_flags &= ~SC_INTR_RESET;
		pgt_async_reset(sc);
	}

	if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
		DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
		    sc->sc_dev.dv_xname,
		    reg & ~PGT_INT_STAT_SOURCES,
		    pgt_read_4(sc, PGT_REG_CTRL_STAT)));
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		pgt_start(ifp);

	return (1);
}
1246 :
/*
 * Retire the oldest completed tx fragment on queue pq: move its
 * descriptor from the dirty queue back to the free queue and unload
 * its DMA mapping.
 * NOTE(review): assumes the dirty queue is non-empty -- caller must
 * guarantee this.
 */
void
pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
	TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	/* Management frames want completion information. */
	if (sc->sc_debug & SC_DEBUG_QUEUES) {
		DPRINTF(("%s: queue: tx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
		if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
			struct pgt_mgmt_frame *pmf;

			pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
			DPRINTF(("%s: queue: txmgmt %p <- "
			    "(ver %u, op %u, flags %#x)\n",
			    sc->sc_dev.dv_xname,
			    pd, pmf->pmf_version, pmf->pmf_operation,
			    pmf->pmf_flags));
		}
	}
	pgt_unload_tx_desc_frag(sc, pd);
}
1277 :
/*
 * Account for an rx fragment the firmware has filled on queue pq:
 * move its descriptor from the free queue to the dirty queue, where
 * the completion path will harvest it.
 * NOTE(review): assumes the free queue is non-empty -- caller must
 * guarantee this.
 */
void
pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: rx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
	/* Only the more-fragments flag is expected from the device. */
	if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
	    pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
		DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
		    sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
}
1299 :
1300 : /*
1301 : * Traps are generally used for the firmware to report changes in state
1302 : * back to the host. Mostly this processes changes in link state, but
1303 : * it needs to also be used to initiate WPA and other authentication
1304 : * schemes in terms of client (station) or server (access point).
1305 : */
/*
 * Copy a firmware trap (oid + payload) into a freshly allocated mbuf,
 * prefixed by a pgt_async_trap used for list linkage, and queue it
 * for the per-device kthread.  Allocation failures silently drop the
 * trap.
 */
void
pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
    size_t size)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;
	char *p;
	size_t total;

	if (sc->sc_flags & SC_DYING)
		return;

	/* Room for the linkage header, the oid and the trap payload. */
	total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
	if (total > MLEN) {
		/* Too big for a plain mbuf; add a cluster. */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return;
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m = NULL;
		}
	} else
		m = m_get(M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;
	else
		m->m_len = total;

	/* Data layout: [pgt_async_trap][oid][payload]. */
	pa = mtod(m, struct pgt_async_trap *);
	p = mtod(m, char *) + sizeof(*pa);
	*(uint32_t *)p = oid;
	p += sizeof(uint32_t);
	memcpy(p, trapdata, size);
	pa->pa_mbuf = m;

	TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
	wakeup(&sc->sc_kthread);
}
1346 :
1347 : /*
1348 : * Process a completed management response (all requests should be
1349 : * responded to, quickly) or an event (trap).
1350 : */
/*
 * Process one completed management-rx fragment: either the reply to
 * the oldest outstanding request (pmd) or an asynchronous trap from
 * the firmware.  pmd may be NULL when no request is outstanding.
 * The rx descriptor is recycled in every case before returning.
 */
void
pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	uint32_t oid, size;

	/* Recycle the descriptor up front; pd->pd_mem is still read below. */
	pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
	TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
	sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
	    pd, pd_link);
	sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
	if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt desc too small: %u\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pd->pd_fragp->pf_size)));
		goto out_nopmd;
	}
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	if (pmf->pmf_version != PMF_VER) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt version %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_version));
		goto out_nopmd;
	}
	/*
	 * NOTE(review): the next two checks jump to "out", which
	 * dereferences pmd; that path looks reachable with pmd == NULL
	 * (no request outstanding) -- confirm against the callers.
	 */
	if (pmf->pmf_device != PMF_DEV) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt dev %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_device));
		goto out;
	}
	if (pmf->pmf_flags & ~PMF_FLAG_VALID) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt flags %x\n",
			    sc->sc_dev.dv_xname,
			    pmf->pmf_flags & ~PMF_FLAG_VALID));
		goto out;
	}
	/* The frame flags indicate which byte order the firmware used. */
	if (pmf->pmf_flags & PMF_FLAG_LE) {
		oid = letoh32(pmf->pmf_oid);
		size = letoh32(pmf->pmf_size);
	} else {
		oid = betoh32(pmf->pmf_oid);
		size = betoh32(pmf->pmf_size);
	}
	if (pmf->pmf_operation == PMF_OP_TRAP) {
		pmd = NULL; /* ignored */
		DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmf->pmf_operation, oid, size));
		/* Defer trap processing to the per-device kthread. */
		pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
		    min(size, PGT_FRAG_SIZE - sizeof(*pmf)));
		goto out_nopmd;
	}
	if (pmd == NULL) {
		if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT))
			DPRINTF(("%s: spurious mgmt received "
			    "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
			    pmf->pmf_operation, oid, size));
		goto out_nopmd;
	}
	switch (pmf->pmf_operation) {
	case PMF_OP_RESPONSE:
		pmd->pmd_error = 0;
		break;
	case PMF_OP_ERROR:
		pmd->pmd_error = EPERM;
		goto out;
	default:
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt op %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_operation));
		pmd->pmd_error = EIO;
		goto out;
	}
	if (oid != pmd->pmd_oid) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
			    sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
		pmd->pmd_oid = oid;
	}
	if (pmd->pmd_recvbuf != NULL) {
		if (size > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
				    sc->sc_dev.dv_xname, oid, size));
			pmd->pmd_error = EIO;
			goto out;
		}
		if (size > pmd->pmd_len)
			pmd->pmd_error = ENOMEM;
		else
			memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),
			    size);
		/* Report the device's actual size back to the requester. */
		pmd->pmd_len = size;
	}

out:
	/* Complete the request and wake the sleeping requester. */
	TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
	wakeup_one(pmd);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
out_nopmd:
	pgt_reinit_rx_desc_frag(sc, pd);
}
1460 :
1461 : /*
1462 : * Queue packets for reception and defragmentation. I don't know now
1463 : * whether the rx queue being full enough to start, but not finish,
1464 : * queueing a fragmented packet, can happen.
1465 : */
/*
 * Gather the completed rx fragments on queue pq into one mbuf chain.
 * Returns the chain (with m_pkthdr.len set on the head) or NULL on
 * allocation/size failure, in which case the partial chain is freed
 * and if_ierrors is bumped.  Descriptors are recycled as they are
 * consumed.
 */
struct mbuf *
pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct ifnet *ifp;
	struct pgt_desc *pd;
	struct mbuf *top, **mp, *m;
	size_t datalen;
	uint16_t morefrags, dataoff;
	int tlen = 0;

	ifp = &sc->sc_ic.ic_if;
	m = NULL;
	top = NULL;
	mp = &top;

	/* Collect fragments until one arrives without the MF flag. */
	while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
		sc->sc_dirtyq_count[pq]--;
		datalen = letoh16(pd->pd_fragp->pf_size);
		/* Payload offset within this fragment's DMA buffer. */
		dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
		morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);

		if (sc->sc_debug & SC_DEBUG_RXFRAG)
			DPRINTF(("%s: rx frag: len %u memoff %u flags %x\n",
			    sc->sc_dev.dv_xname, datalen, dataoff,
			    pd->pd_fragp->pf_flags));

		/* Add the (two+?) bytes for the header. */
		if (datalen + dataoff > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s data rx too big: %u\n",
				    sc->sc_dev.dv_xname, datalen));
			goto fail;
		}

		/* Only the first fragment gets a packet header. */
		if (m == NULL)
			MGETHDR(m, M_DONTWAIT, MT_DATA);
		else
			m = m_get(M_DONTWAIT, MT_DATA);

		if (m == NULL)
			goto fail;
		if (datalen > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_free(m);
				goto fail;
			}
		}
		/* Copy the payload out; the descriptor is reused at once. */
		bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
		m->m_len = datalen;
		tlen += datalen;

		*mp = m;
		mp = &m->m_next;

		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
		sc->sc_freeq_count[pq]++;
		pgt_reinit_rx_desc_frag(sc, pd);

		if (!morefrags)
			break;
	}

	if (top) {
		top->m_pkthdr.len = tlen;
	}
	return (top);

fail:
	/* Recycle the current descriptor, then toss the partial chain. */
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	pgt_reinit_rx_desc_frag(sc, pd);

	ifp->if_ierrors++;
	m_freem(top);
	return (NULL);
}
1544 :
1545 : int
1546 0 : pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
1547 : void *arg, size_t arglen)
1548 : {
1549 0 : struct pgt_mgmt_desc pmd;
1550 : int error;
1551 :
1552 0 : bzero(&pmd, sizeof(pmd));
1553 0 : pmd.pmd_recvbuf = arg;
1554 0 : pmd.pmd_len = arglen;
1555 0 : pmd.pmd_oid = oid;
1556 :
1557 0 : error = pgt_mgmt_request(sc, &pmd);
1558 0 : if (error == 0)
1559 0 : error = pmd.pmd_error;
1560 0 : if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1561 : DPRINTF(("%s: failure getting oid %#x: %d\n",
1562 : sc->sc_dev.dv_xname, oid, error));
1563 :
1564 0 : return (error);
1565 0 : }
1566 :
1567 : int
1568 0 : pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
1569 : void *arg, size_t arglen)
1570 : {
1571 0 : struct pgt_mgmt_desc pmd;
1572 : int error;
1573 :
1574 0 : bzero(&pmd, sizeof(pmd));
1575 0 : pmd.pmd_sendbuf = arg;
1576 0 : pmd.pmd_recvbuf = arg;
1577 0 : pmd.pmd_len = arglen;
1578 0 : pmd.pmd_oid = oid;
1579 :
1580 0 : error = pgt_mgmt_request(sc, &pmd);
1581 0 : if (error == 0)
1582 0 : error = pmd.pmd_error;
1583 0 : if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1584 : DPRINTF(("%s: failure retrieving oid %#x: %d\n",
1585 : sc->sc_dev.dv_xname, oid, error));
1586 :
1587 0 : return (error);
1588 0 : }
1589 :
1590 : int
1591 0 : pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
1592 : const void *arg, size_t arglen)
1593 : {
1594 0 : struct pgt_mgmt_desc pmd;
1595 : int error;
1596 :
1597 0 : bzero(&pmd, sizeof(pmd));
1598 0 : pmd.pmd_sendbuf = arg;
1599 0 : pmd.pmd_len = arglen;
1600 0 : pmd.pmd_oid = oid;
1601 :
1602 0 : error = pgt_mgmt_request(sc, &pmd);
1603 0 : if (error == 0)
1604 0 : error = pmd.pmd_error;
1605 0 : if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1606 : DPRINTF(("%s: failure setting oid %#x: %d\n",
1607 : sc->sc_dev.dv_xname, oid, error));
1608 :
1609 0 : return (error);
1610 0 : }
1611 :
/*
 * Print control/interrupt register state plus both the driver's and
 * the device's current fragment counters, to help diagnose stuck
 * queues (e.g. on management request timeout).
 */
void
pgt_state_dump(struct pgt_softc *sc)
{
	printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    pgt_read_4(sc, PGT_REG_CTRL_STAT),
	    pgt_read_4(sc, PGT_REG_INT_STAT));

	printf("%s: state dump: driver curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[5]));

	printf("%s: state dump: device curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_device_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[5]));
}
1644 :
/*
 * Send one management request and sleep until the completion path
 * fills in pmd_error, or roughly a second elapses.  The operation
 * (get vs. set) is chosen from which of pmd_sendbuf/pmd_recvbuf are
 * set.  Returns 0 (result in pmd), or EIO/ENOMEM/ETIMEDOUT.  Must be
 * called from a context that may tsleep().
 */
int
pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	int error, i;

	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return (EIO);
	if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
		return (ENOMEM);
	pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
	if (pd == NULL)
		return (ENOMEM);
	error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
	if (error)
		return (error);
	/* Build the management frame header in the DMA buffer. */
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	pmf->pmf_version = PMF_VER;
	/* "get" and "retrieve" operations look the same */
	if (pmd->pmd_recvbuf != NULL)
		pmf->pmf_operation = PMF_OP_GET;
	else
		pmf->pmf_operation = PMF_OP_SET;
	pmf->pmf_oid = htobe32(pmd->pmd_oid);
	pmf->pmf_device = PMF_DEV;
	pmf->pmf_flags = 0;
	pmf->pmf_size = htobe32(pmd->pmd_len);
	/* "set" and "retrieve" operations both send data */
	if (pmd->pmd_sendbuf != NULL)
		memcpy(pmf + 1, pmd->pmd_sendbuf, pmd->pmd_len);
	else
		bzero(pmf + 1, pmd->pmd_len);
	/* The completion path finds us through sc_mgmtinprog. */
	pmd->pmd_error = EINPROGRESS;
	TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
	pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
	    sizeof(*pmf) + pmd->pmd_len, 0);
	/*
	 * Try for one second, triggering 10 times.
	 *
	 * Do our best to work around seemingly buggy CardBus controllers
	 * on Soekris 4521 that fail to get interrupts with alarming
	 * regularity: run as if an interrupt occurred and service every
	 * queue except for mbuf reception.
	 */
	i = 0;
	do {
		/* Wake up early if the completion path signals us. */
		if (tsleep(pmd, 0, "pgtmgm", hz / 10) != EWOULDBLOCK)
			break;
		if (pmd->pmd_error != EINPROGRESS)
			break;
		if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) {
			pmd->pmd_error = EIO;
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			break;
		}
		if (i != 9)
			pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
		pgt_update_intr(sc, 0);
#endif
	} while (i++ < 10);

	if (pmd->pmd_error == EINPROGRESS) {
		/* Never completed: give up, dump state and reset. */
		printf("%s: timeout waiting for management "
		    "packet response to %#x\n",
		    sc->sc_dev.dv_xname, pmd->pmd_oid);
		TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			pgt_state_dump(sc);
		pgt_async_reset(sc);
		error = ETIMEDOUT;
	} else
		error = 0;

	return (error);
}
1727 :
/*
 * Hand a filled descriptor on queue pq to the firmware: move it to
 * the dirty queue, publish its size/flags in the fragment table,
 * advance the driver's fragment counter in the shared control block
 * and, unless more fragments of the same frame follow (morecoming),
 * ring the doorbell via pgt_maybe_trigger().
 */
void
pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd,
    uint16_t len, int morecoming)
{
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname,
		    pd->pd_fragnum, pq));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	if (morecoming)
		pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF);
	pd->pd_fragp->pf_size = htole16(len);
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	/* Tell the device another fragment is ready on this queue. */
	sc->sc_cb->pcb_driver_curfrag[pq] =
	    htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
	if (!morecoming)
		pgt_maybe_trigger(sc, pq);
}
1756 :
1757 : void
1758 0 : pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq)
1759 : {
1760 : unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */
1761 : uint32_t reg;
1762 :
1763 0 : if (sc->sc_debug & SC_DEBUG_TRIGGER)
1764 : DPRINTF(("%s: triggered by queue [%u]\n",
1765 : sc->sc_dev.dv_xname, pq));
1766 0 : pgt_debug_events(sc, "trig");
1767 0 : if (sc->sc_flags & SC_POWERSAVE) {
1768 : /* Magic values ahoy? */
1769 0 : if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
1770 0 : do {
1771 0 : reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
1772 0 : if (!(reg & PGT_CTRL_STAT_SLEEPMODE))
1773 0 : DELAY(PGT_WRITEIO_DELAY);
1774 0 : } while (tries-- != 0);
1775 0 : if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
1776 0 : if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
1777 : DPRINTF(("%s: timeout triggering from "
1778 : "sleep mode\n",
1779 : sc->sc_dev.dv_xname));
1780 0 : pgt_async_reset(sc);
1781 0 : return;
1782 : }
1783 : }
1784 0 : pgt_write_4_flush(sc, PGT_REG_DEV_INT,
1785 : PGT_DEV_INT_WAKEUP);
1786 0 : DELAY(PGT_WRITEIO_DELAY);
1787 : /* read the status back in */
1788 0 : (void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
1789 0 : DELAY(PGT_WRITEIO_DELAY);
1790 0 : } else {
1791 0 : pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1792 0 : DELAY(PGT_WRITEIO_DELAY);
1793 : }
1794 0 : }
1795 :
1796 : struct ieee80211_node *
1797 0 : pgt_ieee80211_node_alloc(struct ieee80211com *ic)
1798 : {
1799 : struct pgt_ieee80211_node *pin;
1800 :
1801 0 : pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO);
1802 0 : if (pin != NULL) {
1803 0 : pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
1804 0 : }
1805 0 : return (struct ieee80211_node *)pin;
1806 : }
1807 :
/*
 * 802.11 newassoc hook: take an extra reference on the node.
 */
void
pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
    int reallynew)
{
	ieee80211_ref_node(ni);
}
1814 :
1815 : void
1816 0 : pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1817 : {
1818 : struct pgt_ieee80211_node *pin;
1819 :
1820 0 : pin = (struct pgt_ieee80211_node *)ni;
1821 0 : free(pin, M_DEVBUF, 0);
1822 0 : }
1823 :
1824 : void
1825 0 : pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
1826 : const struct ieee80211_node *src)
1827 : {
1828 : const struct pgt_ieee80211_node *psrc;
1829 : struct pgt_ieee80211_node *pdst;
1830 :
1831 0 : psrc = (const struct pgt_ieee80211_node *)src;
1832 0 : pdst = (struct pgt_ieee80211_node *)dst;
1833 0 : bcopy(psrc, pdst, sizeof(*psrc));
1834 0 : }
1835 :
/*
 * 802.11 send_mgmt hook: not supported by this driver; always
 * returns EOPNOTSUPP.
 */
int
pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
    int type, int arg1, int arg2)
{
	return (EOPNOTSUPP);
}
1842 :
1843 : int
1844 0 : pgt_net_attach(struct pgt_softc *sc)
1845 : {
1846 0 : struct ieee80211com *ic = &sc->sc_ic;
1847 0 : struct ifnet *ifp = &ic->ic_if;
1848 : struct ieee80211_rateset *rs;
1849 0 : uint8_t rates[IEEE80211_RATE_MAXSIZE];
1850 0 : struct pgt_obj_buffer psbuffer;
1851 : struct pgt_obj_frequencies *freqs;
1852 0 : uint32_t phymode, country;
1853 : unsigned int chan, i, j, firstchan = -1;
1854 : int error;
1855 :
1856 0 : psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
1857 0 : psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
1858 0 : error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(country));
1859 0 : if (error)
1860 0 : return (error);
1861 0 : error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
1862 0 : if (error)
1863 0 : return (error);
1864 0 : error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
1865 : sizeof(ic->ic_myaddr));
1866 0 : if (error)
1867 0 : return (error);
1868 0 : error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
1869 0 : if (error)
1870 0 : return (error);
1871 :
1872 0 : ifp->if_softc = sc;
1873 0 : ifp->if_ioctl = pgt_ioctl;
1874 0 : ifp->if_start = pgt_start;
1875 0 : ifp->if_watchdog = pgt_watchdog;
1876 0 : ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
1877 0 : strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1878 :
1879 0 : IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1880 :
1881 : /*
1882 : * Set channels
1883 : *
1884 : * Prism hardware likes to report supported frequencies that are
1885 : * not actually available for the country of origin.
1886 : */
1887 : j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t);
1888 0 : freqs = malloc(j, M_DEVBUF, M_WAITOK);
1889 0 : error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j);
1890 0 : if (error) {
1891 0 : free(freqs, M_DEVBUF, 0);
1892 0 : return (error);
1893 : }
1894 :
1895 0 : for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) {
1896 0 : chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]),
1897 : 0);
1898 :
1899 0 : if (chan > IEEE80211_CHAN_MAX) {
1900 0 : printf("%s: reported bogus channel (%uMHz)\n",
1901 : sc->sc_dev.dv_xname, chan);
1902 0 : free(freqs, M_DEVBUF, 0);
1903 0 : return (EIO);
1904 : }
1905 :
1906 0 : if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) {
1907 0 : if (!(phymode & htole32(PGT_OID_PHY_2400MHZ)))
1908 : continue;
1909 0 : if (country == letoh32(PGT_COUNTRY_USA)) {
1910 0 : if (chan >= 12 && chan <= 14)
1911 : continue;
1912 : }
1913 0 : if (chan <= 14)
1914 0 : ic->ic_channels[chan].ic_flags |=
1915 : IEEE80211_CHAN_B;
1916 0 : ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG;
1917 0 : } else {
1918 0 : if (!(phymode & htole32(PGT_OID_PHY_5000MHZ)))
1919 : continue;
1920 0 : ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A;
1921 : }
1922 :
1923 0 : ic->ic_channels[chan].ic_freq =
1924 0 : letoh16(freqs->pof_freqlist_mhz[i]);
1925 :
1926 0 : if (firstchan == -1)
1927 0 : firstchan = chan;
1928 :
1929 : DPRINTF(("%s: set channel %d to freq %uMHz\n",
1930 : sc->sc_dev.dv_xname, chan,
1931 : letoh16(freqs->pof_freqlist_mhz[i])));
1932 : }
1933 0 : free(freqs, M_DEVBUF, 0);
1934 0 : if (firstchan == -1) {
1935 0 : printf("%s: no channels found\n", sc->sc_dev.dv_xname);
1936 0 : return (EIO);
1937 : }
1938 :
1939 : /*
1940 : * Set rates
1941 : */
1942 0 : bzero(rates, sizeof(rates));
1943 0 : error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates));
1944 0 : if (error)
1945 0 : return (error);
1946 0 : for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) {
1947 0 : switch (rates[i]) {
1948 : case 2:
1949 : case 4:
1950 : case 11:
1951 : case 22:
1952 : case 44: /* maybe */
1953 0 : if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1954 0 : rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
1955 0 : rs->rs_rates[rs->rs_nrates++] = rates[i];
1956 0 : }
1957 : default:
1958 0 : if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1959 0 : rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
1960 0 : rs->rs_rates[rs->rs_nrates++] = rates[i];
1961 0 : }
1962 0 : if (phymode & htole32(PGT_OID_PHY_5000MHZ)) {
1963 0 : rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
1964 0 : rs->rs_rates[rs->rs_nrates++] = rates[i];
1965 0 : }
1966 0 : rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO];
1967 0 : rs->rs_rates[rs->rs_nrates++] = rates[i];
1968 : }
1969 : }
1970 :
1971 0 : ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT |
1972 : IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR;
1973 : #ifndef IEEE80211_STA_ONLY
1974 0 : ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP;
1975 : #endif
1976 0 : ic->ic_opmode = IEEE80211_M_STA;
1977 0 : ic->ic_state = IEEE80211_S_INIT;
1978 :
1979 0 : if_attach(ifp);
1980 0 : ieee80211_ifattach(ifp);
1981 :
1982 : /* setup post-attach/pre-lateattach vector functions */
1983 0 : sc->sc_newstate = ic->ic_newstate;
1984 0 : ic->ic_newstate = pgt_newstate;
1985 0 : ic->ic_node_alloc = pgt_ieee80211_node_alloc;
1986 0 : ic->ic_newassoc = pgt_ieee80211_newassoc;
1987 0 : ic->ic_node_free = pgt_ieee80211_node_free;
1988 0 : ic->ic_node_copy = pgt_ieee80211_node_copy;
1989 0 : ic->ic_send_mgmt = pgt_ieee80211_send_mgmt;
1990 0 : ic->ic_max_rssi = 255; /* rssi is a u_int8_t */
1991 :
1992 : /* let net80211 handle switching around the media + resetting */
1993 0 : ieee80211_media_init(ifp, pgt_media_change, pgt_media_status);
1994 :
1995 : #if NBPFILTER > 0
1996 0 : bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
1997 : sizeof(struct ieee80211_frame) + 64);
1998 :
1999 0 : sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
2000 0 : sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
2001 0 : sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT);
2002 :
2003 0 : sc->sc_txtap_len = sizeof(sc->sc_txtapu);
2004 0 : sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
2005 0 : sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT);
2006 : #endif
2007 0 : return (0);
2008 0 : }
2009 :
2010 : int
2011 0 : pgt_media_change(struct ifnet *ifp)
2012 : {
2013 0 : struct pgt_softc *sc = ifp->if_softc;
2014 : int error;
2015 :
2016 0 : error = ieee80211_media_change(ifp);
2017 0 : if (error == ENETRESET) {
2018 0 : pgt_update_hw_from_sw(sc, 0, 0);
2019 : error = 0;
2020 0 : }
2021 :
2022 0 : return (error);
2023 : }
2024 :
/*
 * ifmedia status callback: report link validity/activity and the
 * current rate and operating mode to ifconfig.  When no rate is
 * fixed, the current rate is queried from the firmware via the
 * link-state OID.
 */
void
pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct pgt_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t rate;
	int s;

	/* Default: no status, no active media. */
	imr->ifm_status = 0;
	imr->ifm_active = IFM_IEEE80211 | IFM_NONE;

	if (!(ifp->if_flags & IFF_UP))
		return;

	s = splnet();

	if (ic->ic_fixed_rate != -1) {
		/* A fixed rate is configured; report that. */
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
	} else {
		/*
		 * Ask the firmware; PGT_OID_LINK_STATE returns the
		 * current link rate, 0 meaning no link.
		 */
		if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate)))
			goto out;
		rate = letoh32(rate);
		if (sc->sc_debug & SC_DEBUG_LINK) {
			DPRINTF(("%s: %s: link rate %u\n",
			    sc->sc_dev.dv_xname, __func__, rate));
		}
		if (rate == 0)
			goto out;
	}

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_IEEE80211;
	if (ic->ic_state == IEEE80211_S_RUN)
		imr->ifm_status |= IFM_ACTIVE;

	imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode);

	/* Translate the operating mode into ifmedia flags. */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
		imr->ifm_active |= IFM_IEEE80211_ADHOC;
		break;
	case IEEE80211_M_AHDEMO:
		imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
		break;
	case IEEE80211_M_HOSTAP:
		imr->ifm_active |= IFM_IEEE80211_HOSTAP;
		break;
#endif
	case IEEE80211_M_MONITOR:
		imr->ifm_active |= IFM_IEEE80211_MONITOR;
		break;
	default:
		break;
	}

out:
	splx(s);
}
2087 :
2088 : /*
2089 : * Start data frames. Critical sections surround the boundary of
2090 : * management frame transmission / transmission acknowledgement / response
2091 : * and data frame transmission / transmission acknowledgement.
2092 : */
void
pgt_start(struct ifnet *ifp)
{
	struct pgt_softc *sc;
	struct ieee80211com *ic;
	struct pgt_desc *pd;
	struct mbuf *m;
	int error;

	sc = ifp->if_softc;
	ic = &sc->sc_ic;

	/* Transmit only when up, running, and associated. */
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) ||
	    !(ifp->if_flags & IFF_RUNNING) ||
	    ic->ic_state != IEEE80211_S_RUN) {
		return;
	}

	/*
	 * Management packets should probably be MLME frames
	 * (i.e. hostap "managed" mode); we don't touch the
	 * net80211 management queue.
	 */
	for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] <
	    PGT_QUEUE_FULL_THRESHOLD && !IFQ_IS_EMPTY(&ifp->if_snd);) {
		/*
		 * Peek at the head packet first; it is only dequeued
		 * (ifq_deq_commit) once a descriptor is secured, and
		 * put back (ifq_deq_rollback) on any failure.
		 */
		pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]);
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;
		if (m->m_pkthdr.len <= PGT_FRAG_SIZE) {
			/* Whole frame fits in a single fragment. */
			error = pgt_load_tx_desc_frag(sc,
			    PGT_QUEUE_DATA_LOW_TX, pd);
			if (error) {
				ifq_deq_rollback(&ifp->if_snd, m);
				break;
			}
			ifq_deq_commit(&ifp->if_snd, m);
			m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd, m->m_pkthdr.len, 0);
		} else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) {
			struct pgt_desc *pd2;

			/*
			 * Transmit a fragmented frame if there is
			 * not enough room in one fragment; limit
			 * to two fragments (802.11 itself couldn't
			 * even support a full two.)
			 */
			if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 >
			    PGT_QUEUE_FULL_THRESHOLD) {
				ifq_deq_rollback(&ifp->if_snd, m);
				break;
			}
			pd2 = TAILQ_NEXT(pd, pd_link);
			error = pgt_load_tx_desc_frag(sc,
			    PGT_QUEUE_DATA_LOW_TX, pd);
			if (error == 0) {
				error = pgt_load_tx_desc_frag(sc,
				    PGT_QUEUE_DATA_LOW_TX, pd2);
				if (error) {
					/* Undo the first fragment. */
					pgt_unload_tx_desc_frag(sc, pd);
					TAILQ_INSERT_HEAD(&sc->sc_freeq[
					    PGT_QUEUE_DATA_LOW_TX], pd,
					    pd_link);
				}
			}
			if (error) {
				ifq_deq_rollback(&ifp->if_snd, m);
				break;
			}
			ifq_deq_commit(&ifp->if_snd, m);
			/* First fragment, with "more fragments" set. */
			m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd, PGT_FRAG_SIZE, 1);
			/* Remainder in the second fragment. */
			m_copydata(m, PGT_FRAG_SIZE,
			    m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0);
		} else {
			/* Frame too large even for two fragments; drop. */
			ifq_deq_commit(&ifp->if_snd, m);
			ifp->if_oerrors++;
			m_freem(m);
			m = NULL;
		}
		if (m != NULL) {
			/* Frame was handed to the hardware. */
			struct ieee80211_node *ni;
#if NBPFILTER > 0
			if (ifp->if_bpf != NULL)
				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
			/* Arm the TX watchdog. */
			ifp->if_timer = 1;
			sc->sc_txtimer = 5;
			ni = ieee80211_find_txnode(&sc->sc_ic,
			    mtod(m, struct ether_header *)->ether_dhost);
			if (ni != NULL) {
				/* Keep the destination node alive. */
				ni->ni_inact = 0;
				if (ni != ic->ic_bss)
					ieee80211_release_node(&sc->sc_ic, ni);
			}
#if NBPFILTER > 0
			if (sc->sc_drvbpf != NULL) {
				/* Radiotap tap: re-encapsulate as 802.11. */
				struct mbuf mb;
				struct ether_header eh;
				struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap;

				bcopy(mtod(m, struct ether_header *), &eh,
				    sizeof(eh));
				m_adj(m, sizeof(eh));
				m = pgt_ieee80211_encap(sc, &eh, m, NULL);

				tap->wt_flags = 0;
				//tap->wt_rate = rate;
				tap->wt_rate = 0;
				tap->wt_chan_freq =
				    htole16(ic->ic_bss->ni_chan->ic_freq);
				tap->wt_chan_flags =
				    htole16(ic->ic_bss->ni_chan->ic_flags);

				if (m != NULL) {
					mb.m_data = (caddr_t)tap;
					mb.m_len = sc->sc_txtap_len;
					mb.m_next = m;
					mb.m_nextpkt = NULL;
					mb.m_type = 0;
					mb.m_flags = 0;

					bpf_mtap(sc->sc_drvbpf, &mb,
					    BPF_DIRECTION_OUT);
				}
			}
#endif
			/* Data was copied into descriptor memory; done. */
			m_freem(m);
		}
	}
}
2229 :
/*
 * Interface ioctl handler.  Handles scan requests (a no-op: the
 * firmware scans continuously), node list retrieval by converting
 * firmware BSS objects into ieee80211_nodereq entries, basic
 * interface up/down, and MTU validation; everything else is passed
 * on to ieee80211_ioctl().  ENETRESET from any path triggers a
 * hardware reconfiguration before returning success.
 */
int
pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req)
{
	struct pgt_softc *sc = ifp->if_softc;
	struct ifreq *ifr;
	struct wi_req *wreq;
	struct ieee80211_nodereq_all *na;
	struct ieee80211com *ic;
	struct pgt_obj_bsslist *pob;
	struct wi_scan_p2_hdr *p2hdr;
	struct wi_scan_res *res;
	uint32_t noise;
	int maxscan, i, j, s, error = 0;

	ic = &sc->sc_ic;
	ifr = (struct ifreq *)req;

	s = splnet();
	switch (cmd) {
	case SIOCS80211SCAN:
		/*
		 * This chip scans always as soon as it gets initialized.
		 */
		break;
	case SIOCG80211ALLNODES: {
		struct ieee80211_nodereq *nr = NULL;
		na = (struct ieee80211_nodereq_all *)req;
		wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO);

		/* Fetch noise floor, then the firmware's BSS list. */
		maxscan = PGT_OBJ_BSSLIST_NBSS;
		pob = malloc(sizeof(*pob) +
		    sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
		error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
		    sizeof(noise));

		if (error == 0) {
			noise = letoh32(noise);
			error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
			    sizeof(*pob) +
			    sizeof(struct pgt_obj_bss) * maxscan);
		}

		if (error == 0) {
			/* Clamp to what the wi_req buffer can hold. */
			maxscan = min(PGT_OBJ_BSSLIST_NBSS,
			    letoh32(pob->pob_count));
			maxscan = min(maxscan,
			    (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
			    WI_PRISM2_RES_SIZE);
			p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
			p2hdr->wi_rsvd = 0;
			p2hdr->wi_reason = 1;
			wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
			    sizeof(*p2hdr) / 2;
			wreq->wi_type = WI_RID_SCAN_RES;
		}

		/*
		 * Convert each firmware BSS entry, via a Prism2-style
		 * scan result, into a node request and copy it out to
		 * userland until either the list or na_size runs out.
		 */
		for (na->na_nodes = j = i = 0; i < maxscan &&
		    (na->na_size >= j + sizeof(struct ieee80211_nodereq));
		    i++) {
			/* allocate node space */
			if (nr == NULL)
				nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);

			/* get next BSS scan result */
			res = (struct wi_scan_res *)
			    ((char *)&wreq->wi_val + sizeof(*p2hdr) +
			    i * WI_PRISM2_RES_SIZE);
			pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
			    res, noise);

			/* copy it to node structure for ifconfig to read */
			bzero(nr, sizeof(*nr));
			IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
			IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
			nr->nr_channel = letoh16(res->wi_chan);
			nr->nr_chan_flags = IEEE80211_CHAN_B;
			nr->nr_rssi = letoh16(res->wi_signal);
			nr->nr_max_rssi = 0; /* XXX */
			/*
			 * wi_ssid_len was bounded to sizeof(wi_ssid) by
			 * pgt_obj_bss2scanres(), so this bcopy is safe.
			 */
			nr->nr_nwid_len = letoh16(res->wi_ssid_len);
			bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
			nr->nr_intval = letoh16(res->wi_interval);
			nr->nr_capinfo = letoh16(res->wi_capinfo);
			/* Map WaveLAN rate codes to 500kbit/s units. */
			nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
			    (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
			    (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
			    (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
			nr->nr_nrates = 0;
			while (res->wi_srates[nr->nr_nrates] != 0) {
				nr->nr_rates[nr->nr_nrates] =
				    res->wi_srates[nr->nr_nrates] &
				    WI_VAR_SRATES_MASK;
				nr->nr_nrates++;
			}
			nr->nr_flags = 0;
			if (bcmp(nr->nr_macaddr, nr->nr_bssid,
			    IEEE80211_ADDR_LEN) == 0)
				nr->nr_flags |= IEEE80211_NODEREQ_AP;
			error = copyout(nr, (caddr_t)na->na_node + j,
			    sizeof(struct ieee80211_nodereq));
			if (error)
				break;

			/* point to next node entry */
			j += sizeof(struct ieee80211_nodereq);
			na->na_nodes++;
		}
		if (nr)
			free(nr, M_DEVBUF, 0);
		free(pob, M_DEVBUF, 0);
		free(wreq, M_DEVBUF, 0);
		break;
	}
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				pgt_init(ifp);
				error = ENETRESET;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				pgt_stop(sc, SC_NEEDS_RESET);
				error = ENETRESET;
			}
		}
		break;
	case SIOCSIFMTU:
		/* MTU cannot exceed a single TX fragment. */
		if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
			error = EINVAL;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = ieee80211_ioctl(ifp, cmd, req);
		break;
	}

	/* ENETRESET means "reprogram the hardware", not a failure. */
	if (error == ENETRESET) {
		pgt_update_hw_from_sw(sc, 0, 0);
		error = 0;
	}
	splx(s);

	return (error);
}
2377 :
2378 : void
2379 0 : pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
2380 : struct wi_scan_res *scanres, uint32_t noise)
2381 : {
2382 : struct ieee80211_rateset *rs;
2383 0 : struct wi_scan_res ap;
2384 : unsigned int i, n;
2385 :
2386 0 : rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
2387 0 : bzero(&ap, sizeof(ap));
2388 0 : ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
2389 0 : ap.wi_noise = noise;
2390 0 : ap.wi_signal = letoh16(pob->pob_rssi);
2391 0 : IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
2392 0 : ap.wi_interval = letoh16(pob->pob_beacon_period);
2393 0 : ap.wi_capinfo = letoh16(pob->pob_capinfo);
2394 0 : ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
2395 0 : memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
2396 : n = 0;
2397 0 : for (i = 0; i < 16; i++) {
2398 0 : if (letoh16(pob->pob_rates) & (1 << i)) {
2399 0 : if (i > rs->rs_nrates)
2400 : break;
2401 0 : ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
2402 0 : if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
2403 : break;
2404 : }
2405 : }
2406 0 : memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
2407 0 : }
2408 :
2409 : void
2410 0 : node_mark_active_ap(void *arg, struct ieee80211_node *ni)
2411 : {
2412 : /*
2413 : * HostAP mode lets all nodes stick around unless
2414 : * the firmware AP kicks them off.
2415 : */
2416 0 : ni->ni_inact = 0;
2417 0 : }
2418 :
2419 : void
2420 0 : node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
2421 : {
2422 : struct pgt_ieee80211_node *pin;
2423 :
2424 : /*
2425 : * As there is no association in ad-hoc, we let links just
2426 : * time out naturally as long they are not holding any private
2427 : * configuration, such as 802.1x authorization.
2428 : */
2429 0 : pin = (struct pgt_ieee80211_node *)ni;
2430 0 : if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
2431 0 : pin->pin_node.ni_inact = 0;
2432 0 : }
2433 :
/*
 * Interface watchdog: times out stuck data transmissions, keeps the
 * driver's view of the BSS in sync with the firmware, and prevents
 * net80211 from expiring nodes the firmware still considers alive.
 * Re-arms itself every tick via ifp->if_timer.
 */
void
pgt_watchdog(struct ifnet *ifp)
{
	struct pgt_softc *sc;

	sc = ifp->if_softc;
	/*
	 * Check for timed out transmissions (and make sure to set
	 * this watchdog to fire again if there is still data in the
	 * output device queue).
	 */
	if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
		int count;

		ifp->if_timer = 1;
		if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
			/* TX stuck: drop everything still queued. */
			count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: timeout %d data transmissions\n",
				    sc->sc_dev.dv_xname, count));
		}
	}
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	/*
	 * If we're going to kick the device out of power-save mode
	 * just to update the BSSID and such, we should not do it
	 * very often; need to determine in what way to do that.
	 */
	if (ifp->if_flags & IFF_RUNNING &&
	    sc->sc_ic.ic_state != IEEE80211_S_INIT &&
	    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
		pgt_async_update(sc);

#ifndef IEEE80211_STA_ONLY
	/*
	 * As a firmware-based HostAP, we should not time out
	 * nodes inside the driver additionally to the timeout
	 * that exists in the firmware. The only things we
	 * should have to deal with timing out when doing HostAP
	 * are the privacy-related.
	 */
	switch (sc->sc_ic.ic_opmode) {
	case IEEE80211_M_HOSTAP:
		ieee80211_iterate_nodes(&sc->sc_ic,
		    node_mark_active_ap, NULL);
		break;
	case IEEE80211_M_IBSS:
		ieee80211_iterate_nodes(&sc->sc_ic,
		    node_mark_active_adhoc, NULL);
		break;
	default:
		break;
	}
#endif
	ieee80211_watchdog(ifp);
	ifp->if_timer = 1;
}
2492 :
2493 : int
2494 0 : pgt_init(struct ifnet *ifp)
2495 : {
2496 0 : struct pgt_softc *sc = ifp->if_softc;
2497 0 : struct ieee80211com *ic = &sc->sc_ic;
2498 :
2499 : /* set default channel */
2500 0 : ic->ic_bss->ni_chan = ic->ic_ibss_chan;
2501 :
2502 0 : if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
2503 0 : pgt_update_hw_from_sw(sc,
2504 0 : ic->ic_state != IEEE80211_S_INIT,
2505 0 : ic->ic_opmode != IEEE80211_M_MONITOR);
2506 :
2507 0 : ifp->if_flags |= IFF_RUNNING;
2508 0 : ifq_clr_oactive(&ifp->if_snd);
2509 :
2510 : /* Begin background scanning */
2511 0 : ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);
2512 :
2513 0 : return (0);
2514 : }
2515 :
2516 : /*
2517 : * After most every configuration change, everything needs to be fully
2518 : * reinitialized. For some operations (currently, WEP settings
2519 : * in ad-hoc+802.1x mode), the change is "soft" and doesn't remove
2520 : * "associations," and allows EAP authorization to occur again.
2521 : * If keepassoc is specified, the reset operation should try to go
2522 : * back to the BSS had before.
2523 : */
void
pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc, int keepnodes)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct arpcom *ac = &ic->ic_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pgt_obj_key keyobj;
	struct pgt_obj_ssid essid;
	uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1];
	uint32_t mode, bsstype, config, profile, channel, slot, preamble;
	uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme;
	unsigned int i;
	int success, shouldbeup, s;

	config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX;

	/*
	 * Promiscuous mode is currently a no-op since packets transmitted,
	 * while in promiscuous mode, don't ever seem to go anywhere.
	 */
	shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP;

	/* Map net80211 operating mode to firmware mode/BSS type/dot1x. */
	if (shouldbeup) {
		switch (ic->ic_opmode) {
		case IEEE80211_M_STA:
			if (ifp->if_flags & IFF_PROMISC)
				mode = PGT_MODE_CLIENT;	/* what to do? */
			else
				mode = PGT_MODE_CLIENT;
			bsstype = PGT_BSS_TYPE_STA;
			dot1x = PGT_DOT1X_AUTH_ENABLED;
			break;
#ifndef IEEE80211_STA_ONLY
		case IEEE80211_M_IBSS:
			if (ifp->if_flags & IFF_PROMISC)
				mode = PGT_MODE_CLIENT;	/* what to do? */
			else
				mode = PGT_MODE_CLIENT;
			bsstype = PGT_BSS_TYPE_IBSS;
			dot1x = PGT_DOT1X_AUTH_ENABLED;
			break;
		case IEEE80211_M_HOSTAP:
			mode = PGT_MODE_AP;
			bsstype = PGT_BSS_TYPE_STA;
			/*
			 * For IEEE 802.1x, we need to authenticate and
			 * authorize hosts from here on or they remain
			 * associated but without the ability to send or
			 * receive normal traffic to us (courtesy the
			 * firmware AP implementation).
			 */
			dot1x = PGT_DOT1X_AUTH_ENABLED;
			/*
			 * WDS mode needs several things to work:
			 * discovery of exactly how creating the WDS
			 * links is meant to function, an interface
			 * for this, and ability to encode or decode
			 * the WDS frames.
			 */
			if (sc->sc_wds)
				config |= PGT_CONFIG_WDS;
			break;
#endif
		case IEEE80211_M_MONITOR:
			mode = PGT_MODE_PROMISCUOUS;
			bsstype = PGT_BSS_TYPE_ANY;
			dot1x = PGT_DOT1X_AUTH_NONE;
			break;
		default:
			goto badopmode;
		}
	} else {
badopmode:
		/*
		 * NOTE(review): dot1x is left unset on this path; it is
		 * later read unless (!wep || !sc->sc_dot1x) forces it to
		 * PGT_DOT1X_AUTH_NONE — confirm it cannot be consumed
		 * uninitialized when WEP and dot1x are both enabled.
		 */
		mode = PGT_MODE_CLIENT;
		bsstype = PGT_BSS_TYPE_NONE;
	}

	/* Select the PHY profile and preamble from the current mode. */
	DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname));
	switch (ic->ic_curmode) {
	case IEEE80211_MODE_11A:
		profile = PGT_PROFILE_A_ONLY;
		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
		DPRINTF(("IEEE80211_MODE_11A\n"));
		break;
	case IEEE80211_MODE_11B:
		profile = PGT_PROFILE_B_ONLY;
		preamble = PGT_OID_PREAMBLE_MODE_LONG;
		DPRINTF(("IEEE80211_MODE_11B\n"));
		break;
	case IEEE80211_MODE_11G:
		profile = PGT_PROFILE_G_ONLY;
		preamble = PGT_OID_PREAMBLE_MODE_SHORT;
		DPRINTF(("IEEE80211_MODE_11G\n"));
		break;
	case IEEE80211_MODE_AUTO:
		profile = PGT_PROFILE_MIXED_G_WIFI;
		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
		DPRINTF(("IEEE80211_MODE_AUTO\n"));
		break;
	default:
		panic("unknown mode %d", ic->ic_curmode);
	}

	switch (sc->sc_80211_ioc_auth) {
	case IEEE80211_AUTH_NONE:
		auth = PGT_AUTH_MODE_NONE;
		break;
	case IEEE80211_AUTH_OPEN:
		auth = PGT_AUTH_MODE_OPEN;
		break;
	default:
		auth = PGT_AUTH_MODE_SHARED;
		break;
	}

	if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) {
		wep = 1;
		exunencrypted = 1;
	} else {
		wep = 0;
		exunencrypted = 0;
	}

	/* Convert everything to the firmware's little-endian layout. */
	mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO);
	wep = htole32(wep);
	exunencrypted = htole32(exunencrypted);
	profile = htole32(profile);
	preamble = htole32(preamble);
	bsstype = htole32(bsstype);
	config = htole32(config);
	mode = htole32(mode);

	if (!wep || !sc->sc_dot1x)
		dot1x = PGT_DOT1X_AUTH_NONE;
	dot1x = htole32(dot1x);
	auth = htole32(auth);

	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		slot = htole32(PGT_OID_SLOT_MODE_SHORT);
	else
		slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC);

	/* Channel 0 means "let the firmware keep/choose one". */
	if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
		if (keepassoc)
			channel = 0;
		else
			channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
	} else
		channel = ieee80211_chan2ieee(ic, ic->ic_des_chan);

	DPRINTF(("%s: set rates", sc->sc_dev.dv_xname));
	for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) {
		availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i];
		DPRINTF((" %d", availrates[i]));
	}
	DPRINTF(("\n"));
	availrates[i++] = 0;

	essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid));
	memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length);

	/*
	 * Push all settings to the firmware.  The loop runs once; the
	 * SETOID() macro is expected to break out on failure, leaving
	 * success at 0, and the loop increment sets success to 1 only
	 * when every SETOID completed.
	 */
	s = splnet();
	for (success = 0; success == 0; success = 1) {
		SETOID(PGT_OID_PROFILE, &profile, sizeof(profile));
		SETOID(PGT_OID_CONFIG, &config, sizeof(config));
		SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme));

		if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) {
			SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr,
			    sizeof(ac->ac_enaddr));
			IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr);
		}

		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
		SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype));

		if (channel != 0 && channel != IEEE80211_CHAN_ANY)
			SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel));

		if (ic->ic_flags & IEEE80211_F_DESBSSID) {
			SETOID(PGT_OID_BSSID, ic->ic_des_bssid,
			    sizeof(ic->ic_des_bssid));
		} else if (keepassoc) {
			SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid,
			    sizeof(ic->ic_bss->ni_bssid));
		}

		SETOID(PGT_OID_SSID, &essid, sizeof(essid));

		if (ic->ic_des_esslen > 0)
			SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid));

		SETOID(PGT_OID_RATES, &availrates, i);
		SETOID(PGT_OID_EXTENDED_RATES, &availrates, i);
		SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble));
		SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot));
		SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth));
		SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted,
		    sizeof(exunencrypted));
		SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x));
		SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep));
		/*
		 * Setting WEP key(s)
		 */
		if (letoh32(wep) != 0) {
			keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP;
			/* key 1 */
			keyobj.pok_length = min(sizeof(keyobj.pok_key),
			    IEEE80211_KEYBUF_SIZE);
			keyobj.pok_length = min(keyobj.pok_length,
			    ic->ic_nw_keys[0].k_len);
			bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key,
			    keyobj.pok_length);
			SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj));
			/* key 2 */
			keyobj.pok_length = min(sizeof(keyobj.pok_key),
			    IEEE80211_KEYBUF_SIZE);
			keyobj.pok_length = min(keyobj.pok_length,
			    ic->ic_nw_keys[1].k_len);
			bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key,
			    keyobj.pok_length);
			SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj));
			/* key 3 */
			keyobj.pok_length = min(sizeof(keyobj.pok_key),
			    IEEE80211_KEYBUF_SIZE);
			keyobj.pok_length = min(keyobj.pok_length,
			    ic->ic_nw_keys[2].k_len);
			bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key,
			    keyobj.pok_length);
			SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj));
			/* key 4 */
			keyobj.pok_length = min(sizeof(keyobj.pok_key),
			    IEEE80211_KEYBUF_SIZE);
			keyobj.pok_length = min(keyobj.pok_length,
			    ic->ic_nw_keys[3].k_len);
			bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key,
			    keyobj.pok_length);
			SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj));

			wepkey = htole32(ic->ic_wep_txkey);
			SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey));
		}
		/* set mode again to commit */
		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
	}
	splx(s);

	if (success) {
		if (shouldbeup && keepnodes)
			sc->sc_flags |= SC_NOFREE_ALLNODES;
		if (shouldbeup)
			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		else
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	} else {
		printf("%s: problem setting modes\n", sc->sc_dev.dv_xname);
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	}
}
2783 :
2784 : void
2785 0 : pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid,
2786 : struct pgt_obj_mlme *mlme)
2787 : {
2788 0 : struct ieee80211com *ic = &sc->sc_ic;
2789 : struct pgt_ieee80211_node *pin;
2790 : struct ieee80211_node *ni;
2791 :
2792 0 : ni = ieee80211_find_node(ic, mlme->pom_address);
2793 0 : pin = (struct pgt_ieee80211_node *)ni;
2794 0 : switch (oid) {
2795 : case PGT_OID_DISASSOCIATE:
2796 0 : if (ni != NULL)
2797 0 : ieee80211_release_node(&sc->sc_ic, ni);
2798 : break;
2799 : case PGT_OID_ASSOCIATE:
2800 0 : if (ni == NULL) {
2801 0 : ni = ieee80211_dup_bss(ic, mlme->pom_address);
2802 0 : if (ni == NULL)
2803 : break;
2804 0 : ic->ic_newassoc(ic, ni, 1);
2805 0 : pin = (struct pgt_ieee80211_node *)ni;
2806 0 : }
2807 0 : ni->ni_associd = letoh16(mlme->pom_id);
2808 0 : pin->pin_mlme_state = letoh16(mlme->pom_state);
2809 0 : break;
2810 : default:
2811 0 : if (pin != NULL)
2812 0 : pin->pin_mlme_state = letoh16(mlme->pom_state);
2813 : break;
2814 : }
2815 0 : }
2816 :
2817 : /*
2818 : * Either in response to an event or after a certain amount of time,
2819 : * synchronize our idea of the network we're part of from the hardware.
2820 : */
2821 : void
2822 0 : pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
2823 : struct mbuf *args)
2824 : {
2825 0 : struct ieee80211com *ic = &sc->sc_ic;
2826 0 : struct pgt_obj_ssid ssid;
2827 0 : struct pgt_obj_bss bss;
2828 0 : uint32_t channel, noise, ls;
2829 : int error, s;
2830 :
2831 0 : if (pa != NULL) {
2832 : struct pgt_obj_mlme *mlme;
2833 : uint32_t oid;
2834 :
2835 0 : oid = *mtod(args, uint32_t *);
2836 0 : m_adj(args, sizeof(uint32_t));
2837 0 : if (sc->sc_debug & SC_DEBUG_TRAP)
2838 : DPRINTF(("%s: trap: oid %#x len %u\n",
2839 : sc->sc_dev.dv_xname, oid, args->m_len));
2840 0 : switch (oid) {
2841 : case PGT_OID_LINK_STATE:
2842 0 : if (args->m_len < sizeof(uint32_t))
2843 : break;
2844 0 : ls = letoh32(*mtod(args, uint32_t *));
2845 0 : if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
2846 : DPRINTF(("%s: %s: link rate %u\n",
2847 : sc->sc_dev.dv_xname, __func__, ls));
2848 0 : if (ls)
2849 0 : ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2850 : else
2851 0 : ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2852 0 : goto gotlinkstate;
2853 : case PGT_OID_DEAUTHENTICATE:
2854 : case PGT_OID_AUTHENTICATE:
2855 : case PGT_OID_DISASSOCIATE:
2856 : case PGT_OID_ASSOCIATE:
2857 0 : if (args->m_len < sizeof(struct pgt_obj_mlme))
2858 : break;
2859 0 : mlme = mtod(args, struct pgt_obj_mlme *);
2860 0 : if (sc->sc_debug & SC_DEBUG_TRAP)
2861 : DPRINTF(("%s: mlme: address "
2862 : "%s id 0x%02x state 0x%02x code 0x%02x\n",
2863 : sc->sc_dev.dv_xname,
2864 : ether_sprintf(mlme->pom_address),
2865 : letoh16(mlme->pom_id),
2866 : letoh16(mlme->pom_state),
2867 : letoh16(mlme->pom_code)));
2868 : #ifndef IEEE80211_STA_ONLY
2869 0 : if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2870 0 : pgt_hostap_handle_mlme(sc, oid, mlme);
2871 : #endif
2872 : break;
2873 : }
2874 0 : return;
2875 : }
2876 0 : if (ic->ic_state == IEEE80211_S_SCAN) {
2877 0 : s = splnet();
2878 0 : error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
2879 0 : splx(s);
2880 0 : if (error)
2881 0 : return;
2882 : DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
2883 : htole32(ls)));
2884 0 : if (ls != 0)
2885 0 : ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2886 : }
2887 :
2888 : gotlinkstate:
2889 0 : s = splnet();
2890 0 : if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
2891 : goto out;
2892 0 : sc->sc_noise = letoh32(noise);
2893 0 : if (ic->ic_state == IEEE80211_S_RUN) {
2894 0 : if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
2895 0 : sizeof(channel)) != 0)
2896 : goto out;
2897 0 : channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
2898 0 : ic->ic_bss->ni_chan = &ic->ic_channels[channel];
2899 0 : if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2900 0 : sizeof(ic->ic_bss->ni_bssid)) != 0)
2901 : goto out;
2902 0 : IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
2903 0 : error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
2904 : sizeof(bss));
2905 0 : if (error == 0)
2906 0 : ic->ic_bss->ni_rssi = bss.pob_rssi;
2907 0 : else if (error != EPERM)
2908 : goto out;
2909 0 : error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
2910 0 : if (error)
2911 : goto out;
2912 0 : ic->ic_bss->ni_esslen = min(ssid.pos_length,
2913 : sizeof(ic->ic_bss->ni_essid));
2914 0 : memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
2915 : ssid.pos_length);
2916 0 : }
2917 :
2918 : out:
2919 0 : splx(s);
2920 0 : }
2921 :
2922 : int
2923 0 : pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2924 : {
2925 0 : struct pgt_softc *sc = ic->ic_if.if_softc;
2926 : enum ieee80211_state ostate;
2927 :
2928 0 : ostate = ic->ic_state;
2929 :
2930 : DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname,
2931 : ieee80211_state_name[ostate], ieee80211_state_name[nstate]));
2932 :
2933 0 : switch (nstate) {
2934 : case IEEE80211_S_INIT:
2935 0 : if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0)
2936 0 : ic->ic_if.if_timer = 0;
2937 0 : ic->ic_mgt_timer = 0;
2938 0 : ic->ic_flags &= ~IEEE80211_F_SIBSS;
2939 0 : ieee80211_free_allnodes(ic, 1);
2940 0 : ieee80211_set_link_state(ic, LINK_STATE_DOWN);
2941 0 : break;
2942 : case IEEE80211_S_SCAN:
2943 0 : ic->ic_if.if_timer = 1;
2944 0 : ic->ic_mgt_timer = 0;
2945 0 : if (sc->sc_flags & SC_NOFREE_ALLNODES)
2946 0 : sc->sc_flags &= ~SC_NOFREE_ALLNODES;
2947 : else
2948 0 : ieee80211_free_allnodes(ic, 1);
2949 :
2950 0 : ieee80211_set_link_state(ic, LINK_STATE_DOWN);
2951 : #ifndef IEEE80211_STA_ONLY
2952 : /* Just use any old channel; we override it anyway. */
2953 0 : if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2954 0 : ieee80211_create_ibss(ic, ic->ic_ibss_chan);
2955 : #endif
2956 : break;
2957 : case IEEE80211_S_RUN:
2958 0 : ic->ic_if.if_timer = 1;
2959 0 : break;
2960 : default:
2961 : break;
2962 : }
2963 :
2964 0 : return (sc->sc_newstate(ic, nstate, arg));
2965 : }
2966 :
2967 : int
2968 0 : pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq)
2969 : {
2970 : int wokeup = 0;
2971 :
2972 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2973 : sc->sc_cbdmam->dm_mapsize,
2974 : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
2975 0 : sc->sc_cb->pcb_device_curfrag[pq] =
2976 0 : sc->sc_cb->pcb_driver_curfrag[pq];
2977 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2978 : sc->sc_cbdmam->dm_mapsize,
2979 : BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
2980 0 : while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) {
2981 : struct pgt_desc *pd;
2982 :
2983 : pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
2984 0 : TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
2985 0 : sc->sc_dirtyq_count[pq]--;
2986 0 : TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
2987 0 : sc->sc_freeq_count[pq]++;
2988 0 : pgt_unload_tx_desc_frag(sc, pd);
2989 0 : if (sc->sc_debug & SC_DEBUG_QUEUES)
2990 : DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n",
2991 : sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
2992 0 : wokeup++;
2993 0 : if (pgt_queue_is_data(pq))
2994 0 : sc->sc_ic.ic_if.if_oerrors++;
2995 : }
2996 :
2997 0 : return (wokeup);
2998 : }
2999 :
3000 : int
3001 0 : pgt_dma_alloc(struct pgt_softc *sc)
3002 : {
3003 : size_t size;
3004 0 : int i, error, nsegs;
3005 :
3006 0 : for (i = 0; i < PGT_QUEUE_COUNT; i++) {
3007 0 : TAILQ_INIT(&sc->sc_freeq[i]);
3008 0 : TAILQ_INIT(&sc->sc_dirtyq[i]);
3009 : }
3010 :
3011 : /*
3012 : * control block
3013 : */
3014 : size = sizeof(struct pgt_control_block);
3015 :
3016 0 : error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3017 : BUS_DMA_NOWAIT, &sc->sc_cbdmam);
3018 0 : if (error != 0) {
3019 0 : printf("%s: can not create DMA tag for control block\n",
3020 0 : sc->sc_dev.dv_xname);
3021 0 : goto out;
3022 : }
3023 :
3024 0 : error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3025 : 0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3026 0 : if (error != 0) {
3027 0 : printf("%s: can not allocate DMA memory for control block\n",
3028 0 : sc->sc_dev.dv_xname);
3029 0 : goto out;
3030 : }
3031 :
3032 0 : error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs,
3033 : size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT);
3034 0 : if (error != 0) {
3035 0 : printf("%s: can not map DMA memory for control block\n",
3036 0 : sc->sc_dev.dv_xname);
3037 0 : goto out;
3038 : }
3039 :
3040 0 : error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam,
3041 : sc->sc_cb, size, NULL, BUS_DMA_NOWAIT);
3042 0 : if (error != 0) {
3043 0 : printf("%s: can not load DMA map for control block\n",
3044 0 : sc->sc_dev.dv_xname);
3045 0 : goto out;
3046 : }
3047 :
3048 : /*
3049 : * powersave
3050 : */
3051 : size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;
3052 :
3053 0 : error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3054 : BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
3055 0 : if (error != 0) {
3056 0 : printf("%s: can not create DMA tag for powersave\n",
3057 0 : sc->sc_dev.dv_xname);
3058 0 : goto out;
3059 : }
3060 :
3061 0 : error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3062 : 0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3063 0 : if (error != 0) {
3064 0 : printf("%s: can not allocate DMA memory for powersave\n",
3065 0 : sc->sc_dev.dv_xname);
3066 0 : goto out;
3067 : }
3068 :
3069 0 : error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
3070 : size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
3071 0 : if (error != 0) {
3072 0 : printf("%s: can not map DMA memory for powersave\n",
3073 0 : sc->sc_dev.dv_xname);
3074 0 : goto out;
3075 : }
3076 :
3077 0 : error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
3078 : sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
3079 0 : if (error != 0) {
3080 0 : printf("%s: can not load DMA map for powersave\n",
3081 0 : sc->sc_dev.dv_xname);
3082 0 : goto out;
3083 : }
3084 :
3085 : /*
3086 : * fragments
3087 : */
3088 0 : error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3089 0 : if (error != 0)
3090 : goto out;
3091 :
3092 0 : error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3093 0 : if (error != 0)
3094 : goto out;
3095 :
3096 0 : error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3097 0 : if (error != 0)
3098 : goto out;
3099 :
3100 0 : error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3101 0 : if (error != 0)
3102 : goto out;
3103 :
3104 0 : error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
3105 0 : if (error != 0)
3106 : goto out;
3107 :
3108 0 : error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
3109 : if (error != 0)
3110 0 : goto out;
3111 :
3112 : out:
3113 0 : if (error) {
3114 0 : printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
3115 0 : pgt_dma_free(sc);
3116 0 : }
3117 :
3118 0 : return (error);
3119 0 : }
3120 :
3121 : int
3122 0 : pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
3123 : {
3124 : struct pgt_desc *pd;
3125 : size_t i, qsize;
3126 0 : int error, nsegs;
3127 :
3128 0 : switch (pq) {
3129 : case PGT_QUEUE_DATA_LOW_RX:
3130 : qsize = PGT_QUEUE_DATA_RX_SIZE;
3131 0 : break;
3132 : case PGT_QUEUE_DATA_LOW_TX:
3133 : qsize = PGT_QUEUE_DATA_TX_SIZE;
3134 0 : break;
3135 : case PGT_QUEUE_DATA_HIGH_RX:
3136 : qsize = PGT_QUEUE_DATA_RX_SIZE;
3137 0 : break;
3138 : case PGT_QUEUE_DATA_HIGH_TX:
3139 : qsize = PGT_QUEUE_DATA_TX_SIZE;
3140 0 : break;
3141 : case PGT_QUEUE_MGMT_RX:
3142 : qsize = PGT_QUEUE_MGMT_SIZE;
3143 0 : break;
3144 : case PGT_QUEUE_MGMT_TX:
3145 : qsize = PGT_QUEUE_MGMT_SIZE;
3146 0 : break;
3147 : default:
3148 0 : return (EINVAL);
3149 : }
3150 :
3151 0 : for (i = 0; i < qsize; i++) {
3152 0 : pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3153 :
3154 0 : error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
3155 : PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
3156 0 : if (error != 0) {
3157 0 : printf("%s: can not create DMA tag for fragment\n",
3158 0 : sc->sc_dev.dv_xname);
3159 0 : free(pd, M_DEVBUF, 0);
3160 0 : break;
3161 : }
3162 :
3163 0 : error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
3164 : 0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
3165 0 : if (error != 0) {
3166 0 : printf("%s: error alloc frag %zu on queue %u\n",
3167 0 : sc->sc_dev.dv_xname, i, pq);
3168 0 : free(pd, M_DEVBUF, 0);
3169 0 : break;
3170 : }
3171 :
3172 0 : error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
3173 : PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
3174 0 : if (error != 0) {
3175 0 : printf("%s: error map frag %zu on queue %u\n",
3176 0 : sc->sc_dev.dv_xname, i, pq);
3177 0 : free(pd, M_DEVBUF, 0);
3178 0 : break;
3179 : }
3180 :
3181 0 : if (pgt_queue_is_rx(pq)) {
3182 0 : error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
3183 : pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
3184 0 : if (error != 0) {
3185 0 : printf("%s: error load frag %zu on queue %u\n",
3186 0 : sc->sc_dev.dv_xname, i, pq);
3187 0 : bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
3188 : nsegs);
3189 0 : free(pd, M_DEVBUF, 0);
3190 0 : break;
3191 : }
3192 0 : pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
3193 0 : }
3194 0 : TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3195 : }
3196 :
3197 0 : return (error);
3198 0 : }
3199 :
3200 : void
3201 0 : pgt_dma_free(struct pgt_softc *sc)
3202 : {
3203 : /*
3204 : * fragments
3205 : */
3206 0 : if (sc->sc_dmat != NULL) {
3207 0 : pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3208 0 : pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3209 0 : pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3210 0 : pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3211 0 : pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
3212 0 : pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
3213 0 : }
3214 :
3215 : /*
3216 : * powersave
3217 : */
3218 0 : if (sc->sc_psmbuf != NULL) {
3219 0 : bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
3220 0 : bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
3221 0 : sc->sc_psmbuf = NULL;
3222 0 : sc->sc_psmdmam = NULL;
3223 0 : }
3224 :
3225 : /*
3226 : * control block
3227 : */
3228 0 : if (sc->sc_cb != NULL) {
3229 0 : bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
3230 0 : bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
3231 0 : sc->sc_cb = NULL;
3232 0 : sc->sc_cbdmam = NULL;
3233 0 : }
3234 0 : }
3235 :
3236 : void
3237 0 : pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
3238 : {
3239 : struct pgt_desc *pd;
3240 :
3241 0 : while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
3242 : pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
3243 0 : TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
3244 0 : if (pd->pd_dmam != NULL) {
3245 0 : bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
3246 0 : pd->pd_dmam = NULL;
3247 0 : }
3248 0 : bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
3249 0 : free(pd, M_DEVBUF, 0);
3250 : }
3251 0 : }
3252 :
3253 : int
3254 0 : pgt_activate(struct device *self, int act)
3255 : {
3256 0 : struct pgt_softc *sc = (struct pgt_softc *)self;
3257 0 : struct ifnet *ifp = &sc->sc_ic.ic_if;
3258 :
3259 : DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, why));
3260 :
3261 0 : switch (act) {
3262 : case DVACT_SUSPEND:
3263 0 : if (ifp->if_flags & IFF_RUNNING) {
3264 0 : pgt_stop(sc, SC_NEEDS_RESET);
3265 0 : pgt_update_hw_from_sw(sc, 0, 0);
3266 0 : }
3267 0 : if (sc->sc_power != NULL)
3268 0 : (*sc->sc_power)(sc, act);
3269 : break;
3270 : case DVACT_WAKEUP:
3271 0 : pgt_wakeup(sc);
3272 0 : break;
3273 : }
3274 0 : return 0;
3275 : }
3276 :
3277 : void
3278 0 : pgt_wakeup(struct pgt_softc *sc)
3279 : {
3280 0 : struct ifnet *ifp = &sc->sc_ic.ic_if;
3281 :
3282 0 : if (sc->sc_power != NULL)
3283 0 : (*sc->sc_power)(sc, DVACT_RESUME);
3284 :
3285 0 : pgt_stop(sc, SC_NEEDS_RESET);
3286 0 : pgt_update_hw_from_sw(sc, 0, 0);
3287 :
3288 0 : if (ifp->if_flags & IFF_UP) {
3289 0 : pgt_init(ifp);
3290 0 : pgt_update_hw_from_sw(sc, 0, 0);
3291 0 : }
3292 0 : }
|