Line data Source code
1 : /* $OpenBSD: if_rtwn.c,v 1.32 2018/09/13 09:28:07 kevlo Exp $ */
2 :
3 : /*-
4 : * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
5 : * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
6 : * Copyright (c) 2015-2016 Andriy Voskoboinyk <avos@FreeBSD.org>
7 : *
8 : * Permission to use, copy, modify, and distribute this software for any
9 : * purpose with or without fee is hereby granted, provided that the above
10 : * copyright notice and this permission notice appear in all copies.
11 : *
12 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 : */
20 :
21 : /*
22 : * PCI front-end for Realtek RTL8188CE/RTL8192CE driver.
23 : */
24 :
25 : #include "bpfilter.h"
26 :
27 : #include <sys/param.h>
28 : #include <sys/sockio.h>
29 : #include <sys/mbuf.h>
30 : #include <sys/kernel.h>
31 : #include <sys/socket.h>
32 : #include <sys/systm.h>
33 : #include <sys/task.h>
34 : #include <sys/timeout.h>
35 : #include <sys/conf.h>
36 : #include <sys/device.h>
37 : #include <sys/endian.h>
38 :
39 : #include <machine/bus.h>
40 : #include <machine/intr.h>
41 :
42 : #if NBPFILTER > 0
43 : #include <net/bpf.h>
44 : #endif
45 : #include <net/if.h>
46 : #include <net/if_dl.h>
47 : #include <net/if_media.h>
48 :
49 : #include <netinet/in.h>
50 : #include <netinet/if_ether.h>
51 :
52 : #include <net80211/ieee80211_var.h>
53 : #include <net80211/ieee80211_amrr.h>
54 : #include <net80211/ieee80211_radiotap.h>
55 :
56 : #include <dev/pci/pcireg.h>
57 : #include <dev/pci/pcivar.h>
58 : #include <dev/pci/pcidevs.h>
59 :
60 : #include <dev/ic/r92creg.h>
61 : #include <dev/ic/rtwnvar.h>
62 :
63 : /*
64 : * Driver definitions.
65 : */
66 :
67 : #define R92C_PUBQ_NPAGES 176
68 : #define R92C_HPQ_NPAGES 41
69 : #define R92C_LPQ_NPAGES 28
70 : #define R92C_TXPKTBUF_COUNT 256
71 : #define R92C_TX_PAGE_COUNT \
72 : (R92C_PUBQ_NPAGES + R92C_HPQ_NPAGES + R92C_LPQ_NPAGES)
73 : #define R92C_TX_PAGE_BOUNDARY (R92C_TX_PAGE_COUNT + 1)
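/*
 * Page accounting, roughly: the Tx packet buffer holds R92C_TXPKTBUF_COUNT
 * (256) pages.  The public, high and low priority queues reserve
 * 176 + 41 + 28 = 245 of them, so R92C_TX_PAGE_COUNT is 245 and the first
 * page past the reserved area, R92C_TX_PAGE_BOUNDARY, is 246.
 * rtwn_llt_init() below links the remaining pages [246, 255] into a
 * circular ring.
 */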
74 :
75 : #define RTWN_NTXQUEUES 9
76 : #define RTWN_RX_LIST_COUNT 256
77 : #define RTWN_TX_LIST_COUNT 256
78 :
79 : /* TX queue indices. */
80 : #define RTWN_BK_QUEUE 0
81 : #define RTWN_BE_QUEUE 1
82 : #define RTWN_VI_QUEUE 2
83 : #define RTWN_VO_QUEUE 3
84 : #define RTWN_BEACON_QUEUE 4
85 : #define RTWN_TXCMD_QUEUE 5
86 : #define RTWN_MGNT_QUEUE 6
87 : #define RTWN_HIGH_QUEUE 7
88 : #define RTWN_HCCA_QUEUE 8
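/*
 * Queue selection sketch (see rtwn_tx() below): QoS data frames are
 * dispatched to one of the four EDCA queues via ieee80211_up_to_ac(),
 * non-QoS data frames fall back to RTWN_BE_QUEUE, and frames that are
 * not data (e.g. management) go out on RTWN_VO_QUEUE.  The beacon,
 * management and high queues have their own completion bits handled in
 * rtwn_intr().
 */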
89 :
90 : struct rtwn_rx_radiotap_header {
91 : struct ieee80211_radiotap_header wr_ihdr;
92 : uint8_t wr_flags;
93 : uint8_t wr_rate;
94 : uint16_t wr_chan_freq;
95 : uint16_t wr_chan_flags;
96 : uint8_t wr_dbm_antsignal;
97 : } __packed;
98 :
99 : #define RTWN_RX_RADIOTAP_PRESENT \
100 : (1 << IEEE80211_RADIOTAP_FLAGS | \
101 : 1 << IEEE80211_RADIOTAP_RATE | \
102 : 1 << IEEE80211_RADIOTAP_CHANNEL | \
103 : 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
104 :
105 : struct rtwn_tx_radiotap_header {
106 : struct ieee80211_radiotap_header wt_ihdr;
107 : uint8_t wt_flags;
108 : uint16_t wt_chan_freq;
109 : uint16_t wt_chan_flags;
110 : } __packed;
111 :
112 : #define RTWN_TX_RADIOTAP_PRESENT \
113 : (1 << IEEE80211_RADIOTAP_FLAGS | \
114 : 1 << IEEE80211_RADIOTAP_CHANNEL)
115 :
116 : struct rtwn_rx_data {
117 : bus_dmamap_t map;
118 : struct mbuf *m;
119 : };
120 :
121 : struct rtwn_rx_ring {
122 : struct r92c_rx_desc_pci *desc;
123 : bus_dmamap_t map;
124 : bus_dma_segment_t seg;
125 : int nsegs;
126 : struct rtwn_rx_data rx_data[RTWN_RX_LIST_COUNT];
127 :
128 : };
129 : struct rtwn_tx_data {
130 : bus_dmamap_t map;
131 : struct mbuf *m;
132 : struct ieee80211_node *ni;
133 : };
134 :
135 : struct rtwn_tx_ring {
136 : bus_dmamap_t map;
137 : bus_dma_segment_t seg;
138 : int nsegs;
139 : struct r92c_tx_desc_pci *desc;
140 : struct rtwn_tx_data tx_data[RTWN_TX_LIST_COUNT];
141 : int queued;
142 : int cur;
143 : };
144 :
145 : struct rtwn_pci_softc {
146 : struct device sc_dev;
147 : struct rtwn_softc sc_sc;
148 :
149 : struct rtwn_rx_ring rx_ring;
150 : struct rtwn_tx_ring tx_ring[RTWN_NTXQUEUES];
151 : uint32_t qfullmsk;
152 :
153 : struct timeout calib_to;
154 : struct timeout scan_to;
155 :
156 : /* PCI specific goo. */
157 : bus_dma_tag_t sc_dmat;
158 : pci_chipset_tag_t sc_pc;
159 : pcitag_t sc_tag;
160 : void *sc_ih;
161 : bus_space_tag_t sc_st;
162 : bus_space_handle_t sc_sh;
163 : bus_size_t sc_mapsize;
164 : int sc_cap_off;
165 :
166 : struct ieee80211_amrr amrr;
167 : struct ieee80211_amrr_node amn;
168 :
169 : #if NBPFILTER > 0
170 : caddr_t sc_drvbpf;
171 :
172 : union {
173 : struct rtwn_rx_radiotap_header th;
174 : uint8_t pad[64];
175 : } sc_rxtapu;
176 : #define sc_rxtap sc_rxtapu.th
177 : int sc_rxtap_len;
178 :
179 : union {
180 : struct rtwn_tx_radiotap_header th;
181 : uint8_t pad[64];
182 : } sc_txtapu;
183 : #define sc_txtap sc_txtapu.th
184 : int sc_txtap_len;
185 : #endif
186 : };
187 :
188 : #ifdef RTWN_DEBUG
189 : #define DPRINTF(x) do { if (rtwn_debug) printf x; } while (0)
190 : #define DPRINTFN(n, x) do { if (rtwn_debug >= (n)) printf x; } while (0)
191 : extern int rtwn_debug;
192 : #else
193 : #define DPRINTF(x)
194 : #define DPRINTFN(n, x)
195 : #endif
196 :
197 : /*
198 : * PCI configuration space registers.
199 : */
200 : #define RTWN_PCI_IOBA 0x10 /* i/o mapped base */
201 : #define RTWN_PCI_MMBA 0x18 /* memory mapped base */
202 :
203 : static const struct pci_matchid rtwn_pci_devices[] = {
204 : { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8188CE },
205 : { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8192CE }
206 : };
207 :
208 : int rtwn_pci_match(struct device *, void *, void *);
209 : void rtwn_pci_attach(struct device *, struct device *, void *);
210 : int rtwn_pci_detach(struct device *, int);
211 : int rtwn_pci_activate(struct device *, int);
212 : int rtwn_alloc_rx_list(struct rtwn_pci_softc *);
213 : void rtwn_reset_rx_list(struct rtwn_pci_softc *);
214 : void rtwn_free_rx_list(struct rtwn_pci_softc *);
215 : void rtwn_setup_rx_desc(struct rtwn_pci_softc *,
216 : struct r92c_rx_desc_pci *, bus_addr_t, size_t, int);
217 : int rtwn_alloc_tx_list(struct rtwn_pci_softc *, int);
218 : void rtwn_reset_tx_list(struct rtwn_pci_softc *, int);
219 : void rtwn_free_tx_list(struct rtwn_pci_softc *, int);
220 : void rtwn_pci_write_1(void *, uint16_t, uint8_t);
221 : void rtwn_pci_write_2(void *, uint16_t, uint16_t);
222 : void rtwn_pci_write_4(void *, uint16_t, uint32_t);
223 : uint8_t rtwn_pci_read_1(void *, uint16_t);
224 : uint16_t rtwn_pci_read_2(void *, uint16_t);
225 : uint32_t rtwn_pci_read_4(void *, uint16_t);
226 : void rtwn_rx_frame(struct rtwn_pci_softc *,
227 : struct r92c_rx_desc_pci *, struct rtwn_rx_data *, int);
228 : int rtwn_tx(void *, struct mbuf *, struct ieee80211_node *);
229 : void rtwn_tx_done(struct rtwn_pci_softc *, int);
230 : int rtwn_alloc_buffers(void *);
231 : int rtwn_pci_init(void *);
232 : void rtwn_pci_stop(void *);
233 : int rtwn_intr(void *);
234 : int rtwn_is_oactive(void *);
235 : int rtwn_power_on(void *);
236 : int rtwn_llt_write(struct rtwn_pci_softc *, uint32_t, uint32_t);
237 : int rtwn_llt_init(struct rtwn_pci_softc *);
238 : int rtwn_dma_init(void *);
239 : int rtwn_fw_loadpage(void *, int, uint8_t *, int);
240 : int rtwn_pci_load_firmware(void *, u_char **, size_t *);
241 : void rtwn_mac_init(void *);
242 : void rtwn_bb_init(void *);
243 : void rtwn_calib_to(void *);
244 : void rtwn_next_calib(void *);
245 : void rtwn_cancel_calib(void *);
246 : void rtwn_scan_to(void *);
247 : void rtwn_pci_next_scan(void *);
248 : void rtwn_cancel_scan(void *);
249 : void rtwn_wait_async(void *);
250 : void rtwn_poll_c2h_events(struct rtwn_pci_softc *);
251 : void rtwn_tx_report(struct rtwn_pci_softc *, uint8_t *, int);
252 :
253 : /* Aliases. */
254 : #define rtwn_bb_write rtwn_pci_write_4
255 : #define rtwn_bb_read rtwn_pci_read_4
256 :
257 : struct cfdriver rtwn_cd = {
258 : NULL, "rtwn", DV_IFNET
259 : };
260 :
261 : const struct cfattach rtwn_pci_ca = {
262 : sizeof(struct rtwn_pci_softc),
263 : rtwn_pci_match,
264 : rtwn_pci_attach,
265 : rtwn_pci_detach,
266 : rtwn_pci_activate
267 : };
268 :
269 : int
270 0 : rtwn_pci_match(struct device *parent, void *match, void *aux)
271 : {
272 0 : return (pci_matchbyid(aux, rtwn_pci_devices,
273 : nitems(rtwn_pci_devices)));
274 : }
275 :
276 : void
277 0 : rtwn_pci_attach(struct device *parent, struct device *self, void *aux)
278 : {
279 0 : struct rtwn_pci_softc *sc = (struct rtwn_pci_softc*)self;
280 0 : struct pci_attach_args *pa = aux;
281 : struct ifnet *ifp;
282 : int i, error;
283 : pcireg_t memtype;
284 0 : pci_intr_handle_t ih;
285 : const char *intrstr;
286 :
287 0 : sc->sc_dmat = pa->pa_dmat;
288 0 : sc->sc_pc = pa->pa_pc;
289 0 : sc->sc_tag = pa->pa_tag;
290 :
291 0 : timeout_set(&sc->calib_to, rtwn_calib_to, sc);
292 0 : timeout_set(&sc->scan_to, rtwn_scan_to, sc);
293 :
294 0 : pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
295 :
296 : /* Map control/status registers. */
297 0 : memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RTWN_PCI_MMBA);
298 0 : error = pci_mapreg_map(pa, RTWN_PCI_MMBA, memtype, 0, &sc->sc_st,
299 0 : &sc->sc_sh, NULL, &sc->sc_mapsize, 0);
300 0 : if (error != 0) {
301 0 : printf(": can't map mem space\n");
302 0 : return;
303 : }
304 :
305 0 : if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
306 0 : printf(": can't map interrupt\n");
307 0 : return;
308 : }
309 0 : intrstr = pci_intr_string(sc->sc_pc, ih);
310 0 : sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
311 0 : rtwn_intr, sc, sc->sc_dev.dv_xname);
312 0 : if (sc->sc_ih == NULL) {
313 0 : printf(": can't establish interrupt");
314 0 : if (intrstr != NULL)
315 0 : printf(" at %s", intrstr);
316 0 : printf("\n");
317 0 : return;
318 : }
319 0 : printf(": %s\n", intrstr);
320 :
321 : /* Disable PCIe Active State Power Management (ASPM). */
322 0 : if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS,
323 0 : &sc->sc_cap_off, NULL)) {
324 0 : uint32_t lcsr = pci_conf_read(sc->sc_pc, sc->sc_tag,
325 0 : sc->sc_cap_off + PCI_PCIE_LCSR);
326 0 : lcsr &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
327 0 : pci_conf_write(sc->sc_pc, sc->sc_tag,
328 0 : sc->sc_cap_off + PCI_PCIE_LCSR, lcsr);
329 0 : }
330 :
331 : /* Allocate Tx/Rx buffers. */
332 0 : error = rtwn_alloc_rx_list(sc);
333 0 : if (error != 0) {
334 0 : printf("%s: could not allocate Rx buffers\n",
335 : sc->sc_dev.dv_xname);
336 0 : return;
337 : }
338 0 : for (i = 0; i < RTWN_NTXQUEUES; i++) {
339 0 : error = rtwn_alloc_tx_list(sc, i);
340 0 : if (error != 0) {
341 0 : printf("%s: could not allocate Tx buffers\n",
342 : sc->sc_dev.dv_xname);
343 0 : rtwn_free_rx_list(sc);
344 0 : return;
345 : }
346 : }
347 :
348 0 : sc->amrr.amrr_min_success_threshold = 1;
349 0 : sc->amrr.amrr_max_success_threshold = 15;
350 :
351 : /* Attach the bus-agnostic driver. */
352 0 : sc->sc_sc.sc_ops.cookie = sc;
353 0 : sc->sc_sc.sc_ops.write_1 = rtwn_pci_write_1;
354 0 : sc->sc_sc.sc_ops.write_2 = rtwn_pci_write_2;
355 0 : sc->sc_sc.sc_ops.write_4 = rtwn_pci_write_4;
356 0 : sc->sc_sc.sc_ops.read_1 = rtwn_pci_read_1;
357 0 : sc->sc_sc.sc_ops.read_2 = rtwn_pci_read_2;
358 0 : sc->sc_sc.sc_ops.read_4 = rtwn_pci_read_4;
359 0 : sc->sc_sc.sc_ops.tx = rtwn_tx;
360 0 : sc->sc_sc.sc_ops.power_on = rtwn_power_on;
361 0 : sc->sc_sc.sc_ops.dma_init = rtwn_dma_init;
362 0 : sc->sc_sc.sc_ops.load_firmware = rtwn_pci_load_firmware;
363 0 : sc->sc_sc.sc_ops.fw_loadpage = rtwn_fw_loadpage;
364 0 : sc->sc_sc.sc_ops.mac_init = rtwn_mac_init;
365 0 : sc->sc_sc.sc_ops.bb_init = rtwn_bb_init;
366 0 : sc->sc_sc.sc_ops.alloc_buffers = rtwn_alloc_buffers;
367 0 : sc->sc_sc.sc_ops.init = rtwn_pci_init;
368 0 : sc->sc_sc.sc_ops.stop = rtwn_pci_stop;
369 0 : sc->sc_sc.sc_ops.is_oactive = rtwn_is_oactive;
370 0 : sc->sc_sc.sc_ops.next_calib = rtwn_next_calib;
371 0 : sc->sc_sc.sc_ops.cancel_calib = rtwn_cancel_calib;
372 0 : sc->sc_sc.sc_ops.next_scan = rtwn_pci_next_scan;
373 0 : sc->sc_sc.sc_ops.cancel_scan = rtwn_cancel_scan;
374 0 : sc->sc_sc.sc_ops.wait_async = rtwn_wait_async;
375 :
376 0 : sc->sc_sc.chip = RTWN_CHIP_88C | RTWN_CHIP_92C | RTWN_CHIP_PCI;
377 :
378 0 : error = rtwn_attach(&sc->sc_dev, &sc->sc_sc);
379 0 : if (error != 0) {
380 0 : rtwn_free_rx_list(sc);
381 0 : for (i = 0; i < RTWN_NTXQUEUES; i++)
382 0 : rtwn_free_tx_list(sc, i);
383 0 : return;
384 : }
385 :
386 : /* ifp is now valid */
387 0 : ifp = &sc->sc_sc.sc_ic.ic_if;
388 : #if NBPFILTER > 0
389 0 : bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
390 : sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
391 :
392 0 : sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
393 0 : sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
394 0 : sc->sc_rxtap.wr_ihdr.it_present = htole32(RTWN_RX_RADIOTAP_PRESENT);
395 :
396 0 : sc->sc_txtap_len = sizeof(sc->sc_txtapu);
397 0 : sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
398 0 : sc->sc_txtap.wt_ihdr.it_present = htole32(RTWN_TX_RADIOTAP_PRESENT);
399 : #endif
400 0 : }
401 :
402 : int
403 0 : rtwn_pci_detach(struct device *self, int flags)
404 : {
405 0 : struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
406 : int s, i;
407 :
408 0 : s = splnet();
409 :
410 0 : if (timeout_initialized(&sc->calib_to))
411 0 : timeout_del(&sc->calib_to);
412 0 : if (timeout_initialized(&sc->scan_to))
413 0 : timeout_del(&sc->scan_to);
414 :
415 0 : rtwn_detach(&sc->sc_sc, flags);
416 :
417 : /* Free Tx/Rx buffers. */
418 0 : for (i = 0; i < RTWN_NTXQUEUES; i++)
419 0 : rtwn_free_tx_list(sc, i);
420 0 : rtwn_free_rx_list(sc);
421 0 : splx(s);
422 :
423 0 : return (0);
424 : }
425 :
426 : int
427 0 : rtwn_pci_activate(struct device *self, int act)
428 : {
429 0 : struct rtwn_pci_softc *sc = (struct rtwn_pci_softc *)self;
430 :
431 0 : return rtwn_activate(&sc->sc_sc, act);
432 : }
433 :
434 : void
435 0 : rtwn_setup_rx_desc(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *desc,
436 : bus_addr_t addr, size_t len, int idx)
437 : {
438 0 : memset(desc, 0, sizeof(*desc));
439 0 : desc->rxdw0 = htole32(SM(R92C_RXDW0_PKTLEN, len) |
440 : ((idx == RTWN_RX_LIST_COUNT - 1) ? R92C_RXDW0_EOR : 0));
441 0 : desc->rxbufaddr = htole32(addr);
442 0 : bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
443 : BUS_SPACE_BARRIER_WRITE);
444 0 : desc->rxdw0 |= htole32(R92C_RXDW0_OWN);
445 0 : }
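/*
 * Ordering note: the descriptor is fully initialized (packet length, EOR
 * flag on the last entry, buffer address) before ownership is handed to
 * the chip.  The write barrier is meant to keep the R92C_RXDW0_OWN store
 * from overtaking the earlier descriptor writes, so the hardware never
 * sees a descriptor it owns that is only half set up.
 */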
446 :
447 : int
448 0 : rtwn_alloc_rx_list(struct rtwn_pci_softc *sc)
449 : {
450 0 : struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
451 : struct rtwn_rx_data *rx_data;
452 : size_t size;
453 : int i, error = 0;
454 :
455 : /* Allocate Rx descriptors. */
456 : size = sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT;
457 0 : error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
458 : &rx_ring->map);
459 0 : if (error != 0) {
460 0 : printf("%s: could not create rx desc DMA map\n",
461 0 : sc->sc_dev.dv_xname);
462 0 : rx_ring->map = NULL;
463 0 : goto fail;
464 : }
465 :
466 0 : error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &rx_ring->seg, 1,
467 : &rx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
468 0 : if (error != 0) {
469 0 : printf("%s: could not allocate rx desc\n",
470 0 : sc->sc_dev.dv_xname);
471 0 : goto fail;
472 : }
473 :
474 0 : error = bus_dmamem_map(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs,
475 : size, (caddr_t *)&rx_ring->desc,
476 : BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
477 0 : if (error != 0) {
478 0 : bus_dmamem_free(sc->sc_dmat, &rx_ring->seg, rx_ring->nsegs);
479 0 : rx_ring->desc = NULL;
480 0 : printf("%s: could not map rx desc\n", sc->sc_dev.dv_xname);
481 0 : goto fail;
482 : }
483 :
484 0 : error = bus_dmamap_load_raw(sc->sc_dmat, rx_ring->map, &rx_ring->seg,
485 : 1, size, BUS_DMA_NOWAIT);
486 0 : if (error != 0) {
487 0 : printf("%s: could not load rx desc\n",
488 0 : sc->sc_dev.dv_xname);
489 0 : goto fail;
490 : }
491 :
492 0 : bus_dmamap_sync(sc->sc_dmat, rx_ring->map, 0, size,
493 : BUS_DMASYNC_PREWRITE);
494 :
495 : /* Allocate Rx buffers. */
496 0 : for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
497 0 : rx_data = &rx_ring->rx_data[i];
498 :
499 0 : error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
500 : 0, BUS_DMA_NOWAIT, &rx_data->map);
501 0 : if (error != 0) {
502 0 : printf("%s: could not create rx buf DMA map\n",
503 0 : sc->sc_dev.dv_xname);
504 0 : goto fail;
505 : }
506 :
507 0 : rx_data->m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
508 0 : if (rx_data->m == NULL) {
509 0 : printf("%s: could not allocate rx mbuf\n",
510 0 : sc->sc_dev.dv_xname);
511 : error = ENOMEM;
512 0 : goto fail;
513 : }
514 :
515 0 : error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
516 : mtod(rx_data->m, void *), MCLBYTES, NULL,
517 : BUS_DMA_NOWAIT | BUS_DMA_READ);
518 0 : if (error != 0) {
519 0 : printf("%s: could not load rx buf DMA map\n",
520 0 : sc->sc_dev.dv_xname);
521 0 : goto fail;
522 : }
523 :
524 0 : rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
525 0 : rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
526 : }
527 0 : fail: if (error != 0)
528 0 : rtwn_free_rx_list(sc);
529 0 : return (error);
530 : }
531 :
532 : void
533 0 : rtwn_reset_rx_list(struct rtwn_pci_softc *sc)
534 : {
535 0 : struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
536 : struct rtwn_rx_data *rx_data;
537 : int i;
538 :
539 0 : for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
540 0 : rx_data = &rx_ring->rx_data[i];
541 0 : rtwn_setup_rx_desc(sc, &rx_ring->desc[i],
542 0 : rx_data->map->dm_segs[0].ds_addr, MCLBYTES, i);
543 : }
544 0 : }
545 :
546 : void
547 0 : rtwn_free_rx_list(struct rtwn_pci_softc *sc)
548 : {
549 0 : struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
550 : struct rtwn_rx_data *rx_data;
551 : int i, s;
552 :
553 0 : s = splnet();
554 :
555 0 : if (rx_ring->map) {
556 0 : if (rx_ring->desc) {
557 0 : bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
558 0 : bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
559 : sizeof (struct r92c_rx_desc_pci) *
560 : RTWN_RX_LIST_COUNT);
561 0 : bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
562 : rx_ring->nsegs);
563 0 : rx_ring->desc = NULL;
564 0 : }
565 0 : bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
566 0 : rx_ring->map = NULL;
567 0 : }
568 :
569 0 : for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
570 0 : rx_data = &rx_ring->rx_data[i];
571 :
572 0 : if (rx_data->m != NULL) {
573 0 : bus_dmamap_unload(sc->sc_dmat, rx_data->map);
574 0 : m_freem(rx_data->m);
575 0 : rx_data->m = NULL;
576 0 : }
577 0 : bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
578 0 : rx_data->map = NULL;
579 : }
580 :
581 0 : splx(s);
582 0 : }
583 :
584 : int
585 0 : rtwn_alloc_tx_list(struct rtwn_pci_softc *sc, int qid)
586 : {
587 0 : struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
588 : struct rtwn_tx_data *tx_data;
589 : int i = 0, error = 0;
590 :
591 0 : error = bus_dmamap_create(sc->sc_dmat,
592 : sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 1,
593 : sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, 0,
594 : BUS_DMA_NOWAIT, &tx_ring->map);
595 0 : if (error != 0) {
596 0 : printf("%s: could not create tx ring DMA map\n",
597 0 : sc->sc_dev.dv_xname);
598 0 : goto fail;
599 : }
600 :
601 0 : error = bus_dmamem_alloc(sc->sc_dmat,
602 : sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, PAGE_SIZE, 0,
603 : &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
604 0 : if (error != 0) {
605 0 : printf("%s: could not allocate tx ring DMA memory\n",
606 0 : sc->sc_dev.dv_xname);
607 0 : goto fail;
608 : }
609 :
610 0 : error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
611 : sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT,
612 : (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
613 0 : if (error != 0) {
614 0 : bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
615 0 : printf("%s: can't map tx ring DMA memory\n",
616 0 : sc->sc_dev.dv_xname);
617 0 : goto fail;
618 : }
619 :
620 0 : error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
621 : sizeof (struct r92c_tx_desc_pci) * RTWN_TX_LIST_COUNT, NULL,
622 : BUS_DMA_NOWAIT);
623 0 : if (error != 0) {
624 0 : printf("%s: could not load tx ring DMA map\n",
625 0 : sc->sc_dev.dv_xname);
626 0 : goto fail;
627 : }
628 :
629 0 : for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
630 0 : struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
631 :
632 : /* setup tx desc */
633 0 : desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
634 : + sizeof(struct r92c_tx_desc_pci)
635 : * ((i + 1) % RTWN_TX_LIST_COUNT));
636 :
637 0 : tx_data = &tx_ring->tx_data[i];
638 0 : error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
639 : 0, BUS_DMA_NOWAIT, &tx_data->map);
640 0 : if (error != 0) {
641 0 : printf("%s: could not create tx buf DMA map\n",
642 0 : sc->sc_dev.dv_xname);
643 0 : goto fail;
644 : }
645 0 : tx_data->m = NULL;
646 0 : tx_data->ni = NULL;
647 0 : }
648 : fail:
649 0 : if (error != 0)
650 0 : rtwn_free_tx_list(sc, qid);
651 0 : return (error);
652 0 : }
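/*
 * Ring layout sketch: each Tx descriptor's nextdescaddr holds the
 * physical address of the following descriptor, and (i + 1) %
 * RTWN_TX_LIST_COUNT wraps the last entry back to the first, so the 256
 * descriptors of a queue form a circular list.  The per-packet payload
 * buffers are mapped separately through tx_data[i].map at send time.
 */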
653 :
654 : void
655 0 : rtwn_reset_tx_list(struct rtwn_pci_softc *sc, int qid)
656 : {
657 0 : struct ieee80211com *ic = &sc->sc_sc.sc_ic;
658 0 : struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
659 : int i;
660 :
661 0 : for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
662 0 : struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
663 0 : struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
664 :
665 0 : memset(desc, 0, sizeof(*desc) -
666 : (sizeof(desc->reserved) + sizeof(desc->nextdescaddr64) +
667 : sizeof(desc->nextdescaddr)));
668 :
669 0 : if (tx_data->m != NULL) {
670 0 : bus_dmamap_unload(sc->sc_dmat, tx_data->map);
671 0 : m_freem(tx_data->m);
672 0 : tx_data->m = NULL;
673 0 : ieee80211_release_node(ic, tx_data->ni);
674 0 : tx_data->ni = NULL;
675 0 : }
676 : }
677 :
678 0 : bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
679 : BUS_DMASYNC_POSTWRITE);
680 :
681 0 : sc->qfullmsk &= ~(1 << qid);
682 0 : tx_ring->queued = 0;
683 0 : tx_ring->cur = 0;
684 0 : }
685 :
686 : void
687 0 : rtwn_free_tx_list(struct rtwn_pci_softc *sc, int qid)
688 : {
689 0 : struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
690 : struct rtwn_tx_data *tx_data;
691 : int i;
692 :
693 0 : if (tx_ring->map != NULL) {
694 0 : if (tx_ring->desc != NULL) {
695 0 : bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
696 0 : bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
697 : sizeof (struct r92c_tx_desc_pci) *
698 : RTWN_TX_LIST_COUNT);
699 0 : bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
700 0 : }
701 0 : bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
702 0 : }
703 :
704 0 : for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
705 0 : tx_data = &tx_ring->tx_data[i];
706 :
707 0 : if (tx_data->m != NULL) {
708 0 : bus_dmamap_unload(sc->sc_dmat, tx_data->map);
709 0 : m_freem(tx_data->m);
710 0 : tx_data->m = NULL;
711 0 : }
712 0 : bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
713 : }
714 :
715 0 : sc->qfullmsk &= ~(1 << qid);
716 0 : tx_ring->queued = 0;
717 0 : tx_ring->cur = 0;
718 0 : }
719 :
720 : void
721 0 : rtwn_pci_write_1(void *cookie, uint16_t addr, uint8_t val)
722 : {
723 0 : struct rtwn_pci_softc *sc = cookie;
724 :
725 0 : bus_space_write_1(sc->sc_st, sc->sc_sh, addr, val);
726 0 : }
727 :
728 : void
729 0 : rtwn_pci_write_2(void *cookie, uint16_t addr, uint16_t val)
730 : {
731 0 : struct rtwn_pci_softc *sc = cookie;
732 :
733 : val = htole16(val);
734 0 : bus_space_write_2(sc->sc_st, sc->sc_sh, addr, val);
735 0 : }
736 :
737 : void
738 0 : rtwn_pci_write_4(void *cookie, uint16_t addr, uint32_t val)
739 : {
740 0 : struct rtwn_pci_softc *sc = cookie;
741 :
742 : val = htole32(val);
743 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, addr, val);
744 0 : }
745 :
746 : uint8_t
747 0 : rtwn_pci_read_1(void *cookie, uint16_t addr)
748 : {
749 0 : struct rtwn_pci_softc *sc = cookie;
750 :
751 0 : return bus_space_read_1(sc->sc_st, sc->sc_sh, addr);
752 : }
753 :
754 : uint16_t
755 0 : rtwn_pci_read_2(void *cookie, uint16_t addr)
756 : {
757 0 : struct rtwn_pci_softc *sc = cookie;
758 : uint16_t val;
759 :
760 0 : val = bus_space_read_2(sc->sc_st, sc->sc_sh, addr);
761 0 : return le16toh(val);
762 : }
763 :
764 : uint32_t
765 0 : rtwn_pci_read_4(void *cookie, uint16_t addr)
766 : {
767 0 : struct rtwn_pci_softc *sc = cookie;
768 : uint32_t val;
769 :
770 0 : val = bus_space_read_4(sc->sc_st, sc->sc_sh, addr);
771 0 : return le32toh(val);
772 : }
773 :
774 : void
775 0 : rtwn_rx_frame(struct rtwn_pci_softc *sc, struct r92c_rx_desc_pci *rx_desc,
776 : struct rtwn_rx_data *rx_data, int desc_idx)
777 : {
778 0 : struct ieee80211com *ic = &sc->sc_sc.sc_ic;
779 0 : struct ifnet *ifp = &ic->ic_if;
780 0 : struct ieee80211_rxinfo rxi;
781 : struct ieee80211_frame *wh;
782 : struct ieee80211_node *ni;
783 : struct r92c_rx_phystat *phy = NULL;
784 : uint32_t rxdw0, rxdw3;
785 : struct mbuf *m, *m1;
786 : uint8_t rate;
787 : int8_t rssi = 0;
788 : int infosz, pktlen, shift, error;
789 :
790 0 : rxdw0 = letoh32(rx_desc->rxdw0);
791 0 : rxdw3 = letoh32(rx_desc->rxdw3);
792 :
793 0 : if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) {
794 : /*
795 : * This should not happen since we set up our Rx filter
796 : * to not receive these frames.
797 : */
798 0 : ifp->if_ierrors++;
799 0 : return;
800 : }
801 :
802 0 : pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
803 0 : if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) {
804 0 : ifp->if_ierrors++;
805 0 : return;
806 : }
807 :
808 0 : rate = MS(rxdw3, R92C_RXDW3_RATE);
809 0 : infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
810 0 : if (infosz > sizeof(struct r92c_rx_phystat))
811 : infosz = sizeof(struct r92c_rx_phystat);
812 0 : shift = MS(rxdw0, R92C_RXDW0_SHIFT);
813 :
814 : /* Get RSSI from PHY status descriptor if present. */
815 0 : if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
816 0 : phy = mtod(rx_data->m, struct r92c_rx_phystat *);
817 0 : rssi = rtwn_get_rssi(&sc->sc_sc, rate, phy);
818 : /* Update our average RSSI. */
819 0 : rtwn_update_avgrssi(&sc->sc_sc, rate, rssi);
820 0 : }
821 :
822 : DPRINTFN(5, ("Rx frame len=%d rate=%d infosz=%d shift=%d rssi=%d\n",
823 : pktlen, rate, infosz, shift, rssi));
824 :
825 0 : m1 = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
826 0 : if (m1 == NULL) {
827 0 : ifp->if_ierrors++;
828 0 : return;
829 : }
830 0 : bus_dmamap_unload(sc->sc_dmat, rx_data->map);
831 0 : error = bus_dmamap_load(sc->sc_dmat, rx_data->map,
832 : mtod(m1, void *), MCLBYTES, NULL,
833 : BUS_DMA_NOWAIT | BUS_DMA_READ);
834 0 : if (error != 0) {
835 0 : m_freem(m1);
836 :
837 0 : if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
838 : rx_data->m, BUS_DMA_NOWAIT))
839 0 : panic("%s: could not load old RX mbuf",
840 0 : sc->sc_dev.dv_xname);
841 :
842 : /* Physical address may have changed. */
843 0 : rtwn_setup_rx_desc(sc, rx_desc,
844 0 : rx_data->map->dm_segs[0].ds_addr, MCLBYTES, desc_idx);
845 :
846 0 : ifp->if_ierrors++;
847 0 : return;
848 : }
849 :
850 : /* Finalize mbuf. */
851 0 : m = rx_data->m;
852 0 : rx_data->m = m1;
853 0 : m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;
854 :
855 : /* Update RX descriptor. */
856 0 : rtwn_setup_rx_desc(sc, rx_desc, rx_data->map->dm_segs[0].ds_addr,
857 : MCLBYTES, desc_idx);
858 :
859 : /* Get ieee80211 frame header. */
860 0 : if (rxdw0 & R92C_RXDW0_PHYST)
861 0 : m_adj(m, infosz + shift);
862 : else
863 0 : m_adj(m, shift);
864 0 : wh = mtod(m, struct ieee80211_frame *);
865 :
866 : #if NBPFILTER > 0
867 0 : if (__predict_false(sc->sc_drvbpf != NULL)) {
868 0 : struct rtwn_rx_radiotap_header *tap = &sc->sc_rxtap;
869 0 : struct mbuf mb;
870 :
871 0 : tap->wr_flags = 0;
872 : /* Map HW rate index to 802.11 rate. */
873 0 : tap->wr_rate = 2;
874 0 : if (!(rxdw3 & R92C_RXDW3_HT)) {
875 0 : switch (rate) {
876 : /* CCK. */
877 0 : case 0: tap->wr_rate = 2; break;
878 0 : case 1: tap->wr_rate = 4; break;
879 0 : case 2: tap->wr_rate = 11; break;
880 0 : case 3: tap->wr_rate = 22; break;
881 : /* OFDM. */
882 0 : case 4: tap->wr_rate = 12; break;
883 0 : case 5: tap->wr_rate = 18; break;
884 0 : case 6: tap->wr_rate = 24; break;
885 0 : case 7: tap->wr_rate = 36; break;
886 0 : case 8: tap->wr_rate = 48; break;
887 0 : case 9: tap->wr_rate = 72; break;
888 0 : case 10: tap->wr_rate = 96; break;
889 0 : case 11: tap->wr_rate = 108; break;
890 : }
891 0 : } else if (rate >= 12) { /* MCS0~15. */
892 : /* Bit 7 set means HT MCS instead of rate. */
893 0 : tap->wr_rate = 0x80 | (rate - 12);
894 0 : }
895 0 : tap->wr_dbm_antsignal = rssi;
896 0 : tap->wr_chan_freq = htole16(ic->ic_ibss_chan->ic_freq);
897 0 : tap->wr_chan_flags = htole16(ic->ic_ibss_chan->ic_flags);
898 :
899 0 : mb.m_data = (caddr_t)tap;
900 0 : mb.m_len = sc->sc_rxtap_len;
901 0 : mb.m_next = m;
902 0 : mb.m_nextpkt = NULL;
903 0 : mb.m_type = 0;
904 0 : mb.m_flags = 0;
905 0 : bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
906 0 : }
907 : #endif
908 :
909 0 : ni = ieee80211_find_rxnode(ic, wh);
910 0 : rxi.rxi_flags = 0;
911 0 : rxi.rxi_rssi = rssi;
912 0 : rxi.rxi_tstamp = 0; /* Unused. */
913 0 : ieee80211_input(ifp, m, ni, &rxi);
914 : /* Node is no longer needed. */
915 0 : ieee80211_release_node(ic, ni);
916 0 : }
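/*
 * Buffer replacement note: a fresh cluster is allocated and DMA-loaded
 * before the received mbuf is passed up the stack.  If loading the new
 * cluster fails, the old mbuf is re-loaded and handed back to the
 * hardware instead of being delivered, so the Rx ring never loses a
 * slot; the frame is simply counted as an input error.
 */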
917 :
918 : int
919 0 : rtwn_tx(void *cookie, struct mbuf *m, struct ieee80211_node *ni)
920 : {
921 0 : struct rtwn_pci_softc *sc = cookie;
922 0 : struct ieee80211com *ic = &sc->sc_sc.sc_ic;
923 : struct ieee80211_frame *wh;
924 : struct ieee80211_key *k = NULL;
925 : struct rtwn_tx_ring *tx_ring;
926 : struct rtwn_tx_data *data;
927 : struct r92c_tx_desc_pci *txd;
928 : uint16_t qos;
929 : uint8_t raid, type, tid, qid;
930 : int hasqos, error;
931 :
932 0 : wh = mtod(m, struct ieee80211_frame *);
933 0 : type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
934 :
935 0 : if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
936 0 : k = ieee80211_get_txkey(ic, wh, ni);
937 0 : if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
938 0 : return (ENOBUFS);
939 0 : wh = mtod(m, struct ieee80211_frame *);
940 0 : }
941 :
942 0 : if ((hasqos = ieee80211_has_qos(wh))) {
943 0 : qos = ieee80211_get_qos(wh);
944 0 : tid = qos & IEEE80211_QOS_TID;
945 0 : qid = ieee80211_up_to_ac(ic, tid);
946 0 : } else if (type != IEEE80211_FC0_TYPE_DATA) {
947 : qid = RTWN_VO_QUEUE;
948 0 : } else
949 : qid = RTWN_BE_QUEUE;
950 :
951 : /* Grab a Tx buffer from the ring. */
952 0 : tx_ring = &sc->tx_ring[qid];
953 0 : data = &tx_ring->tx_data[tx_ring->cur];
954 0 : if (data->m != NULL) {
955 0 : m_freem(m);
956 0 : return (ENOBUFS);
957 : }
958 :
959 : /* Fill Tx descriptor. */
960 0 : txd = &tx_ring->desc[tx_ring->cur];
961 0 : if (letoh32(txd->txdw0) & R92C_TXDW0_OWN) {
962 0 : m_freem(m);
963 0 : return (ENOBUFS);
964 : }
965 0 : txd->txdw0 = htole32(
966 : SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len) |
967 : SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |
968 : R92C_TXDW0_FSG | R92C_TXDW0_LSG);
969 0 : if (IEEE80211_IS_MULTICAST(wh->i_addr1))
970 0 : txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
971 :
972 0 : txd->txdw1 = 0;
973 : #ifdef notyet
974 : if (k != NULL) {
975 : switch (k->k_cipher) {
976 : case IEEE80211_CIPHER_WEP40:
977 : case IEEE80211_CIPHER_WEP104:
978 : case IEEE80211_CIPHER_TKIP:
979 : cipher = R92C_TXDW1_CIPHER_RC4;
980 : break;
981 : case IEEE80211_CIPHER_CCMP:
982 : cipher = R92C_TXDW1_CIPHER_AES;
983 : break;
984 : default:
985 : cipher = R92C_TXDW1_CIPHER_NONE;
986 : }
987 : txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher));
988 : }
989 : #endif
990 0 : txd->txdw4 = 0;
991 0 : txd->txdw5 = 0;
992 0 : if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
993 0 : type == IEEE80211_FC0_TYPE_DATA) {
994 0 : if (ic->ic_curmode == IEEE80211_MODE_11B ||
995 0 : (sc->sc_sc.sc_flags & RTWN_FLAG_FORCE_RAID_11B))
996 0 : raid = R92C_RAID_11B;
997 : else
998 : raid = R92C_RAID_11BG;
999 0 : txd->txdw1 |= htole32(
1000 : SM(R92C_TXDW1_MACID, R92C_MACID_BSS) |
1001 : SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
1002 : SM(R92C_TXDW1_RAID, raid) |
1003 : R92C_TXDW1_AGGBK);
1004 :
1005 : /* Request TX status report for AMRR. */
1006 0 : txd->txdw2 |= htole32(R92C_TXDW2_CCX_RPT);
1007 :
1008 0 : if (m->m_pkthdr.len + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
1009 0 : txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1010 : R92C_TXDW4_HWRTSEN);
1011 0 : } else if (ic->ic_flags & IEEE80211_F_USEPROT) {
1012 0 : if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1013 0 : txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF |
1014 : R92C_TXDW4_HWRTSEN);
1015 0 : } else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1016 0 : txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
1017 : R92C_TXDW4_HWRTSEN);
1018 0 : }
1019 : }
1020 :
1021 0 : if (ic->ic_curmode == IEEE80211_MODE_11B)
1022 0 : txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 0));
1023 : else
1024 0 : txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 3));
1025 0 : txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FBLIMIT, 0xf));
1026 :
1027 : /* Use AMRR rate for data. */
1028 0 : txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1029 0 : if (ic->ic_fixed_rate != -1)
1030 0 : txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1031 : ic->ic_fixed_rate));
1032 : else
1033 0 : txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE,
1034 : ni->ni_txrate));
1035 0 : txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FBLIMIT, 0x1f));
1036 0 : } else {
1037 0 : txd->txdw1 |= htole32(
1038 : SM(R92C_TXDW1_MACID, 0) |
1039 : SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
1040 : SM(R92C_TXDW1_RAID, R92C_RAID_11B));
1041 :
1042 : /* Force CCK1. */
1043 0 : txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
1044 0 : txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
1045 : }
1046 : /* Set sequence number (already little endian). */
1047 0 : txd->txdseq = *(uint16_t *)wh->i_seq;
1048 :
1049 0 : if (!hasqos) {
1050 : /* Use HW sequence numbering for non-QoS frames. */
1051 0 : txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ);
1052 0 : txd->txdseq |= htole16(0x8000); /* WTF? */
1053 0 : } else
1054 0 : txd->txdw4 |= htole32(R92C_TXDW4_QOS);
1055 :
1056 0 : error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1057 : BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1058 0 : if (error && error != EFBIG) {
1059 0 : printf("%s: can't map mbuf (error %d)\n",
1060 0 : sc->sc_dev.dv_xname, error);
1061 0 : m_freem(m);
1062 0 : return error;
1063 : }
1064 0 : if (error != 0) {
1065 : /* Too many DMA segments, linearize mbuf. */
1066 0 : if (m_defrag(m, M_DONTWAIT)) {
1067 0 : m_freem(m);
1068 0 : return ENOBUFS;
1069 : }
1070 :
1071 0 : error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
1072 : BUS_DMA_NOWAIT | BUS_DMA_WRITE);
1073 0 : if (error != 0) {
1074 0 : printf("%s: can't map mbuf (error %d)\n",
1075 0 : sc->sc_dev.dv_xname, error);
1076 0 : m_freem(m);
1077 0 : return error;
1078 : }
1079 : }
1080 :
1081 0 : txd->txbufaddr = htole32(data->map->dm_segs[0].ds_addr);
1082 0 : txd->txbufsize = htole16(m->m_pkthdr.len);
1083 0 : bus_space_barrier(sc->sc_st, sc->sc_sh, 0, sc->sc_mapsize,
1084 : BUS_SPACE_BARRIER_WRITE);
1085 0 : txd->txdw0 |= htole32(R92C_TXDW0_OWN);
1086 :
1087 0 : bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1088 : BUS_DMASYNC_POSTWRITE);
1089 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
1090 : BUS_DMASYNC_POSTWRITE);
1091 :
1092 0 : data->m = m;
1093 0 : data->ni = ni;
1094 :
1095 : #if NBPFILTER > 0
1096 0 : if (__predict_false(sc->sc_drvbpf != NULL)) {
1097 0 : struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
1098 0 : struct mbuf mb;
1099 :
1100 0 : tap->wt_flags = 0;
1101 0 : tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1102 0 : tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1103 :
1104 0 : mb.m_data = (caddr_t)tap;
1105 0 : mb.m_len = sc->sc_txtap_len;
1106 0 : mb.m_next = m;
1107 0 : mb.m_nextpkt = NULL;
1108 0 : mb.m_type = 0;
1109 0 : mb.m_flags = 0;
1110 0 : bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1111 0 : }
1112 : #endif
1113 :
1114 0 : tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
1115 0 : tx_ring->queued++;
1116 :
1117 0 : if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
1118 0 : sc->qfullmsk |= (1 << qid);
1119 :
1120 : /* Kick TX. */
1121 0 : rtwn_pci_write_2(sc, R92C_PCIE_CTRL_REG, (1 << qid));
1122 :
1123 0 : return (0);
1124 0 : }
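/*
 * Handoff sketch: once the payload is DMA-mapped, txbufaddr and
 * txbufsize are filled in, a write barrier is issued, and only then is
 * R92C_TXDW0_OWN set to pass the descriptor to the chip.  The final
 * write of (1 << qid) to R92C_PCIE_CTRL_REG is the per-queue kick that
 * tells the DMA engine to poll its ring.
 */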
1125 :
1126 : void
1127 0 : rtwn_tx_done(struct rtwn_pci_softc *sc, int qid)
1128 : {
1129 0 : struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1130 0 : struct ifnet *ifp = &ic->ic_if;
1131 0 : struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
1132 : struct rtwn_tx_data *tx_data;
1133 : struct r92c_tx_desc_pci *tx_desc;
1134 : int i;
1135 :
1136 0 : bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
1137 : BUS_DMASYNC_POSTREAD);
1138 :
1139 0 : for (i = 0; i < RTWN_TX_LIST_COUNT; i++) {
1140 0 : tx_data = &tx_ring->tx_data[i];
1141 0 : if (tx_data->m == NULL)
1142 : continue;
1143 :
1144 0 : tx_desc = &tx_ring->desc[i];
1145 0 : if (letoh32(tx_desc->txdw0) & R92C_TXDW0_OWN)
1146 : continue;
1147 :
1148 0 : bus_dmamap_unload(sc->sc_dmat, tx_data->map);
1149 0 : m_freem(tx_data->m);
1150 0 : tx_data->m = NULL;
1151 0 : ieee80211_release_node(ic, tx_data->ni);
1152 0 : tx_data->ni = NULL;
1153 :
1154 0 : sc->sc_sc.sc_tx_timer = 0;
1155 0 : tx_ring->queued--;
1156 :
1157 0 : rtwn_poll_c2h_events(sc);
1158 0 : }
1159 :
1160 0 : if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
1161 0 : sc->qfullmsk &= ~(1 << qid);
1162 :
1163 0 : if (sc->qfullmsk == 0) {
1164 0 : ifq_clr_oactive(&ifp->if_snd);
1165 0 : (*ifp->if_start)(ifp);
1166 0 : }
1167 0 : }
1168 :
1169 : int
1170 0 : rtwn_alloc_buffers(void *cookie)
1171 : {
1172 : /* Tx/Rx buffers were already allocated in rtwn_pci_attach() */
1173 0 : return (0);
1174 : }
1175 :
1176 : int
1177 0 : rtwn_pci_init(void *cookie)
1178 : {
1179 0 : struct rtwn_pci_softc *sc = cookie;
1180 0 : ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
1181 0 : return (0);
1182 : }
1183 :
1184 : void
1185 0 : rtwn_pci_stop(void *cookie)
1186 : {
1187 0 : struct rtwn_pci_softc *sc = cookie;
1188 : uint16_t reg;
1189 : int i, s;
1190 :
1191 0 : s = splnet();
1192 :
1193 : /* Disable interrupts. */
1194 0 : rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1195 :
1196 : /* Stop hardware. */
1197 0 : rtwn_pci_write_1(sc, R92C_TXPAUSE, 0xff);
1198 0 : rtwn_pci_write_1(sc, R92C_RF_CTRL, 0x00);
1199 0 : reg = rtwn_pci_read_1(sc, R92C_SYS_FUNC_EN);
1200 0 : reg |= R92C_SYS_FUNC_EN_BB_GLB_RST;
1201 0 : rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1202 0 : reg &= ~R92C_SYS_FUNC_EN_BB_GLB_RST;
1203 0 : rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN, reg);
1204 0 : reg = rtwn_pci_read_2(sc, R92C_CR);
1205 0 : reg &= ~(R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1206 : R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1207 : R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1208 : R92C_CR_ENSEC);
1209 0 : rtwn_pci_write_2(sc, R92C_CR, reg);
1210 0 : if (rtwn_pci_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL)
1211 0 : rtwn_fw_reset(&sc->sc_sc);
1212 : /* TODO: linux does additional btcoex stuff here */
1213 0 : rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0x80); /* linux magic number */
1214 0 : rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x23); /* ditto */
1215 0 : rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL, 0x0e); /* differs in btcoex */
1216 0 : rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0x0e);
1217 0 : rtwn_pci_write_1(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_PDN_EN);
1218 :
1219 0 : for (i = 0; i < RTWN_NTXQUEUES; i++)
1220 0 : rtwn_reset_tx_list(sc, i);
1221 0 : rtwn_reset_rx_list(sc);
1222 :
1223 0 : splx(s);
1224 0 : }
1225 :
1226 : int
1227 0 : rtwn_intr(void *xsc)
1228 : {
1229 0 : struct rtwn_pci_softc *sc = xsc;
1230 : u_int32_t status;
1231 : int i;
1232 :
1233 0 : status = rtwn_pci_read_4(sc, R92C_HISR);
1234 0 : if (status == 0 || status == 0xffffffff)
1235 0 : return (0);
1236 :
1237 : /* Disable interrupts. */
1238 0 : rtwn_pci_write_4(sc, R92C_HIMR, 0x00000000);
1239 :
1240 : /* Ack interrupts. */
1241 0 : rtwn_pci_write_4(sc, R92C_HISR, status);
1242 :
1243 : /* Vendor driver treats RX errors like ROK... */
1244 0 : if (status & (R92C_IMR_ROK | R92C_IMR_RXFOVW | R92C_IMR_RDU)) {
1245 0 : bus_dmamap_sync(sc->sc_dmat, sc->rx_ring.map, 0,
1246 : sizeof(struct r92c_rx_desc_pci) * RTWN_RX_LIST_COUNT,
1247 : BUS_DMASYNC_POSTREAD);
1248 :
1249 0 : for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
1250 0 : struct r92c_rx_desc_pci *rx_desc = &sc->rx_ring.desc[i];
1251 0 : struct rtwn_rx_data *rx_data = &sc->rx_ring.rx_data[i];
1252 :
1253 0 : if (letoh32(rx_desc->rxdw0) & R92C_RXDW0_OWN)
1254 0 : continue;
1255 :
1256 0 : rtwn_rx_frame(sc, rx_desc, rx_data, i);
1257 0 : }
1258 : }
1259 :
1260 0 : if (status & R92C_IMR_BDOK)
1261 0 : rtwn_tx_done(sc, RTWN_BEACON_QUEUE);
1262 0 : if (status & R92C_IMR_HIGHDOK)
1263 0 : rtwn_tx_done(sc, RTWN_HIGH_QUEUE);
1264 0 : if (status & R92C_IMR_MGNTDOK)
1265 0 : rtwn_tx_done(sc, RTWN_MGNT_QUEUE);
1266 0 : if (status & R92C_IMR_BKDOK)
1267 0 : rtwn_tx_done(sc, RTWN_BK_QUEUE);
1268 0 : if (status & R92C_IMR_BEDOK)
1269 0 : rtwn_tx_done(sc, RTWN_BE_QUEUE);
1270 0 : if (status & R92C_IMR_VIDOK)
1271 0 : rtwn_tx_done(sc, RTWN_VI_QUEUE);
1272 0 : if (status & R92C_IMR_VODOK)
1273 0 : rtwn_tx_done(sc, RTWN_VO_QUEUE);
1274 :
1275 : /* Enable interrupts. */
1276 0 : rtwn_pci_write_4(sc, R92C_HIMR, RTWN_INT_ENABLE);
1277 :
1278 0 : return (1);
1279 0 : }
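/*
 * Interrupt flow, roughly: read R92C_HISR, bail out if no bits are set
 * (or the device is gone), mask and acknowledge, drain the Rx ring on
 * ROK/RXFOVW/RDU (the vendor driver treats Rx errors like ROK), reap
 * completed Tx descriptors per queue on the *DOK bits, and finally
 * restore the RTWN_INT_ENABLE mask.
 */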
1280 :
1281 : int
1282 0 : rtwn_is_oactive(void *cookie)
1283 : {
1284 0 : struct rtwn_pci_softc *sc = cookie;
1285 :
1286 0 : return (sc->qfullmsk != 0);
1287 : }
1288 :
1289 : int
1290 0 : rtwn_llt_write(struct rtwn_pci_softc *sc, uint32_t addr, uint32_t data)
1291 : {
1292 : int ntries;
1293 :
1294 0 : rtwn_pci_write_4(sc, R92C_LLT_INIT,
1295 0 : SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) |
1296 0 : SM(R92C_LLT_INIT_ADDR, addr) |
1297 0 : SM(R92C_LLT_INIT_DATA, data));
1298 : /* Wait for write operation to complete. */
1299 0 : for (ntries = 0; ntries < 20; ntries++) {
1300 0 : if (MS(rtwn_pci_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) ==
1301 : R92C_LLT_INIT_OP_NO_ACTIVE)
1302 0 : return (0);
1303 0 : DELAY(5);
1304 : }
1305 0 : return (ETIMEDOUT);
1306 0 : }
1307 :
1308 : int
1309 0 : rtwn_llt_init(struct rtwn_pci_softc *sc)
1310 : {
1311 : int i, error;
1312 :
1313 : /* Reserve pages [0; R92C_TX_PAGE_COUNT]. */
1314 0 : for (i = 0; i < R92C_TX_PAGE_COUNT; i++) {
1315 0 : if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1316 0 : return (error);
1317 : }
1318 : /* NB: 0xff indicates end-of-list. */
1319 0 : if ((error = rtwn_llt_write(sc, i, 0xff)) != 0)
1320 0 : return (error);
1321 : /*
1322 : * Use pages [R92C_TX_PAGE_COUNT + 1; R92C_TXPKTBUF_COUNT - 1]
1323 : * as ring buffer.
1324 : */
1325 0 : for (++i; i < R92C_TXPKTBUF_COUNT - 1; i++) {
1326 0 : if ((error = rtwn_llt_write(sc, i, i + 1)) != 0)
1327 0 : return (error);
1328 : }
1329 : /* Make the last page point to the beginning of the ring buffer. */
1330 0 : error = rtwn_llt_write(sc, i, R92C_TX_PAGE_COUNT + 1);
1331 0 : return (error);
1332 0 : }
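/*
 * Resulting LLT layout, as a worked example with the page counts above:
 *
 *   pages 0..244   each point at the next page (reserved area)
 *   page  245      holds 0xff, terminating the reserved list
 *   pages 246..254 each point at the next page
 *   page  255      points back at 246, closing the ring buffer
 */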
1333 :
1334 : int
1335 0 : rtwn_power_on(void *cookie)
1336 : {
1337 0 : struct rtwn_pci_softc *sc = cookie;
1338 : uint32_t reg;
1339 : int ntries;
1340 :
1341 : /* Wait for autoload done bit. */
1342 0 : for (ntries = 0; ntries < 1000; ntries++) {
1343 0 : if (rtwn_pci_read_1(sc, R92C_APS_FSMCO) &
1344 : R92C_APS_FSMCO_PFM_ALDN)
1345 : break;
1346 0 : DELAY(5);
1347 : }
1348 0 : if (ntries == 1000) {
1349 0 : printf("%s: timeout waiting for chip autoload\n",
1350 0 : sc->sc_dev.dv_xname);
1351 0 : return (ETIMEDOUT);
1352 : }
1353 :
1354 : /* Unlock ISO/CLK/Power control register. */
1355 0 : rtwn_pci_write_1(sc, R92C_RSV_CTRL, 0);
1356 :
1357 : /* TODO: check if we need this for 8188CE */
1358 0 : if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1359 : /* bt coex */
1360 0 : reg = rtwn_pci_read_4(sc, R92C_APS_FSMCO);
1361 0 : reg |= (R92C_APS_FSMCO_SOP_ABG |
1362 : R92C_APS_FSMCO_SOP_AMB |
1363 : R92C_APS_FSMCO_XOP_BTCK);
1364 0 : rtwn_pci_write_4(sc, R92C_APS_FSMCO, reg);
1365 0 : }
1366 :
1367 : /* Move SPS into PWM mode. */
1368 0 : rtwn_pci_write_1(sc, R92C_SPS0_CTRL, 0x2b);
1369 0 : DELAY(100);
1370 :
1371 : /* Set low byte to 0x0f, leave others unchanged. */
1372 0 : rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL,
1373 0 : (rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL) & 0xffffff00) | 0x0f);
1374 :
1375 : /* TODO: check if we need this for 8188CE */
1376 0 : if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1377 : /* bt coex */
1378 0 : reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL);
1379 0 : reg &= (~0x00024800); /* XXX magic from linux */
1380 0 : rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL, reg);
1381 0 : }
1382 :
1383 0 : rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1384 0 : (rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & 0xff) |
1385 0 : R92C_SYS_ISO_CTRL_PWC_EV12V | R92C_SYS_ISO_CTRL_DIOR);
1386 0 : DELAY(200);
1387 :
1388 : /* TODO: linux does additional btcoex stuff here */
1389 :
1390 : /* Auto enable WLAN. */
1391 0 : rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1392 0 : rtwn_pci_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
1393 0 : for (ntries = 0; ntries < 1000; ntries++) {
1394 0 : if (!(rtwn_pci_read_2(sc, R92C_APS_FSMCO) &
1395 : R92C_APS_FSMCO_APFM_ONMAC))
1396 : break;
1397 0 : DELAY(5);
1398 : }
1399 0 : if (ntries == 1000) {
1400 0 : printf("%s: timeout waiting for MAC auto ON\n",
1401 0 : sc->sc_dev.dv_xname);
1402 0 : return (ETIMEDOUT);
1403 : }
1404 :
1405 : /* Enable radio, GPIO and LED functions. */
1406 0 : rtwn_pci_write_2(sc, R92C_APS_FSMCO,
1407 : R92C_APS_FSMCO_AFSM_PCIE |
1408 : R92C_APS_FSMCO_PDN_EN |
1409 : R92C_APS_FSMCO_PFM_ALDN);
1410 : /* Release RF digital isolation. */
1411 0 : rtwn_pci_write_2(sc, R92C_SYS_ISO_CTRL,
1412 0 : rtwn_pci_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR);
1413 :
1414 0 : if (sc->sc_sc.chip & RTWN_CHIP_92C)
1415 0 : rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x77);
1416 : else
1417 0 : rtwn_pci_write_1(sc, R92C_PCIE_CTRL_REG + 3, 0x22);
1418 :
1419 0 : rtwn_pci_write_4(sc, R92C_INT_MIG, 0);
1420 :
1421 0 : if (sc->sc_sc.board_type != R92C_BOARD_TYPE_DONGLE) {
1422 : /* bt coex */
1423 0 : reg = rtwn_pci_read_4(sc, R92C_AFE_XTAL_CTRL + 2);
1424 0 : reg &= 0xfd; /* XXX magic from linux */
1425 0 : rtwn_pci_write_4(sc, R92C_AFE_XTAL_CTRL + 2, reg);
1426 0 : }
1427 :
1428 0 : rtwn_pci_write_1(sc, R92C_GPIO_MUXCFG,
1429 0 : rtwn_pci_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_RFKILL);
1430 :
1431 0 : reg = rtwn_pci_read_1(sc, R92C_GPIO_IO_SEL);
1432 0 : if (!(reg & R92C_GPIO_IO_SEL_RFKILL)) {
1433 0 : printf("%s: radio is disabled by hardware switch\n",
1434 0 : sc->sc_dev.dv_xname);
1435 0 : return (EPERM); /* :-) */
1436 : }
1437 :
1438 : /* Initialize MAC. */
1439 0 : rtwn_pci_write_1(sc, R92C_APSD_CTRL,
1440 0 : rtwn_pci_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF);
1441 0 : for (ntries = 0; ntries < 200; ntries++) {
1442 0 : if (!(rtwn_pci_read_1(sc, R92C_APSD_CTRL) &
1443 : R92C_APSD_CTRL_OFF_STATUS))
1444 : break;
1445 0 : DELAY(500);
1446 : }
1447 0 : if (ntries == 200) {
1448 0 : printf("%s: timeout waiting for MAC initialization\n",
1449 0 : sc->sc_dev.dv_xname);
1450 0 : return (ETIMEDOUT);
1451 : }
1452 :
1453 : /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
1454 0 : reg = rtwn_pci_read_2(sc, R92C_CR);
1455 0 : reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
1456 : R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
1457 : R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
1458 : R92C_CR_ENSEC;
1459 0 : rtwn_pci_write_2(sc, R92C_CR, reg);
1460 :
1461 0 : rtwn_pci_write_1(sc, 0xfe10, 0x19);
1462 :
1463 0 : return (0);
1464 0 : }
1465 :
1466 : int
1467 0 : rtwn_dma_init(void *cookie)
1468 : {
1469 0 : struct rtwn_pci_softc *sc = cookie;
1470 : uint32_t reg;
1471 : int error;
1472 :
1473 : /* Initialize LLT table. */
1474 0 : error = rtwn_llt_init(sc);
1475 0 : if (error != 0)
1476 0 : return error;
1477 :
1478 : /* Set number of pages for normal priority queue. */
1479 0 : rtwn_pci_write_2(sc, R92C_RQPN_NPQ, 0);
1480 0 : rtwn_pci_write_4(sc, R92C_RQPN,
1481 : /* Set number of pages for public queue. */
1482 : SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES) |
1483 : /* Set number of pages for high priority queue. */
1484 : SM(R92C_RQPN_HPQ, R92C_HPQ_NPAGES) |
1485 : /* Set number of pages for low priority queue. */
1486 : SM(R92C_RQPN_LPQ, R92C_LPQ_NPAGES) |
1487 : /* Load values. */
1488 : R92C_RQPN_LD);
1489 :
1490 0 : rtwn_pci_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R92C_TX_PAGE_BOUNDARY);
1491 0 : rtwn_pci_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R92C_TX_PAGE_BOUNDARY);
1492 0 : rtwn_pci_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD,
1493 : R92C_TX_PAGE_BOUNDARY);
1494 0 : rtwn_pci_write_1(sc, R92C_TRXFF_BNDY, R92C_TX_PAGE_BOUNDARY);
1495 0 : rtwn_pci_write_1(sc, R92C_TDECTRL + 1, R92C_TX_PAGE_BOUNDARY);
1496 :
1497 0 : reg = rtwn_pci_read_2(sc, R92C_TRXDMA_CTRL);
1498 0 : reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
1499 0 : reg |= 0xF771;
1500 0 : rtwn_pci_write_2(sc, R92C_TRXDMA_CTRL, reg);
1501 :
1502 0 : rtwn_pci_write_4(sc, R92C_TCR,
1503 : R92C_TCR_CFENDFORM | R92C_TCR_ERRSTEN0 | R92C_TCR_ERRSTEN1);
1504 :
1505 : /* Configure Tx DMA. */
1506 0 : rtwn_pci_write_4(sc, R92C_BKQ_DESA,
1507 0 : sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr);
1508 0 : rtwn_pci_write_4(sc, R92C_BEQ_DESA,
1509 0 : sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr);
1510 0 : rtwn_pci_write_4(sc, R92C_VIQ_DESA,
1511 0 : sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr);
1512 0 : rtwn_pci_write_4(sc, R92C_VOQ_DESA,
1513 0 : sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr);
1514 0 : rtwn_pci_write_4(sc, R92C_BCNQ_DESA,
1515 0 : sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr);
1516 0 : rtwn_pci_write_4(sc, R92C_MGQ_DESA,
1517 0 : sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr);
1518 0 : rtwn_pci_write_4(sc, R92C_HQ_DESA,
1519 0 : sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr);
1520 :
1521 : /* Configure Rx DMA. */
1522 0 : rtwn_pci_write_4(sc, R92C_RX_DESA, sc->rx_ring.map->dm_segs[0].ds_addr);
1523 :
1524 : /* Set Tx/Rx transfer page boundary. */
1525 0 : rtwn_pci_write_2(sc, R92C_TRXFF_BNDY + 2, 0x27ff);
1526 :
1527 : /* Set Tx/Rx transfer page size. */
1528 0 : rtwn_pci_write_1(sc, R92C_PBP,
1529 : SM(R92C_PBP_PSRX, R92C_PBP_128) |
1530 : SM(R92C_PBP_PSTX, R92C_PBP_128));
1531 :
1532 0 : return (0);
1533 0 : }
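/*
 * DMA bring-up sketch: the LLT list is built first, the page budget from
 * the R92C_*_NPAGES defines is loaded into R92C_RQPN, the various Tx
 * page boundary registers are set to R92C_TX_PAGE_BOUNDARY, and each
 * queue's descriptor ring base (tx_ring[qid].map->dm_segs[0].ds_addr) is
 * programmed into its R92C_*_DESA register, with the Rx ring base going
 * to R92C_RX_DESA.
 */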
1534 :
1535 : int
1536 0 : rtwn_fw_loadpage(void *cookie, int page, uint8_t *buf, int len)
1537 : {
1538 0 : struct rtwn_pci_softc *sc = cookie;
1539 : uint32_t reg;
1540 : int off, mlen, error = 0, i;
1541 :
1542 0 : reg = rtwn_pci_read_4(sc, R92C_MCUFWDL);
1543 0 : reg = RW(reg, R92C_MCUFWDL_PAGE, page);
1544 0 : rtwn_pci_write_4(sc, R92C_MCUFWDL, reg);
1545 :
1546 0 : DELAY(5);
1547 :
1548 : off = R92C_FW_START_ADDR;
1549 0 : while (len > 0) {
1550 0 : if (len > 196)
1551 0 : mlen = 196;
1552 0 : else if (len > 4)
1553 0 : mlen = 4;
1554 : else
1555 : mlen = 1;
1556 0 : for (i = 0; i < mlen; i++)
1557 0 : rtwn_pci_write_1(sc, off++, buf[i]);
1558 0 : buf += mlen;
1559 0 : len -= mlen;
1560 : }
1561 :
1562 0 : return (error);
1563 : }
1564 :
1565 : int
1566 0 : rtwn_pci_load_firmware(void *cookie, u_char **fw, size_t *len)
1567 : {
1568 0 : struct rtwn_pci_softc *sc = cookie;
1569 : const char *name;
1570 : int error;
1571 :
1572 0 : if ((sc->sc_sc.chip & (RTWN_CHIP_UMC_A_CUT | RTWN_CHIP_92C)) ==
1573 : RTWN_CHIP_UMC_A_CUT)
1574 0 : name = "rtwn-rtl8192cfwU";
1575 : else
1576 : name = "rtwn-rtl8192cfwU_B";
1577 :
1578 0 : error = loadfirmware(name, fw, len);
1579 0 : if (error)
1580 0 : printf("%s: could not read firmware %s (error %d)\n",
1581 0 : sc->sc_dev.dv_xname, name, error);
1582 0 : return (error);
1583 : }
1584 :
1585 : void
1586 0 : rtwn_mac_init(void *cookie)
1587 : {
1588 0 : struct rtwn_pci_softc *sc = cookie;
1589 : int i;
1590 :
1591 : /* Write MAC initialization values. */
1592 0 : for (i = 0; i < nitems(rtl8192ce_mac); i++)
1593 0 : rtwn_pci_write_1(sc, rtl8192ce_mac[i].reg,
1594 0 : rtl8192ce_mac[i].val);
1595 0 : }
1596 :
1597 : void
1598 0 : rtwn_bb_init(void *cookie)
1599 : {
1600 0 : struct rtwn_pci_softc *sc = cookie;
1601 : const struct r92c_bb_prog *prog;
1602 : uint32_t reg;
1603 : int i;
1604 :
1605 : /* Enable BB and RF. */
1606 0 : rtwn_pci_write_2(sc, R92C_SYS_FUNC_EN,
1607 0 : rtwn_pci_read_2(sc, R92C_SYS_FUNC_EN) |
1608 0 : R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST |
1609 : R92C_SYS_FUNC_EN_DIO_RF);
1610 :
1611 0 : rtwn_pci_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83);
1612 :
1613 0 : rtwn_pci_write_1(sc, R92C_RF_CTRL,
1614 : R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB);
1615 :
1616 0 : rtwn_pci_write_1(sc, R92C_SYS_FUNC_EN,
1617 : R92C_SYS_FUNC_EN_DIO_PCIE | R92C_SYS_FUNC_EN_PCIEA |
1618 : R92C_SYS_FUNC_EN_PPLL | R92C_SYS_FUNC_EN_BB_GLB_RST |
1619 : R92C_SYS_FUNC_EN_BBRSTB);
1620 :
1621 0 : rtwn_pci_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80);
1622 :
1623 0 : rtwn_pci_write_4(sc, R92C_LEDCFG0,
1624 0 : rtwn_pci_read_4(sc, R92C_LEDCFG0) | 0x00800000);
1625 :
1626 : /* Select BB programming. */
1627 0 : prog = (sc->sc_sc.chip & RTWN_CHIP_92C) ?
1628 : &rtl8192ce_bb_prog_2t : &rtl8192ce_bb_prog_1t;
1629 :
1630 : /* Write BB initialization values. */
1631 0 : for (i = 0; i < prog->count; i++) {
1632 0 : rtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
1633 0 : DELAY(1);
1634 : }
1635 :
1636 0 : if (sc->sc_sc.chip & RTWN_CHIP_92C_1T2R) {
1637 : /* 8192C 1T only configuration. */
1638 0 : reg = rtwn_bb_read(sc, R92C_FPGA0_TXINFO);
1639 0 : reg = (reg & ~0x00000003) | 0x2;
1640 0 : rtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg);
1641 :
1642 0 : reg = rtwn_bb_read(sc, R92C_FPGA1_TXINFO);
1643 0 : reg = (reg & ~0x00300033) | 0x00200022;
1644 0 : rtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg);
1645 :
1646 0 : reg = rtwn_bb_read(sc, R92C_CCK0_AFESETTING);
1647 0 : reg = (reg & ~0xff000000) | 0x45 << 24;
1648 0 : rtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg);
1649 :
1650 0 : reg = rtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA);
1651 0 : reg = (reg & ~0x000000ff) | 0x23;
1652 0 : rtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg);
1653 :
1654 0 : reg = rtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1);
1655 0 : reg = (reg & ~0x00000030) | 1 << 4;
1656 0 : rtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg);
1657 :
1658 0 : reg = rtwn_bb_read(sc, 0xe74);
1659 0 : reg = (reg & ~0x0c000000) | 2 << 26;
1660 0 : rtwn_bb_write(sc, 0xe74, reg);
1661 0 : reg = rtwn_bb_read(sc, 0xe78);
1662 0 : reg = (reg & ~0x0c000000) | 2 << 26;
1663 0 : rtwn_bb_write(sc, 0xe78, reg);
1664 0 : reg = rtwn_bb_read(sc, 0xe7c);
1665 0 : reg = (reg & ~0x0c000000) | 2 << 26;
1666 0 : rtwn_bb_write(sc, 0xe7c, reg);
1667 0 : reg = rtwn_bb_read(sc, 0xe80);
1668 0 : reg = (reg & ~0x0c000000) | 2 << 26;
1669 0 : rtwn_bb_write(sc, 0xe80, reg);
1670 0 : reg = rtwn_bb_read(sc, 0xe88);
1671 0 : reg = (reg & ~0x0c000000) | 2 << 26;
1672 0 : rtwn_bb_write(sc, 0xe88, reg);
1673 0 : }
1674 :
1675 : /* Write AGC values. */
1676 0 : for (i = 0; i < prog->agccount; i++) {
1677 0 : rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE,
1678 0 : prog->agcvals[i]);
1679 0 : DELAY(1);
1680 : }
1681 :
1682 0 : if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR)
1683 0 : sc->sc_sc.sc_flags |= RTWN_FLAG_CCK_HIPWR;
1684 0 : }
1685 :
1686 : void
1687 0 : rtwn_calib_to(void *arg)
1688 : {
1689 0 : struct rtwn_pci_softc *sc = arg;
1690 0 : struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1691 : int s;
1692 :
1693 0 : s = splnet();
1694 0 : ieee80211_amrr_choose(&sc->amrr, ic->ic_bss, &sc->amn);
1695 0 : splx(s);
1696 :
1697 0 : rtwn_calib(&sc->sc_sc);
1698 0 : }
1699 :
1700 : void
1701 0 : rtwn_next_calib(void *cookie)
1702 : {
1703 0 : struct rtwn_pci_softc *sc = cookie;
1704 :
1705 0 : timeout_add_sec(&sc->calib_to, 2);
1706 0 : }
1707 :
1708 : void
1709 0 : rtwn_cancel_calib(void *cookie)
1710 : {
1711 0 : struct rtwn_pci_softc *sc = cookie;
1712 :
1713 0 : if (timeout_initialized(&sc->calib_to))
1714 0 : timeout_del(&sc->calib_to);
1715 0 : }
1716 :
1717 : void
1718 0 : rtwn_scan_to(void *arg)
1719 : {
1720 0 : struct rtwn_pci_softc *sc = arg;
1721 :
1722 0 : rtwn_next_scan(&sc->sc_sc);
1723 0 : }
1724 :
1725 : void
1726 0 : rtwn_pci_next_scan(void *cookie)
1727 : {
1728 0 : struct rtwn_pci_softc *sc = cookie;
1729 :
1730 0 : timeout_add_msec(&sc->scan_to, 200);
1731 0 : }
1732 :
1733 : void
1734 0 : rtwn_cancel_scan(void *cookie)
1735 : {
1736 0 : struct rtwn_pci_softc *sc = cookie;
1737 :
1738 0 : if (timeout_initialized(&sc->scan_to))
1739 0 : timeout_del(&sc->scan_to);
1740 0 : }
1741 :
1742 : void
1743 0 : rtwn_wait_async(void *cookie)
1744 : {
1745 : /* nothing to do */
1746 0 : }
1747 :
1748 : void
1749 0 : rtwn_tx_report(struct rtwn_pci_softc *sc, uint8_t *buf, int len)
1750 : {
1751 0 : struct r92c_c2h_tx_rpt *rpt = (struct r92c_c2h_tx_rpt *)buf;
1752 : int packets, tries, tx_ok, drop, expire, over;
1753 :
1754 0 : if (len != sizeof(*rpt))
1755 0 : return;
1756 :
1757 0 : packets = MS(rpt->rptb6, R92C_RPTB6_RPT_PKT_NUM);
1758 0 : tries = MS(rpt->rptb0, R92C_RPTB0_RETRY_CNT);
1759 0 : tx_ok = (rpt->rptb7 & R92C_RPTB7_PKT_OK);
1760 0 : drop = (rpt->rptb6 & R92C_RPTB6_PKT_DROP);
1761 0 : expire = (rpt->rptb6 & R92C_RPTB6_LIFE_EXPIRE);
1762 0 : over = (rpt->rptb6 & R92C_RPTB6_RETRY_OVER);
1763 :
1764 0 : if (packets > 0) {
1765 0 : sc->amn.amn_txcnt += packets;
1766 0 : if (!tx_ok || tries > 1 || drop || expire || over)
1767 0 : sc->amn.amn_retrycnt++;
1768 : }
1769 0 : }
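/*
 * Rate control feedback sketch: the firmware's C2H Tx report carries the
 * number of packets covered and their retry/drop status.  Each report
 * adds to amn_txcnt, and any sign of trouble (not OK, more than one try,
 * drop, lifetime expiry, retry overflow) bumps amn_retrycnt; these
 * counters feed ieee80211_amrr_choose() in rtwn_calib_to().
 */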
1770 :
1771 : void
1772 0 : rtwn_poll_c2h_events(struct rtwn_pci_softc *sc)
1773 : {
1774 : const uint16_t off = R92C_C2HEVT_MSG + sizeof(struct r92c_c2h_evt);
1775 0 : uint8_t buf[R92C_C2H_MSG_MAX_LEN];
1776 : uint8_t id, len, status;
1777 : int i;
1778 :
1779 : /* Read current status. */
1780 0 : status = rtwn_pci_read_1(sc, R92C_C2HEVT_CLEAR);
1781 0 : if (status == R92C_C2HEVT_HOST_CLOSE)
1782 0 : return; /* nothing to do */
1783 :
1784 0 : if (status == R92C_C2HEVT_FW_CLOSE) {
1785 0 : len = rtwn_pci_read_1(sc, R92C_C2HEVT_MSG);
1786 0 : id = MS(len, R92C_C2H_EVTB0_ID);
1787 0 : len = MS(len, R92C_C2H_EVTB0_LEN);
1788 :
1789 0 : if (id == R92C_C2HEVT_TX_REPORT && len <= sizeof(buf)) {
1790 0 : memset(buf, 0, sizeof(buf));
1791 0 : for (i = 0; i < len; i++)
1792 0 : buf[i] = rtwn_pci_read_1(sc, off + i);
1793 0 : rtwn_tx_report(sc, buf, len);
1794 0 : } else
1795 : DPRINTF(("unhandled C2H event %d (%d bytes)\n",
1796 : id, len));
1797 : }
1798 :
1799 : /* Prepare for next event. */
1800 0 : rtwn_pci_write_1(sc, R92C_C2HEVT_CLEAR, R92C_C2HEVT_HOST_CLOSE);
1801 0 : }