Line data Source code
1 : /* $OpenBSD: if_bwfm_pci.c,v 1.27 2018/08/20 18:58:06 patrick Exp $ */
2 : /*
3 : * Copyright (c) 2010-2016 Broadcom Corporation
4 : * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5 : *
6 : * Permission to use, copy, modify, and/or distribute this software for any
7 : * purpose with or without fee is hereby granted, provided that the above
8 : * copyright notice and this permission notice appear in all copies.
9 : *
10 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 : */
18 :
19 : #include "bpfilter.h"
20 :
21 : #include <sys/param.h>
22 : #include <sys/systm.h>
23 : #include <sys/buf.h>
24 : #include <sys/kernel.h>
25 : #include <sys/malloc.h>
26 : #include <sys/device.h>
27 : #include <sys/queue.h>
28 : #include <sys/socket.h>
29 :
30 : #if NBPFILTER > 0
31 : #include <net/bpf.h>
32 : #endif
33 : #include <net/if.h>
34 : #include <net/if_dl.h>
35 : #include <net/if_media.h>
36 :
37 : #include <netinet/in.h>
38 : #include <netinet/if_ether.h>
39 :
40 : #include <net80211/ieee80211_var.h>
41 :
42 : #include <machine/bus.h>
43 :
44 : #include <dev/pci/pcireg.h>
45 : #include <dev/pci/pcivar.h>
46 : #include <dev/pci/pcidevs.h>
47 :
48 : #include <dev/ic/bwfmvar.h>
49 : #include <dev/ic/bwfmreg.h>
50 : #include <dev/pci/if_bwfm_pci.h>
51 :
/* Sizes of the host buffers handed to the firmware over DMA. */
#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN

/*
 * Fixed host-to-device and device-to-host message rings; additional
 * per-connection TX flowrings are created dynamically on top of these.
 */
#define BWFM_NUM_TX_MSGRINGS			2
#define BWFM_NUM_RX_MSGRINGS			3

/* Number of packet-id slots available per direction (see bwfm_pci_pkts). */
#define BWFM_NUM_IOCTL_PKTIDS			8
#define BWFM_NUM_TX_PKTIDS			2048
#define BWFM_NUM_RX_PKTIDS			1024

/* DMA segments per packet map; a single segment per mbuf is used. */
#define BWFM_NUM_IOCTL_DESCS			1
#define BWFM_NUM_TX_DESCS			1
#define BWFM_NUM_RX_DESCS			1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif
77 :
/* Life-cycle state of a dynamically created flowring. */
enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};
84 :
/*
 * One message ring shared with the firmware.  The read/write pointers
 * either live in device TCM (w_idx_addr/r_idx_addr are TCM offsets) or
 * in a host DMA buffer, depending on the negotiated shared-info flags.
 */
struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;	/* where the write index lives */
	uint32_t		 r_idx_addr;	/* where the read index lives */
	uint32_t		 w_ptr;		/* cached write index */
	uint32_t		 r_ptr;		/* cached read index */
	int			 nitem;		/* number of ring slots */
	int			 itemsz;	/* size of one slot in bytes */
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;		/* backing DMA memory */
	struct mbuf		*m;

	/* Flowring-only state: firmware FIFO number and peer MAC. */
	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};
99 :
/*
 * A completed ioctl response queued on sc_ioctlq until the matching
 * transaction id is consumed by the waiting dcmd caller.
 */
struct bwfm_pci_ioctl {
	uint16_t		 transid;	/* matches the request's id */
	uint16_t		 retlen;	/* response payload length */
	int16_t			 status;	/* firmware status code */
	struct mbuf		*m;		/* response payload */
	TAILQ_ENTRY(bwfm_pci_ioctl) next;
};
107 :
/* One packet-id slot: a DMA map plus the mbuf currently loaded into it. */
struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;		/* NULL when the slot is free */
};
112 :
/*
 * Packet-id allocator: maps small integer ids (exchanged with the
 * firmware) to in-flight mbufs.  'last' remembers the most recently
 * used slot so the circular search starts after it.
 */
struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};
118 :
/* Per-device state for a bwfm PCIe instance. */
struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;		/* generic bwfm state, must be first */
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void 			*sc_ih;		/* interrupt handle */

	int			 sc_initialized;	/* preinit done */

	/* BAR0: PCIe core registers */
	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	/* BAR1: device-internal memory (TCM) */
	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	/* Shared-info structure the firmware publishes at the end of RAM. */
	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	/* Optional host-DMA ring index area (0 when indices live in TCM). */
	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	/* Firmware console ("dmesg") location in TCM. */
	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	/* The five fixed rings plus dynamically allocated TX flowrings. */
	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	/* Pending ioctl responses and the rolling transaction id. */
	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;
	uint16_t		 sc_ioctl_transid;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	/* Packet-id maps for each traffic class. */
	struct bwfm_pci_pkts	 sc_ioctl_pkts;
	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;
};
183 :
/* A single-segment, kernel-mapped DMA buffer. */
struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;	/* kernel virtual mapping */
};

/* Accessors for map, length, device address and kernel address. */
#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
195 :
/* Autoconf glue. */
int		 bwfm_pci_match(struct device *, void *, void *);
void		 bwfm_pci_attach(struct device *, struct device *, void *);
int		 bwfm_pci_detach(struct device *, int);

/* Interrupts, firmware download, core selection. */
int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
		    size_t, const u_char *, size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int );

/* DMA memory and packet-id management. */
struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		    bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, struct mbuf *,
		    uint32_t *, paddr_t *);
struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		    struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t);

/* Message-ring pointer handling and slot reservation. */
void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int, int *);
void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);

/* RX dispatch. */
void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);

/* Buscore backend used by the generic chip code. */
uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		    uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);

/* Flowring management. */
int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

/* Bus-ops backend. */
int		 bwfm_pci_preinit(struct bwfm_softc *);
void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

/* Msgbuf protocol backend (ioctl/dcmd path). */
int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t);
void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
		    struct msgbuf_ioctl_resp_hdr *);
283 :
/* Register access backend handed to the generic bwfm chip code. */
struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};
292 :
/* Bus backend: TX control goes through the msgbuf protocol, not here. */
struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_preinit = bwfm_pci_preinit,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
};
300 :
/* Msgbuf protocol backend; RX paths are driven from the completion rings. */
struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
	.proto_rx = NULL,
	.proto_rxctl = NULL,
};
307 :
/* Autoconf attachment glue. */
struct cfattach bwfm_pci_ca = {
	sizeof(struct bwfm_pci_softc),
	bwfm_pci_match,
	bwfm_pci_attach,
	bwfm_pci_detach,
};
314 :
/* PCI ids of supported Broadcom FullMAC chips. */
static const struct pci_matchid bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
};
321 :
322 : int
323 0 : bwfm_pci_match(struct device *parent, void *match, void *aux)
324 : {
325 0 : return (pci_matchbyid(aux, bwfm_pci_devices,
326 : nitems(bwfm_pci_devices)));
327 : }
328 :
329 : void
330 0 : bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
331 : {
332 0 : struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
333 0 : struct pci_attach_args *pa = (struct pci_attach_args *)aux;
334 : const char *intrstr;
335 0 : pci_intr_handle_t ih;
336 :
337 0 : if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
338 0 : PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
339 0 : NULL, &sc->sc_tcm_ios, 0)) {
340 0 : printf(": can't map bar1\n");
341 0 : return;
342 : }
343 :
344 0 : if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
345 0 : PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
346 0 : NULL, &sc->sc_reg_ios, 0)) {
347 0 : printf(": can't map bar0\n");
348 0 : goto bar1;
349 : }
350 :
351 0 : sc->sc_pc = pa->pa_pc;
352 0 : sc->sc_tag = pa->pa_tag;
353 0 : sc->sc_id = pa->pa_id;
354 0 : sc->sc_dmat = pa->pa_dmat;
355 :
356 : /* Map and establish the interrupt. */
357 0 : if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
358 0 : printf(": couldn't map interrupt\n");
359 : goto bar0;
360 : }
361 0 : intrstr = pci_intr_string(pa->pa_pc, ih);
362 :
363 0 : sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
364 0 : bwfm_pci_intr, sc, DEVNAME(sc));
365 0 : if (sc->sc_ih == NULL) {
366 0 : printf(": couldn't establish interrupt");
367 0 : if (intrstr != NULL)
368 0 : printf(" at %s", intrstr);
369 0 : printf("\n");
370 0 : goto bar1;
371 : }
372 0 : printf(": %s\n", intrstr);
373 :
374 0 : sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
375 0 : sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
376 0 : bwfm_attach(&sc->sc_sc);
377 0 : config_mountroot(self, bwfm_attachhook);
378 0 : return;
379 :
380 : bar0:
381 0 : bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
382 : bar1:
383 0 : bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
384 0 : }
385 :
386 : int
387 0 : bwfm_pci_preinit(struct bwfm_softc *bwfm)
388 : {
389 0 : struct bwfm_pci_softc *sc = (void *)bwfm;
390 0 : struct bwfm_pci_ringinfo ringinfo;
391 : const char *name, *nvname;
392 0 : u_char *ucode, *nvram = NULL;
393 0 : size_t size, nvlen = 0;
394 : uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
395 : uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
396 : uint32_t idx_offset, reg;
397 : int i;
398 :
399 0 : if (sc->sc_initialized)
400 0 : return 0;
401 :
402 0 : sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
403 0 : if (bwfm_chip_attach(&sc->sc_sc) != 0) {
404 0 : printf("%s: cannot attach chip\n", DEVNAME(sc));
405 0 : return 1;
406 : }
407 :
408 0 : bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
409 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
410 : BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
411 0 : reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
412 : BWFM_PCI_PCIE2REG_CONFIGDATA);
413 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
414 : BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
415 :
416 0 : switch (bwfm->sc_chip.ch_chip)
417 : {
418 : case BRCM_CC_4350_CHIP_ID:
419 0 : if (bwfm->sc_chip.ch_chiprev > 7) {
420 : name = "brcmfmac4350-pcie.bin";
421 : nvname = "brcmfmac4350-pcie.nvram";
422 0 : } else {
423 : name = "brcmfmac4350c2-pcie.bin";
424 : nvname = "brcmfmac4350c2-pcie.nvram";
425 : }
426 : break;
427 : case BRCM_CC_4356_CHIP_ID:
428 : name = "brcmfmac4356-pcie.bin";
429 : nvname = "brcmfmac4356-pcie.nvram";
430 0 : break;
431 : case BRCM_CC_43602_CHIP_ID:
432 : name = "brcmfmac43602-pcie.bin";
433 : nvname = "brcmfmac43602-pcie.nvram";
434 0 : break;
435 : case BRCM_CC_4371_CHIP_ID:
436 : name = "brcmfmac4371-pcie.bin";
437 : nvname = "brcmfmac4371-pcie.nvram";
438 0 : break;
439 : default:
440 0 : printf("%s: unknown firmware for chip %s\n",
441 0 : DEVNAME(sc), bwfm->sc_chip.ch_name);
442 0 : return 1;
443 : }
444 :
445 0 : if (loadfirmware(name, &ucode, &size) != 0) {
446 0 : printf("%s: failed loadfirmware of file %s\n",
447 0 : DEVNAME(sc), name);
448 0 : return 1;
449 : }
450 :
451 : /* NVRAM is optional. */
452 0 : loadfirmware(nvname, &nvram, &nvlen);
453 :
454 : /* Retrieve RAM size from firmware. */
455 0 : if (size >= BWFM_RAMSIZE + 8) {
456 0 : uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
457 0 : if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
458 0 : bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
459 0 : }
460 :
461 0 : if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
462 0 : printf("%s: could not load microcode\n",
463 0 : DEVNAME(sc));
464 0 : free(ucode, M_DEVBUF, size);
465 0 : free(nvram, M_DEVBUF, nvlen);
466 0 : return 1;
467 : }
468 0 : free(ucode, M_DEVBUF, size);
469 0 : free(nvram, M_DEVBUF, nvlen);
470 :
471 0 : sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
472 : sc->sc_shared_address + BWFM_SHARED_INFO);
473 0 : sc->sc_shared_version = sc->sc_shared_flags;
474 0 : if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
475 0 : sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
476 0 : printf("%s: PCIe version %d unsupported\n",
477 0 : DEVNAME(sc), sc->sc_shared_version);
478 0 : return 1;
479 : }
480 :
481 0 : if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
482 0 : if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
483 0 : sc->sc_dma_idx_sz = sizeof(uint16_t);
484 : else
485 0 : sc->sc_dma_idx_sz = sizeof(uint32_t);
486 : }
487 :
488 : /* Maximum RX data buffers in the ring. */
489 0 : sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
490 : sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
491 0 : if (sc->sc_max_rxbufpost == 0)
492 0 : sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;
493 :
494 : /* Alternative offset of data in a packet */
495 0 : sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
496 : sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);
497 :
498 : /* For Power Management */
499 0 : sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
500 : sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
501 0 : sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
502 : sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);
503 :
504 : /* Ring information */
505 0 : sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
506 : sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);
507 :
508 : /* Firmware's "dmesg" */
509 0 : sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
510 : sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
511 0 : sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
512 : sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
513 0 : sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
514 : sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);
515 :
516 : /* Read ring information. */
517 0 : bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
518 : sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
519 :
520 0 : if (sc->sc_shared_version >= 6) {
521 0 : sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
522 0 : sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
523 0 : sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
524 0 : } else {
525 0 : sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
526 0 : sc->sc_max_flowrings = sc->sc_max_submissionrings -
527 : BWFM_NUM_TX_MSGRINGS;
528 0 : sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
529 : }
530 :
531 0 : if (sc->sc_dma_idx_sz == 0) {
532 0 : d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
533 0 : d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
534 0 : h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
535 0 : h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
536 : idx_offset = sizeof(uint32_t);
537 0 : } else {
538 : uint64_t address;
539 :
540 : /* Each TX/RX Ring has a Read and Write Ptr */
541 0 : sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
542 0 : sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
543 0 : sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
544 : sc->sc_dma_idx_bufsz, 8);
545 0 : if (sc->sc_dma_idx_buf == NULL) {
546 : /* XXX: Fallback to TCM? */
547 0 : printf("%s: cannot allocate idx buf\n",
548 0 : DEVNAME(sc));
549 0 : return 1;
550 : }
551 :
552 0 : idx_offset = sc->sc_dma_idx_sz;
553 : h2d_w_idx_ptr = 0;
554 0 : address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
555 0 : ringinfo.h2d_w_idx_hostaddr_low =
556 0 : htole32(address & 0xffffffff);
557 0 : ringinfo.h2d_w_idx_hostaddr_high =
558 0 : htole32(address >> 32);
559 :
560 : h2d_r_idx_ptr = h2d_w_idx_ptr +
561 0 : sc->sc_max_submissionrings * idx_offset;
562 0 : address += sc->sc_max_submissionrings * idx_offset;
563 0 : ringinfo.h2d_r_idx_hostaddr_low =
564 0 : htole32(address & 0xffffffff);
565 0 : ringinfo.h2d_r_idx_hostaddr_high =
566 0 : htole32(address >> 32);
567 :
568 0 : d2h_w_idx_ptr = h2d_r_idx_ptr +
569 0 : sc->sc_max_submissionrings * idx_offset;
570 0 : address += sc->sc_max_submissionrings * idx_offset;
571 0 : ringinfo.d2h_w_idx_hostaddr_low =
572 0 : htole32(address & 0xffffffff);
573 0 : ringinfo.d2h_w_idx_hostaddr_high =
574 0 : htole32(address >> 32);
575 :
576 0 : d2h_r_idx_ptr = d2h_w_idx_ptr +
577 0 : sc->sc_max_completionrings * idx_offset;
578 0 : address += sc->sc_max_completionrings * idx_offset;
579 0 : ringinfo.d2h_r_idx_hostaddr_low =
580 0 : htole32(address & 0xffffffff);
581 0 : ringinfo.d2h_r_idx_hostaddr_high =
582 0 : htole32(address >> 32);
583 :
584 0 : bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
585 : sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
586 0 : }
587 :
588 0 : uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
589 : /* TX ctrl ring: Send ctrl buffers, send IOCTLs */
590 0 : if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
591 : h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
592 : &ring_mem_ptr))
593 : goto cleanup;
594 : /* TX rxpost ring: Send clean data mbufs for RX */
595 0 : if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
596 : h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
597 : &ring_mem_ptr))
598 : goto cleanup;
599 : /* RX completion rings: recv our filled buffers back */
600 0 : if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
601 : d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
602 : &ring_mem_ptr))
603 : goto cleanup;
604 0 : if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
605 : d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
606 : &ring_mem_ptr))
607 : goto cleanup;
608 0 : if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
609 : d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
610 : &ring_mem_ptr))
611 : goto cleanup;
612 :
613 : /* Dynamic TX rings for actual data */
614 0 : sc->sc_flowrings = malloc(sc->sc_max_flowrings *
615 : sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
616 0 : for (i = 0; i < sc->sc_max_flowrings; i++) {
617 0 : struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
618 0 : ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
619 0 : ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
620 : }
621 :
622 : /* Scratch and ring update buffers for firmware */
623 0 : if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
624 0 : BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
625 : goto cleanup;
626 0 : bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
627 : sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
628 : BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
629 0 : bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
630 : sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
631 : BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
632 0 : bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
633 : sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
634 : BWFM_DMA_D2H_SCRATCH_BUF_LEN);
635 :
636 0 : if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
637 0 : BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
638 : goto cleanup;
639 0 : bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
640 : sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
641 : BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
642 0 : bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
643 : sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
644 : BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
645 0 : bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
646 : sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
647 : BWFM_DMA_D2H_RINGUPD_BUF_LEN);
648 :
649 0 : bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
650 0 : bwfm_pci_intr_enable(sc);
651 :
652 : /* Maps RX mbufs to a packet id and back. */
653 0 : sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
654 0 : sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
655 : sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
656 0 : for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
657 0 : bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
658 : BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
659 : &sc->sc_rx_pkts.pkts[i].bb_map);
660 :
661 : /* Maps TX mbufs to a packet id and back. */
662 0 : sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
663 0 : sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
664 : * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
665 0 : for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
666 0 : bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
667 : BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
668 : &sc->sc_tx_pkts.pkts[i].bb_map);
669 :
670 : /* Maps IOCTL mbufs to a packet id and back. */
671 0 : sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
672 0 : sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
673 : * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
674 0 : for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
675 0 : bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
676 : BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
677 : &sc->sc_ioctl_pkts.pkts[i].bb_map);
678 :
679 : /*
680 : * For whatever reason, could also be a bug somewhere in this
681 : * driver, the firmware needs a bunch of RX buffers otherwise
682 : * it won't send any RX complete messages. 64 buffers don't
683 : * suffice, but 128 buffers are enough.
684 : */
685 0 : if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
686 0 : if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
687 0 : if_rxr_init(&sc->sc_event_ring, 8, 8);
688 0 : bwfm_pci_fill_rx_rings(sc);
689 :
690 0 : TAILQ_INIT(&sc->sc_ioctlq);
691 :
692 : #ifdef BWFM_DEBUG
693 : sc->sc_console_readidx = 0;
694 : bwfm_pci_debug_console(sc);
695 : #endif
696 :
697 0 : sc->sc_initialized = 1;
698 0 : return 0;
699 :
700 : cleanup:
701 0 : if (sc->sc_ringupd_buf)
702 0 : bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
703 0 : if (sc->sc_scratch_buf)
704 0 : bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
705 0 : if (sc->sc_rx_complete.ring)
706 0 : bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
707 0 : if (sc->sc_tx_complete.ring)
708 0 : bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
709 0 : if (sc->sc_ctrl_complete.ring)
710 0 : bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
711 0 : if (sc->sc_rxpost_submit.ring)
712 0 : bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
713 0 : if (sc->sc_ctrl_submit.ring)
714 0 : bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
715 0 : if (sc->sc_dma_idx_buf)
716 0 : bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
717 0 : return 1;
718 0 : }
719 :
720 : int
721 0 : bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
722 : const u_char *nvram, size_t nvlen)
723 : {
724 0 : struct bwfm_softc *bwfm = (void *)sc;
725 : struct bwfm_core *core;
726 : uint32_t shared, written;
727 : int i;
728 :
729 0 : if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
730 0 : bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
731 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
732 : BWFM_PCI_ARMCR4REG_BANKIDX, 5);
733 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
734 : BWFM_PCI_ARMCR4REG_BANKPDA, 0);
735 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
736 : BWFM_PCI_ARMCR4REG_BANKIDX, 7);
737 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
738 : BWFM_PCI_ARMCR4REG_BANKPDA, 0);
739 0 : }
740 :
741 0 : for (i = 0; i < size; i++)
742 0 : bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
743 : bwfm->sc_chip.ch_rambase + i, ucode[i]);
744 :
745 : /* Firmware replaces this with a pointer once up. */
746 0 : bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
747 : bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);
748 :
749 0 : if (nvram) {
750 0 : for (i = 0; i < nvlen; i++)
751 0 : bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
752 : bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize
753 : - nvlen + i, nvram[i]);
754 : }
755 :
756 0 : written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
757 : bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
758 :
759 : /* Load reset vector from firmware and kickstart core. */
760 0 : if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
761 0 : core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
762 0 : bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
763 0 : }
764 0 : bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);
765 :
766 0 : for (i = 0; i < 40; i++) {
767 0 : delay(50 * 1000);
768 0 : shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
769 : bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
770 0 : if (shared != written)
771 : break;
772 : }
773 0 : if (!shared) {
774 0 : printf("%s: firmware did not come up\n", DEVNAME(sc));
775 0 : return 1;
776 : }
777 :
778 0 : sc->sc_shared_address = shared;
779 0 : return 0;
780 0 : }
781 :
782 : int
783 0 : bwfm_pci_detach(struct device *self, int flags)
784 : {
785 0 : struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
786 :
787 0 : bwfm_detach(&sc->sc_sc, flags);
788 :
789 : /* FIXME: free RX buffers */
790 : /* FIXME: free TX buffers */
791 : /* FIXME: free more memory */
792 :
793 0 : bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
794 0 : bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
795 0 : bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
796 0 : bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
797 0 : bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
798 0 : bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
799 0 : bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
800 0 : bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
801 0 : return 0;
802 : }
803 :
804 : /* DMA code */
805 : struct bwfm_pci_dmamem *
806 0 : bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
807 : {
808 : struct bwfm_pci_dmamem *bdm;
809 0 : int nsegs;
810 :
811 0 : bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
812 0 : bdm->bdm_size = size;
813 :
814 0 : if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
815 0 : BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
816 : goto bdmfree;
817 :
818 0 : if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
819 0 : &nsegs, BUS_DMA_WAITOK) != 0)
820 : goto destroy;
821 :
822 0 : if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
823 0 : &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
824 : goto free;
825 :
826 0 : if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
827 0 : NULL, BUS_DMA_WAITOK) != 0)
828 : goto unmap;
829 :
830 0 : bzero(bdm->bdm_kva, size);
831 :
832 0 : return (bdm);
833 :
834 : unmap:
835 0 : bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
836 : free:
837 0 : bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
838 : destroy:
839 0 : bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
840 : bdmfree:
841 0 : free(bdm, M_DEVBUF, sizeof(*bdm));
842 :
843 0 : return (NULL);
844 0 : }
845 :
846 : void
/*
 * Release a buffer from bwfm_pci_dmamem_alloc(): unmap the KVA, free
 * the DMA segment, destroy the map and free the descriptor.
 */
void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	free(bdm, M_DEVBUF, sizeof(*bdm));
}
854 :
855 : /*
856 : * We need a simple mapping from a packet ID to mbufs, because when
857 : * a transfer completed, we only know the ID so we have to look up
858 : * the memory for the ID. This simply looks for an empty slot.
859 : */
860 : int
861 0 : bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
862 : {
863 : int i, idx;
864 :
865 0 : idx = pkts->last + 1;
866 0 : for (i = 0; i < pkts->npkt; i++) {
867 0 : if (idx == pkts->npkt)
868 0 : idx = 0;
869 0 : if (pkts->pkts[idx].bb_m == NULL)
870 0 : return 0;
871 0 : idx++;
872 : }
873 0 : return ENOBUFS;
874 0 : }
875 :
876 : int
877 0 : bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
878 : struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
879 : {
880 : int i, idx;
881 :
882 0 : idx = pkts->last + 1;
883 0 : for (i = 0; i < pkts->npkt; i++) {
884 0 : if (idx == pkts->npkt)
885 0 : idx = 0;
886 0 : if (pkts->pkts[idx].bb_m == NULL) {
887 0 : if (bus_dmamap_load_mbuf(sc->sc_dmat,
888 0 : pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
889 0 : if (m_defrag(m, M_DONTWAIT))
890 0 : return EFBIG;
891 0 : if (bus_dmamap_load_mbuf(sc->sc_dmat,
892 0 : pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
893 0 : return EFBIG;
894 : }
895 0 : bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
896 : 0, pkts->pkts[idx].bb_map->dm_mapsize,
897 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
898 0 : pkts->last = idx;
899 0 : pkts->pkts[idx].bb_m = m;
900 0 : *pktid = idx;
901 0 : *paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
902 0 : return 0;
903 : }
904 0 : idx++;
905 : }
906 0 : return ENOBUFS;
907 0 : }
908 :
struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	/*
	 * Release packet id "pktid": sync and unload its DMA map and
	 * hand the associated mbuf back to the caller (who now owns
	 * it).  Returns NULL for out-of-range or already-free ids,
	 * which also guards against bogus ids from the firmware.
	 */
	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}
925 :
void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	/*
	 * Replenish all three host-to-device buffer-post rings:
	 * data RX buffers, ioctl response buffers and event buffers.
	 */
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}
935 :
void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	/*
	 * Post up to 8 host buffers of "msgtype" (ioctl-response or
	 * event) on the control submit ring.  For each buffer:
	 * reserve a descriptor, allocate a cluster mbuf, register it
	 * under a packet id, then fill and commit the descriptor.
	 * Any failure cancels the reservation and stops early; the
	 * unused slots are returned to the rxring accounting.
	 */
	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		/* Make sure a packet id exists before reserving. */
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		/* DMA address is split into two little-endian words. */
		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}
975 :
void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	/*
	 * Post data RX buffers on the rxpost submit ring, up to the
	 * firmware-advertised sc_max_rxbufpost.  Same reserve/alloc/
	 * pktid/commit sequence as the ioctl variant above; any
	 * failure rolls back the reserved descriptor and stops.
	 */
	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}
1015 :
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	/*
	 * Set up one of the fixed message rings shared with the
	 * firmware: record where the ring's read/write indices live,
	 * zero them, allocate the DMA backing store, and describe the
	 * ring (base address, item count/size) in the device's ring
	 * memory area.  *ring_mem is advanced past the descriptor so
	 * consecutive calls lay the rings out back to back.
	 * Returns ENOMEM if the DMA allocation fails.
	 */
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}
1044 :
int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	/*
	 * Set up a dynamically created TX flowring.  Unlike the fixed
	 * rings there is no device-side descriptor to fill in here —
	 * the base address is communicated later via the
	 * FLOW_RING_CREATE request.  Returns ENOMEM on allocation
	 * failure.
	 */
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}
1061 :
1062 : /* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/* Ring the host-to-device doorbell to notify the firmware. */
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}
1070 :
void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/*
	 * Refresh the cached read pointer from wherever the firmware
	 * publishes it: directly in TCM, or — when DMA index support
	 * is negotiated (sc_dma_idx_sz != 0) — in a shared host DMA
	 * buffer, which must be synced before reading.
	 */
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}
1086 :
void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/*
	 * Refresh the cached write pointer from the firmware; same
	 * TCM vs. shared-DMA-buffer split as for the read pointer.
	 */
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}
1102 :
void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/*
	 * Publish our read pointer to the firmware: either straight
	 * into TCM or into the shared DMA index buffer (store first,
	 * then sync so the device sees the update).
	 */
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1118 :
void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/*
	 * Publish our write pointer to the firmware; mirror image of
	 * bwfm_pci_ring_write_rptr().
	 */
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1134 :
1135 : /*
1136 : * Retrieve a free descriptor to put new stuff in, but don't commit
1137 : * to it yet so we can rollback later if any error occurs.
1138 : */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	/*
	 * Reserve exactly one descriptor slot: re-read the firmware's
	 * read pointer, compute the free space, and advance w_ptr
	 * (with wrap) past the returned slot.  The caller later
	 * commits or cancels the reservation.
	 *
	 * NOTE(review): unlike the _multi variant below, this does
	 * not keep one slot in reserve (no "available - 1"), so when
	 * r_ptr == w_ptr the full count is considered free — verify
	 * the full-vs-empty convention against the firmware protocol.
	 */
	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}
1162 :
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	/*
	 * Reserve up to "count" contiguous descriptor slots; the
	 * actual number granted is returned through *avail.  The
	 * grant is clamped to (free space - 1) and never wraps past
	 * the end of the ring, so the returned region is always
	 * contiguous in memory.  Returns NULL if no slot is free.
	 */
	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = min(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}
1189 :
1190 : /*
1191 : * Read number of descriptors available (submitted by the firmware)
1192 : * and retrieve pointer to first descriptor.
1193 : */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	/*
	 * Re-read the firmware's write pointer and report how many
	 * descriptors are pending.  When the data wraps, only the
	 * run up to the end of the ring is reported — the caller
	 * loops (see bwfm_pci_ring_rx) to pick up the remainder.
	 * The pending region is synced for CPU access before the
	 * pointer to the first descriptor is returned.
	 */
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;

	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}
1213 :
1214 : /*
1215 : * Let firmware know we read N descriptors.
1216 : */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	/*
	 * Advance our read pointer past "nitem" consumed descriptors
	 * (wrapping at the end) and publish it to the firmware.
	 */
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}
1226 :
1227 : /*
1228 : * Let firmware know that we submitted some descriptors.
1229 : */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/*
	 * Hand submitted descriptors to the firmware: flush the whole
	 * ring for device access, publish the write pointer, then
	 * ring the doorbell — in that order, so the device never sees
	 * the new pointer before the descriptor contents.
	 */
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}
1240 :
1241 : /*
1242 : * Rollback N descriptors in case we don't actually want
1243 : * to commit to it.
1244 : */
1245 : void
1246 0 : bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
1247 : struct bwfm_pci_msgring *ring, int nitem)
1248 : {
1249 0 : if (ring->w_ptr == 0)
1250 0 : ring->w_ptr = ring->nitem - nitem;
1251 : else
1252 0 : ring->w_ptr -= nitem;
1253 0 : }
1254 :
1255 : /*
1256 : * Foreach written descriptor on the ring, pass the descriptor to
1257 : * a message handler and let the firmware know we handled it.
1258 : */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
{
	void *buf;
	int avail, processed;

	/*
	 * Drain a completion ring: fetch the contiguous run of
	 * pending descriptors, dispatch each to bwfm_pci_msg_rx(),
	 * and acknowledge them in batches of 48 so the firmware can
	 * start reusing slots early.  When the run ended because it
	 * hit the top of the ring (r_ptr wrapped to 0), loop to pick
	 * up the rest.
	 */
again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		/* Messages start sc_rx_dataoffset bytes into the slot. */
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}
1286 :
1287 : void
1288 0 : bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
1289 : {
1290 0 : struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1291 : struct msgbuf_ioctl_resp_hdr *resp;
1292 : struct msgbuf_tx_status *tx;
1293 : struct msgbuf_rx_complete *rx;
1294 : struct msgbuf_rx_event *event;
1295 : struct msgbuf_common_hdr *msg;
1296 : struct msgbuf_flowring_create_resp *fcr;
1297 : struct msgbuf_flowring_delete_resp *fdr;
1298 : struct bwfm_pci_msgring *ring;
1299 : struct mbuf *m;
1300 : int flowid;
1301 :
1302 0 : msg = (struct msgbuf_common_hdr *)buf;
1303 0 : switch (msg->msgtype)
1304 : {
1305 : case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1306 0 : fcr = (struct msgbuf_flowring_create_resp *)buf;
1307 0 : flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1308 0 : if (flowid < 2)
1309 : break;
1310 0 : flowid -= 2;
1311 0 : if (flowid >= sc->sc_max_flowrings)
1312 : break;
1313 0 : ring = &sc->sc_flowrings[flowid];
1314 0 : if (ring->status != RING_OPENING)
1315 : break;
1316 0 : if (fcr->compl_hdr.status) {
1317 0 : printf("%s: failed to open flowring %d\n",
1318 0 : DEVNAME(sc), flowid);
1319 0 : ring->status = RING_CLOSED;
1320 0 : if (ring->m) {
1321 0 : m_freem(ring->m);
1322 0 : ring->m = NULL;
1323 0 : }
1324 0 : ifq_restart(&ifp->if_snd);
1325 0 : break;
1326 : }
1327 0 : ring->status = RING_OPEN;
1328 0 : if (ring->m != NULL) {
1329 : m = ring->m;
1330 0 : ring->m = NULL;
1331 0 : if (bwfm_pci_txdata(&sc->sc_sc, m))
1332 0 : m_freem(ring->m);
1333 : }
1334 0 : ifq_restart(&ifp->if_snd);
1335 0 : break;
1336 : case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1337 0 : fdr = (struct msgbuf_flowring_delete_resp *)buf;
1338 0 : flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1339 0 : if (flowid < 2)
1340 : break;
1341 0 : flowid -= 2;
1342 0 : if (flowid >= sc->sc_max_flowrings)
1343 : break;
1344 0 : ring = &sc->sc_flowrings[flowid];
1345 0 : if (ring->status != RING_CLOSING)
1346 : break;
1347 0 : if (fdr->compl_hdr.status) {
1348 0 : printf("%s: failed to delete flowring %d\n",
1349 0 : DEVNAME(sc), flowid);
1350 0 : break;
1351 : }
1352 0 : bwfm_pci_dmamem_free(sc, ring->ring);
1353 0 : ring->status = RING_CLOSED;
1354 0 : break;
1355 : case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1356 0 : m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
1357 0 : letoh32(msg->request_id));
1358 0 : if (m == NULL)
1359 : break;
1360 0 : m_freem(m);
1361 0 : break;
1362 : case MSGBUF_TYPE_IOCTL_CMPLT:
1363 0 : resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1364 0 : bwfm_pci_msgbuf_rxioctl(sc, resp);
1365 0 : if_rxr_put(&sc->sc_ioctl_ring, 1);
1366 0 : bwfm_pci_fill_rx_rings(sc);
1367 0 : break;
1368 : case MSGBUF_TYPE_WL_EVENT:
1369 0 : event = (struct msgbuf_rx_event *)buf;
1370 0 : m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1371 0 : letoh32(event->msg.request_id));
1372 0 : if (m == NULL)
1373 : break;
1374 0 : m_adj(m, sc->sc_rx_dataoffset);
1375 0 : m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1376 0 : bwfm_rx(&sc->sc_sc, m);
1377 0 : if_rxr_put(&sc->sc_event_ring, 1);
1378 0 : bwfm_pci_fill_rx_rings(sc);
1379 0 : break;
1380 : case MSGBUF_TYPE_TX_STATUS:
1381 0 : tx = (struct msgbuf_tx_status *)buf;
1382 0 : m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1383 0 : letoh32(tx->msg.request_id));
1384 0 : if (m == NULL)
1385 : break;
1386 0 : m_freem(m);
1387 0 : if (sc->sc_tx_pkts_full) {
1388 0 : sc->sc_tx_pkts_full = 0;
1389 0 : ifq_restart(&ifp->if_snd);
1390 0 : }
1391 : break;
1392 : case MSGBUF_TYPE_RX_CMPLT:
1393 0 : rx = (struct msgbuf_rx_complete *)buf;
1394 0 : m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1395 0 : letoh32(rx->msg.request_id));
1396 0 : if (m == NULL)
1397 : break;
1398 0 : if (letoh16(rx->data_offset))
1399 0 : m_adj(m, letoh16(rx->data_offset));
1400 0 : else if (sc->sc_rx_dataoffset)
1401 0 : m_adj(m, sc->sc_rx_dataoffset);
1402 0 : m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1403 0 : bwfm_rx(&sc->sc_sc, m);
1404 0 : if_rxr_put(&sc->sc_rxbuf_ring, 1);
1405 0 : bwfm_pci_fill_rx_rings(sc);
1406 0 : break;
1407 : default:
1408 0 : printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1409 0 : break;
1410 : }
1411 0 : }
1412 :
1413 : /* Bus core helpers */
1414 : void
1415 0 : bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
1416 : {
1417 0 : struct bwfm_softc *bwfm = (void *)sc;
1418 : struct bwfm_core *core;
1419 :
1420 0 : core = bwfm_chip_get_core(bwfm, id);
1421 0 : if (core == NULL) {
1422 0 : printf("%s: could not find core to select", DEVNAME(sc));
1423 0 : return;
1424 : }
1425 :
1426 0 : pci_conf_write(sc->sc_pc, sc->sc_tag,
1427 0 : BWFM_PCI_BAR0_WINDOW, core->co_base);
1428 0 : if (pci_conf_read(sc->sc_pc, sc->sc_tag,
1429 0 : BWFM_PCI_BAR0_WINDOW) != core->co_base)
1430 0 : pci_conf_write(sc->sc_pc, sc->sc_tag,
1431 : BWFM_PCI_BAR0_WINDOW, core->co_base);
1432 0 : }
1433 :
1434 : uint32_t
1435 0 : bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1436 : {
1437 0 : struct bwfm_pci_softc *sc = (void *)bwfm;
1438 : uint32_t page, offset;
1439 :
1440 0 : page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1441 0 : offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1442 0 : pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1443 0 : return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1444 : }
1445 :
1446 : void
1447 0 : bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1448 : {
1449 0 : struct bwfm_pci_softc *sc = (void *)bwfm;
1450 : uint32_t page, offset;
1451 :
1452 0 : page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1453 0 : offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1454 0 : pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1455 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1456 0 : }
1457 :
int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	/* Nothing to prepare on the PCI bus; always succeeds. */
	return 0;
}
1463 :
int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/*
	 * Reset the chip via the chipcommon watchdog while preserving
	 * PCIe link state: disable ASPM, fire the watchdog, wait
	 * 100ms, restore the link control register, then restore a
	 * set of PCIe config registers on old core revisions and
	 * clear any pending mailbox interrupts.
	 */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	/* Kick the watchdog with a 4-tick timeout to reset the chip. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		/*
		 * Old PCIe core revisions: read-modify-write each of
		 * these config registers through the indirect
		 * CONFIGADDR/CONFIGDATA pair — the write-back
		 * presumably re-latches values lost by the reset.
		 */
		uint16_t cfg_offset[] = {
			BWFM_PCI_CFGREG_STATUS_CMD,
			BWFM_PCI_CFGREG_PM_CSR,
			BWFM_PCI_CFGREG_MSI_CAP,
			BWFM_PCI_CFGREG_MSI_ADDR_L,
			BWFM_PCI_CFGREG_MSI_ADDR_H,
			BWFM_PCI_CFGREG_MSI_DATA,
			BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
			BWFM_PCI_CFGREG_RBAR_CTRL,
			BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
			BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
			BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	/* Ack any stale mailbox interrupts (0xffffffff = no device). */
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}
1523 :
void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	/* Write the firmware reset vector at the start of TCM. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}
1530 :
/*
 * Map an 802.1D priority (0-7, as returned by ieee80211_classify())
 * to a firmware TX fifo number.  The per-entry comments name the
 * corresponding IP precedence levels.
 */
static int bwfm_pci_prio2fifo[8] = {
	1, /* best effort */
	0, /* IPTOS_PREC_IMMEDIATE */
	0, /* IPTOS_PREC_PRIORITY */
	1, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
1541 :
int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	uint8_t *da = mtod(m, uint8_t *);
	int flowid, prio, fifo;
	int i, found;

	/*
	 * Find the flowring that should carry "m".  The candidate id
	 * is derived from the TX fifo (STA mode) or destination MAC
	 * plus fifo (HostAP mode), then the flowring table is probed
	 * linearly from that position for an open ring matching the
	 * fifo (and, in HostAP mode, the destination address).
	 * Returns the flowring index, -1 when no matching ring exists
	 * yet, or ENOBUFS for unsupported operating modes.
	 */
	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		/* All multicast goes out via the broadcast ring. */
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}
1596 :
void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create cmd;
	uint8_t *da = mtod(m, uint8_t *);
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found;

	/*
	 * Start creating a new flowring for "m": pick a candidate id
	 * the same way bwfm_pci_flowring_lookup() does, claim the
	 * first CLOSED slot from there (marking it OPENING), and hand
	 * the rest of the work to bwfm_pci_flowring_create_cb() via
	 * the async task queue.  Ownership of "m" passes to the
	 * callback through the command structure.
	 */
	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far. Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		return;
	}

	cmd.m = m;
	cmd.prio = prio;
	cmd.flowid = flowid;
	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
}
1653 :
void
bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;

	/*
	 * Async continuation of bwfm_pci_flowring_create(): allocate
	 * the ring memory and send a FLOW_RING_CREATE request to the
	 * firmware.  The pending mbuf is parked on ring->m; it gets
	 * transmitted (or dropped) when the create completion arrives
	 * in bwfm_pci_msg_rx().
	 *
	 * NOTE(review): on the error returns below, cmd->m is never
	 * freed and the ring stays in RING_OPENING — looks like a
	 * leak/stuck-ring on these (rare) paths; confirm.
	 */
	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		return;
	}

	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	/* Multicast flows are keyed on the broadcast address. */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	/* On-the-wire flowring ids are offset by 2. */
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
}
1709 :
void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;

	/*
	 * Ask the firmware to tear down flowring "flowid".  The ring
	 * moves to RING_CLOSING; its DMA memory is released when the
	 * delete completion arrives in bwfm_pci_msg_rx().
	 */
	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		return;
	}

	ring->status = RING_CLOSING;

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	/* On-the-wire flowring ids are offset by 2. */
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
}
1738 :
1739 : void
1740 0 : bwfm_pci_stop(struct bwfm_softc *bwfm)
1741 : {
1742 0 : struct bwfm_pci_softc *sc = (void *)bwfm;
1743 : struct bwfm_pci_msgring *ring;
1744 : int i;
1745 :
1746 0 : for (i = 0; i < sc->sc_max_flowrings; i++) {
1747 0 : ring = &sc->sc_flowrings[i];
1748 0 : if (ring->status == RING_OPEN)
1749 0 : bwfm_pci_flowring_delete(sc, i);
1750 : }
1751 0 : }
1752 :
int
bwfm_pci_txcheck(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	int i;

	/*
	 * Report whether TX is possible right now.  Returns ENOBUFS
	 * while any flowring is still being created (the upper layer
	 * must hold packets until the ring opens) or when no TX
	 * packet id is free; the latter also sets sc_tx_pkts_full so
	 * the queue is restarted from the TX-status handler.
	 */

	/* If we are transitioning, we cannot send. */
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[i];
		if (ring->status == RING_OPENING)
			return ENOBUFS;
	}

	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
		sc->sc_tx_pkts_full = 1;
		return ENOBUFS;
	}

	return 0;
}
1774 :
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	int flowid, ret;

	/*
	 * Transmit "m" on its flowring.  On success (return 0) the
	 * mbuf is owned by the TX packet-id table until the firmware
	 * reports TX status; on error the caller keeps ownership.
	 * The Ethernet header is copied into the descriptor and the
	 * DMA address is advanced past it, so the firmware only DMAs
	 * the payload.
	 */
	flowid = bwfm_pci_flowring_lookup(sc, m);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet. The flowring will be created
		 * asynchronously. While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now. When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, m);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use a flow that was "
		    "transitioning in status %d\n",
		    DEVNAME(sc), ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);

	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		/* Roll back the reserved descriptor; caller keeps m. */
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	/* Firmware gets the payload only; the header is in txhdr. */
	paddr += ETHER_HDR_LEN;

	tx->msg.request_id = htole32(pktid);
	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}
1842 :
1843 : #ifdef BWFM_DEBUG
void
bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
{
	/*
	 * Drain the firmware's in-memory console ring (in TCM) and
	 * echo new characters via DPRINTFN.  Carriage returns are
	 * dropped; the read index wraps at the buffer size.
	 */
	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);

	if (newidx != sc->sc_console_readidx)
		DPRINTFN(3, ("BWFM CONSOLE: "));
	while (newidx != sc->sc_console_readidx) {
		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_console_buf_addr + sc->sc_console_readidx);
		sc->sc_console_readidx++;
		if (sc->sc_console_readidx == sc->sc_console_buf_size)
			sc->sc_console_readidx = 0;
		if (ch == '\r')
			continue;
		DPRINTFN(3, ("%c", ch));
	}
}
1863 : #endif
1864 :
/*
 * PCIe interrupt handler.  Returns 0 if the interrupt was not ours
 * (no mailbox interrupt status pending), 1 otherwise.
 *
 * Ordering matters here: interrupts are masked first, then the pending
 * status bits are acknowledged by writing them back, then the rings are
 * processed, and only afterwards are interrupts unmasked again.
 */
int
bwfm_pci_intr(void *v)
{
	struct bwfm_pci_softc *sc = (void *)v;
	uint32_t status;

	/* Read pending causes; zero means the interrupt is not for us. */
	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
		return 0;

	/* Mask further interrupts, then ack what we are about to handle. */
	bwfm_pci_intr_disable(sc);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);

	/* Function-0 mailbox data events are not implemented yet. */
	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
		printf("%s: handle MB data\n", __func__);

	/* Device-to-host doorbell: drain all three completion rings. */
	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
	}

#ifdef BWFM_DEBUG
	bwfm_pci_debug_console(sc);
#endif

	bwfm_pci_intr_enable(sc);
	return 1;
}
1896 :
1897 : void
1898 0 : bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
1899 : {
1900 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1901 : BWFM_PCI_PCIE2REG_MAILBOXMASK,
1902 : BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1903 : BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
1904 : BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
1905 0 : }
1906 :
1907 : void
1908 0 : bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
1909 : {
1910 0 : bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1911 : BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
1912 0 : }
1913 :
1914 : /* Msgbuf protocol implementation */
/*
 * Issue a firmware ioctl over the msgbuf protocol and wait for its
 * completion.  On entry *len is the caller's buffer length; on success
 * *len is updated to the (clamped) response length and, if buf is
 * non-NULL, the response data is copied into it.  Returns 0 on success,
 * 1 on any failure or timeout.
 *
 * NOTE(review): the ifidx parameter is ignored; req->msg.ifidx is
 * hard-coded to 0 below.
 */
int
bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t *len)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct msgbuf_ioctl_req_hdr *req;
	struct bwfm_pci_ioctl *ctl;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	size_t buflen;

	/* The request payload is capped at the h2d ioctl buffer size. */
	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
	m = MCLGETI(NULL, M_DONTWAIT, NULL, buflen);
	if (m == NULL)
		return 1;
	m->m_len = m->m_pkthdr.len = buflen;

	/* NULL buf means "query with zeroed input". */
	if (buf)
		memcpy(mtod(m, char *), buf, buflen);
	else
		memset(mtod(m, char *), 0, buflen);

	/* Reserve a slot on the control submit ring. */
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		m_freem(m);
		return 1;
	}

	/* Map the request mbuf for DMA and get a packet id for it. */
	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
		m_freem(m);
		return 1;
	}

	/*
	 * Queue a completion tracker; the interrupt path
	 * (bwfm_pci_msgbuf_rxioctl) matches on transid and wakes us.
	 */
	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
	ctl->transid = sc->sc_ioctl_transid++;
	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);

	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	req->msg.ifidx = 0;
	req->msg.flags = 0;
	req->msg.request_id = htole32(pktid);
	req->cmd = htole32(cmd);
	req->output_buf_len = htole16(*len);
	req->trans_id = htole16(ctl->transid);

	req->input_buf_len = htole16(m->m_len);
	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);

	/* Wait up to one second for the firmware's response. */
	tsleep(ctl, PWAIT, "bwfm", hz);
	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);

	/* NULL response mbuf means the wait timed out. */
	if (ctl->m == NULL) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	/*
	 * Clamp the response length to both the request mbuf length and
	 * the caller's buffer length.  NOTE(review): the request mbuf m
	 * (tracked in sc_ioctl_pkts) does not appear to be freed on this
	 * path or in the visible completion path — possible leak; verify
	 * against the pktid teardown code elsewhere in the file.
	 */
	*len = min(ctl->retlen, m->m_len);
	*len = min(*len, buflen);
	if (buf)
		m_copydata(ctl->m, 0, *len, (caddr_t)buf);
	m_freem(ctl->m);

	/*
	 * NOTE(review): status was stored via letoh16(); if the field is
	 * an unsigned 16-bit type this can never be negative — confirm
	 * the declaration of struct bwfm_pci_ioctl.
	 */
	if (ctl->status < 0) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	free(ctl, M_TEMP, sizeof(*ctl));
	return 0;
}
1990 :
1991 : int
1992 0 : bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
1993 : int cmd, char *buf, size_t len)
1994 : {
1995 0 : return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
1996 : }
1997 :
1998 : void
1999 0 : bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2000 : struct msgbuf_ioctl_resp_hdr *resp)
2001 : {
2002 : struct bwfm_pci_ioctl *ctl, *tmp;
2003 : struct mbuf *m;
2004 :
2005 0 : m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2006 0 : letoh32(resp->msg.request_id));
2007 :
2008 0 : TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2009 0 : if (ctl->transid != letoh16(resp->trans_id))
2010 : continue;
2011 0 : ctl->m = m;
2012 0 : ctl->retlen = letoh16(resp->resp_len);
2013 0 : ctl->status = letoh16(resp->compl_hdr.status);
2014 0 : wakeup(ctl);
2015 0 : return;
2016 : }
2017 :
2018 0 : m_free(m);
2019 0 : }
|