Line data Source code
1 : /* $OpenBSD: if_txp.c,v 1.127 2017/08/23 10:10:56 mikeb Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2001
5 : * Jason L. Wright <jason@thought.net>, Theo de Raadt, and
6 : * Aaron Campbell <aaron@monkey.org>. All rights reserved.
7 : *
8 : * Redistribution and use in source and binary forms, with or without
9 : * modification, are permitted provided that the following conditions
10 : * are met:
11 : * 1. Redistributions of source code must retain the above copyright
12 : * notice, this list of conditions and the following disclaimer.
13 : * 2. Redistributions in binary form must reproduce the above copyright
14 : * notice, this list of conditions and the following disclaimer in the
15 : * documentation and/or other materials provided with the distribution.
16 : *
17 : * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
18 : * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 : * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 : * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS
21 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 : * THE POSSIBILITY OF SUCH DAMAGE.
28 : */
29 :
30 : /*
31 : * Driver for 3c990 (Typhoon) Ethernet ASIC
32 : */
33 :
34 : #include "bpfilter.h"
35 : #include "vlan.h"
36 :
37 : #include <sys/param.h>
38 : #include <sys/systm.h>
39 : #include <sys/sockio.h>
40 : #include <sys/mbuf.h>
41 : #include <sys/malloc.h>
42 : #include <sys/kernel.h>
43 : #include <sys/socket.h>
44 : #include <sys/device.h>
45 : #include <sys/timeout.h>
46 :
47 : #include <net/if.h>
48 :
49 : #include <netinet/in.h>
50 : #include <netinet/if_ether.h>
51 :
52 : #include <net/if_media.h>
53 :
54 : #if NBPFILTER > 0
55 : #include <net/bpf.h>
56 : #endif
57 :
58 : #include <machine/bus.h>
59 :
60 : #include <dev/mii/mii.h>
61 : #include <dev/pci/pcireg.h>
62 : #include <dev/pci/pcivar.h>
63 : #include <dev/pci/pcidevs.h>
64 :
65 : #include <dev/pci/if_txpreg.h>
66 :
67 : /*
68 : * These currently break the 3c990 firmware, hopefully will be resolved
69 : * at some point.
70 : */
71 : #undef TRY_TX_UDP_CSUM
72 : #undef TRY_TX_TCP_CSUM
73 :
74 : int txp_probe(struct device *, void *, void *);
75 : void txp_attach(struct device *, struct device *, void *);
76 : void txp_attachhook(struct device *);
77 : int txp_intr(void *);
78 : void txp_tick(void *);
79 : int txp_ioctl(struct ifnet *, u_long, caddr_t);
80 : void txp_start(struct ifnet *);
81 : void txp_stop(struct txp_softc *);
82 : void txp_init(struct txp_softc *);
83 : void txp_watchdog(struct ifnet *);
84 :
85 : int txp_chip_init(struct txp_softc *);
86 : int txp_reset_adapter(struct txp_softc *);
87 : int txp_download_fw(struct txp_softc *);
88 : int txp_download_fw_wait(struct txp_softc *);
89 : int txp_download_fw_section(struct txp_softc *,
90 : struct txp_fw_section_header *, int, u_char *, size_t);
91 : int txp_alloc_rings(struct txp_softc *);
92 : void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *);
93 : int txp_dma_malloc(struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int);
94 : void txp_set_filter(struct txp_softc *);
95 :
96 : int txp_cmd_desc_numfree(struct txp_softc *);
97 : int txp_command(struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
98 : u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
99 : int txp_command2(struct txp_softc *, u_int16_t, u_int16_t,
100 : u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
101 : struct txp_rsp_desc **, int);
102 : int txp_response(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
103 : struct txp_rsp_desc **);
104 : void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
105 : struct txp_rsp_desc *);
106 : void txp_capabilities(struct txp_softc *);
107 :
108 : void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
109 : int txp_ifmedia_upd(struct ifnet *);
110 : void txp_show_descriptor(void *);
111 : void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *,
112 : struct txp_dma_alloc *);
113 : void txp_rxbuf_reclaim(struct txp_softc *);
114 : void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *,
115 : struct txp_dma_alloc *);
116 :
/* Autoconf attachment glue: softc size plus match/attach entry points. */
struct cfattach txp_ca = {
	sizeof(struct txp_softc), txp_probe, txp_attach,
};
120 :
/* Driver definition: "txp" units, classed as network interfaces. */
struct cfdriver txp_cd = {
	NULL, "txp", DV_IFNET
};
124 :
/* PCI vendor/product pairs this driver matches (all 3Com 3c990 variants). */
const struct pci_matchid txp_devices[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BTXM },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX },
};
136 :
137 : int
138 0 : txp_probe(struct device *parent, void *match, void *aux)
139 : {
140 0 : return (pci_matchbyid((struct pci_attach_args *)aux, txp_devices,
141 : nitems(txp_devices)));
142 : }
143 :
/*
 * Finish attaching the device.  Scheduled from txp_attach() via
 * config_mountroot() so that loadfirmware() (called from
 * txp_download_fw()) can read the firmware image from the filesystem.
 */
void
txp_attachhook(struct device *self)
{
	struct txp_softc *sc = (struct txp_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int16_t p1;		/* station address: first response word */
	u_int32_t p2;		/* station address: second response word */
	int s;

	s = splnet();
	printf("%s: ", sc->sc_dev.dv_xname);

	/* Quiesce and reset the chip, then push the runtime firmware. */
	if (txp_chip_init(sc)) {
		printf("failed chip init\n");
		splx(s);
		return;
	}

	if (txp_download_fw(sc)) {
		splx(s);
		return;
	}

	if (txp_alloc_rings(sc)) {
		splx(s);
		return;
	}

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1)) {
		splx(s);
		return;
	}

	/* The MAC address comes back in the two command response words. */
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1)) {
		splx(s);
		return;
	}

	/* Bytes are swapped within each little-endian word; unscramble. */
	p1 = htole16(p1);
	sc->sc_arpcom.ac_enaddr[0] = ((u_int8_t *)&p1)[1];
	sc->sc_arpcom.ac_enaddr[1] = ((u_int8_t *)&p1)[0];
	p2 = htole32(p2);
	sc->sc_arpcom.ac_enaddr[2] = ((u_int8_t *)&p2)[3];
	sc->sc_arpcom.ac_enaddr[3] = ((u_int8_t *)&p2)[2];
	sc->sc_arpcom.ac_enaddr[4] = ((u_int8_t *)&p2)[1];
	sc->sc_arpcom.ac_enaddr[5] = ((u_int8_t *)&p2)[0];

	printf("address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
	sc->sc_cold = 0;

	/* Advertise the usual 10/100 media types; default to autoselect. */
	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	/* Hook up the interface structure and its entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_baudrate = IF_Mbps(10);
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/* Establish hardware offload capabilities (see txp_capabilities()). */
	txp_capabilities(sc);

	timeout_set(&sc->sc_tick, txp_tick, sc);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	splx(s);
}
231 :
232 : void
233 0 : txp_attach(struct device *parent, struct device *self, void *aux)
234 : {
235 0 : struct txp_softc *sc = (struct txp_softc *)self;
236 0 : struct pci_attach_args *pa = aux;
237 0 : pci_chipset_tag_t pc = pa->pa_pc;
238 0 : pci_intr_handle_t ih;
239 : const char *intrstr = NULL;
240 0 : bus_size_t iosize;
241 :
242 0 : sc->sc_cold = 1;
243 :
244 0 : if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
245 0 : &sc->sc_bt, &sc->sc_bh, NULL, &iosize, 0)) {
246 0 : printf(": can't map mem space %d\n", 0);
247 0 : return;
248 : }
249 :
250 0 : sc->sc_dmat = pa->pa_dmat;
251 :
252 : /*
253 : * Allocate our interrupt.
254 : */
255 0 : if (pci_intr_map(pa, &ih)) {
256 0 : printf(": couldn't map interrupt\n");
257 0 : return;
258 : }
259 :
260 0 : intrstr = pci_intr_string(pc, ih);
261 0 : sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc,
262 0 : self->dv_xname);
263 0 : if (sc->sc_ih == NULL) {
264 0 : printf(": couldn't establish interrupt");
265 0 : if (intrstr != NULL)
266 0 : printf(" at %s", intrstr);
267 0 : printf("\n");
268 0 : return;
269 : }
270 0 : printf(": %s\n", intrstr);
271 :
272 0 : config_mountroot(self, txp_attachhook);
273 :
274 0 : }
275 :
276 : int
277 0 : txp_chip_init(struct txp_softc *sc)
278 : {
279 : /* disable interrupts */
280 0 : WRITE_REG(sc, TXP_IER, 0);
281 0 : WRITE_REG(sc, TXP_IMR,
282 : TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
283 : TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
284 : TXP_INT_LATCH);
285 :
286 : /* ack all interrupts */
287 0 : WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
288 : TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
289 : TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
290 : TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
291 : TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);
292 :
293 0 : if (txp_reset_adapter(sc))
294 0 : return (-1);
295 :
296 : /* disable interrupts */
297 0 : WRITE_REG(sc, TXP_IER, 0);
298 0 : WRITE_REG(sc, TXP_IMR,
299 : TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
300 : TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
301 : TXP_INT_LATCH);
302 :
303 : /* ack all interrupts */
304 0 : WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
305 : TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
306 : TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
307 : TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
308 : TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);
309 :
310 0 : return (0);
311 0 : }
312 :
313 : int
314 0 : txp_reset_adapter(struct txp_softc *sc)
315 : {
316 : u_int32_t r;
317 : int i;
318 :
319 0 : WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
320 0 : DELAY(1000);
321 0 : WRITE_REG(sc, TXP_SRR, 0);
322 :
323 : /* Should wait max 6 seconds */
324 0 : for (i = 0; i < 6000; i++) {
325 0 : r = READ_REG(sc, TXP_A2H_0);
326 0 : if (r == STAT_WAITING_FOR_HOST_REQUEST)
327 : break;
328 0 : DELAY(1000);
329 : }
330 :
331 0 : if (r != STAT_WAITING_FOR_HOST_REQUEST) {
332 0 : printf("%s: reset hung\n", TXP_DEVNAME(sc));
333 0 : return (-1);
334 : }
335 :
336 0 : return (0);
337 0 : }
338 :
/*
 * Download the Typhoon runtime firmware image (file "3c990", fetched
 * with loadfirmware()) to the adapter, one section at a time, using the
 * H2A/A2H mailbox registers and the A2H_0 doorbell for handshaking.
 * Returns 0 on success, non-zero on failure.
 */
int
txp_download_fw(struct txp_softc *sc)
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	u_int32_t r, i, ier, imr;
	size_t buflen;
	int sect, err;
	u_char *buf;

	/* Temporarily enable/unmask the A2H_0 doorbell; restored below. */
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	/* Boot firmware must be idle (waiting for a request) to start. */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf("not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	err = loadfirmware("3c990", &buf, &buflen);
	if (err) {
		printf("failed loadfirmware of file 3c990: errno %d\n",
		    err);
		return (err);
	}

	/* Sanity-check the image's magic before trusting its headers. */
	fileheader = (struct txp_fw_file_header *)buf;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		printf("firmware invalid magic\n");
		goto fail;
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, letoh32(fileheader->addr));
	WRITE_REG(sc, TXP_H2A_2, letoh32(fileheader->hmac[0]));
	WRITE_REG(sc, TXP_H2A_3, letoh32(fileheader->hmac[1]));
	WRITE_REG(sc, TXP_H2A_4, letoh32(fileheader->hmac[2]));
	WRITE_REG(sc, TXP_H2A_5, letoh32(fileheader->hmac[3]));
	WRITE_REG(sc, TXP_H2A_6, letoh32(fileheader->hmac[4]));
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		printf("fw wait failed, initial\n");
		goto fail;
	}

	/*
	 * Walk the section headers; each section's payload immediately
	 * follows its header in the image.
	 */
	secthead = (struct txp_fw_section_header *)(buf +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < letoh32(fileheader->nsections); sect++) {
		if (txp_download_fw_section(sc, secthead, sect, buf, buflen))
			goto fail;
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + letoh32(secthead->nbytes) +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	/* Wait for the freshly downloaded runtime image to boot. */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf("not waiting for boot\n");
		goto fail;
	}

	/* Restore the caller's interrupt enable/mask state. */
	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	free(buf, M_DEVBUF, 0);
	return (0);
fail:
	free(buf, M_DEVBUF, 0);
	return (-1);
}
429 :
430 : int
431 0 : txp_download_fw_wait(struct txp_softc *sc)
432 : {
433 : u_int32_t i, r;
434 :
435 0 : for (i = 0; i < 10000; i++) {
436 0 : r = READ_REG(sc, TXP_ISR);
437 0 : if (r & TXP_INT_A2H_0)
438 : break;
439 0 : DELAY(50);
440 : }
441 :
442 0 : if (!(r & TXP_INT_A2H_0)) {
443 0 : printf("fw wait failed comm0\n");
444 0 : return (-1);
445 : }
446 :
447 0 : WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
448 :
449 0 : r = READ_REG(sc, TXP_A2H_0);
450 0 : if (r != STAT_WAITING_FOR_SEGMENT) {
451 0 : printf("fw not waiting for segment\n");
452 0 : return (-1);
453 : }
454 0 : return (0);
455 0 : }
456 :
/*
 * Download one firmware section: bounds-check it against the image,
 * copy it into a DMA buffer, verify its Internet checksum, then hand
 * the buffer's physical address to the boot firmware and wait for it
 * to consume the segment.  Returns 0 on success, -1 on any failure.
 */
int
txp_download_fw_section(struct txp_softc *sc,
    struct txp_fw_section_header *sect, int sectnum, u_char *buf,
    size_t buflen)
{
	struct txp_dma_alloc dma;
	int rseg, err = 0;	/* rseg doubles as a byte offset into buf */
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/*
	 * Make sure we aren't past the end of the image.
	 * NOTE(review): rseg is an int holding a pointer difference and is
	 * compared against the size_t buflen — fine for realistic firmware
	 * sizes, but worth tightening to size_t.
	 */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)buf);
	if (rseg >= buflen) {
		printf("fw invalid section address, section %d\n", sectnum);
		return (-1);
	}

	/*
	 * Make sure this section doesn't go past the end.
	 * NOTE(review): rseg is now the section's end offset, so ">=" also
	 * rejects a section ending exactly at buflen — confirm whether the
	 * image format guarantees trailing data, else this looks off by one.
	 */
	rseg += letoh32(sect->nbytes);
	if (rseg >= buflen) {
		printf("fw truncated section %d\n", sectnum);
		return (-1);
	}

	/* map a buffer, copy segment to it, get physaddr */
	if (txp_dma_malloc(sc, letoh32(sect->nbytes), &dma, 0)) {
		printf("fw dma malloc failed, section %d\n", sectnum);
		return (-1);
	}

	/* Section payload begins immediately after its header. */
	bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr,
	    letoh32(sect->nbytes));

	/*
	 * dummy up mbuf and verify section checksum
	 * NOTE(review): csum is host-order from in_cksum() while
	 * sect->cksum comes from a little-endian image — confirm this
	 * compare is correct on big-endian machines.
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = letoh32(sect->nbytes);
	m.m_data = dma.dma_vaddr;
	m.m_flags = 0;
	csum = in_cksum(&m, letoh32(sect->nbytes));
	if (csum != sect->cksum) {
		printf("fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

	/* Flush the copy to memory before the device DMAs it. */
	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Describe the segment (length, cksum, load addr, 64-bit physaddr). */
	WRITE_REG(sc, TXP_H2A_1, letoh32(sect->nbytes));
	WRITE_REG(sc, TXP_H2A_2, letoh16(sect->cksum));
	WRITE_REG(sc, TXP_H2A_3, letoh32(sect->addr));
	WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, section %d\n",
		    sc->sc_dev.dv_xname, sectnum);
		err = -1;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

bail:
	txp_dma_free(sc, &dma);

	return (err);
}
534 :
/*
 * Interrupt handler.  With all sources masked, repeatedly ack and drain
 * the ISR, reclaiming rx/tx ring work as indicated by the host-variable
 * block shared with the firmware; then re-enable interrupts and kick
 * the transmit path.  Returns non-zero if the interrupt was ours.
 */
int
txp_intr(void *vsc)
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t isr;
	int claimed = 0;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);

	/* Pick up the firmware's latest view of the ring offsets. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		claimed = 1;
		WRITE_REG(sc, TXP_ISR, isr);

		/* rx rings: reclaim when read offset lags write offset */
		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma);

		/* refill the rx buffer ring once the firmware drains it */
		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		/* tx rings: reclaim completed descriptors */
		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(letoh32(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(letoh32(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma);

		isr = READ_REG(sc, TXP_ISR);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	/* unmask all interrupt sources except the A2H_3 doorbell */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	txp_start(&sc->sc_arpcom.ac_if);

	return (claimed);
}
587 :
588 : void
589 0 : txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r,
590 : struct txp_dma_alloc *dma)
591 : {
592 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
593 : struct txp_rx_desc *rxd;
594 : struct mbuf *m;
595 0 : struct mbuf_list ml = MBUF_LIST_INITIALIZER();
596 0 : struct txp_swdesc *sd;
597 : u_int32_t roff, woff;
598 : int idx;
599 : u_int16_t sumflags = 0;
600 :
601 0 : roff = letoh32(*r->r_roff);
602 0 : woff = letoh32(*r->r_woff);
603 0 : idx = roff / sizeof(struct txp_rx_desc);
604 0 : rxd = r->r_desc + idx;
605 :
606 0 : while (roff != woff) {
607 :
608 0 : bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
609 : idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
610 : BUS_DMASYNC_POSTREAD);
611 :
612 0 : if (rxd->rx_flags & RX_FLAGS_ERROR) {
613 0 : printf("%s: error 0x%x\n", sc->sc_dev.dv_xname,
614 0 : letoh32(rxd->rx_stat));
615 0 : ifp->if_ierrors++;
616 0 : goto next;
617 : }
618 :
619 : /* retrieve stashed pointer */
620 0 : bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
621 :
622 0 : bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
623 : sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
624 0 : bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
625 0 : bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
626 0 : m = sd->sd_mbuf;
627 0 : free(sd, M_DEVBUF, 0);
628 0 : m->m_pkthdr.len = m->m_len = letoh16(rxd->rx_len);
629 :
630 : #ifdef __STRICT_ALIGNMENT
631 : {
632 : /*
633 : * XXX Nice chip, except it won't accept "off by 2"
634 : * buffers, so we're force to copy. Supposedly
635 : * this will be fixed in a newer firmware rev
636 : * and this will be temporary.
637 : */
638 : struct mbuf *mnew;
639 :
640 : MGETHDR(mnew, M_DONTWAIT, MT_DATA);
641 : if (mnew == NULL) {
642 : m_freem(m);
643 : goto next;
644 : }
645 : if (m->m_len > (MHLEN - 2)) {
646 : MCLGET(mnew, M_DONTWAIT);
647 : if (!(mnew->m_flags & M_EXT)) {
648 : m_freem(mnew);
649 : m_freem(m);
650 : goto next;
651 : }
652 : }
653 : mnew->m_pkthdr.len = mnew->m_len = m->m_len;
654 : mnew->m_data += 2;
655 : bcopy(m->m_data, mnew->m_data, m->m_len);
656 : m_freem(m);
657 : m = mnew;
658 : }
659 : #endif
660 :
661 : #if NVLAN > 0
662 : /*
663 : * XXX Another firmware bug: the vlan encapsulation
664 : * is always removed, even when we tell the card not
665 : * to do that. Restore the vlan encapsulation below.
666 : */
667 0 : if (rxd->rx_stat & htole32(RX_STAT_VLAN)) {
668 0 : m->m_pkthdr.ether_vtag = ntohs(rxd->rx_vlan >> 16);
669 0 : m->m_flags |= M_VLANTAG;
670 0 : }
671 : #endif
672 :
673 0 : if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD))
674 0 : sumflags |= M_IPV4_CSUM_IN_BAD;
675 0 : else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD))
676 0 : sumflags |= M_IPV4_CSUM_IN_OK;
677 :
678 0 : if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD))
679 0 : sumflags |= M_TCP_CSUM_IN_BAD;
680 0 : else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD))
681 0 : sumflags |= M_TCP_CSUM_IN_OK;
682 :
683 0 : if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD))
684 0 : sumflags |= M_UDP_CSUM_IN_BAD;
685 0 : else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD))
686 0 : sumflags |= M_UDP_CSUM_IN_OK;
687 :
688 0 : m->m_pkthdr.csum_flags = sumflags;
689 :
690 0 : ml_enqueue(&ml, m);
691 :
692 : next:
693 0 : bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
694 : idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
695 : BUS_DMASYNC_PREREAD);
696 :
697 0 : roff += sizeof(struct txp_rx_desc);
698 0 : if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
699 : idx = 0;
700 : roff = 0;
701 0 : rxd = r->r_desc;
702 0 : } else {
703 0 : idx++;
704 0 : rxd++;
705 : }
706 0 : woff = letoh32(*r->r_woff);
707 : }
708 :
709 0 : if_input(ifp, &ml);
710 :
711 0 : *r->r_roff = htole32(woff);
712 0 : }
713 :
/*
 * Refill the receive buffer ring: allocate a software descriptor, an
 * mbuf cluster, and a DMA map for each free slot between the chip's
 * read index and the host's write index, publishing each buffer's
 * physical address to the firmware as we go.  Stops early (silently)
 * on any allocation failure; a later pass will retry.
 */
void
txp_rxbuf_reclaim(struct txp_softc *sc)
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i, end;

	end = TXP_OFFSET2IDX(letoh32(hv->hv_rx_buf_read_idx));
	i = TXP_OFFSET2IDX(letoh32(hv->hv_rx_buf_write_idx));

	/* start one past the last slot we published, wrapping */
	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto err_mbuf;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto err_mbuf;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));

		/* publish the buffer's 64-bit physical address */
		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    & 0xffffffff;
		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    >> 32;

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);

		/* hand the slot to the firmware */
		hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i));

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF, 0);
}
787 :
/*
 * Reclaim mbufs and entries from a transmit ring: walk from the
 * consumer index to the index the firmware has completed (r_off),
 * unloading DMA maps and freeing mbufs for data descriptors, and
 * clear the watchdog timer once the ring is empty.
 */
void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r,
    struct txp_dma_alloc *dma)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t idx = TXP_OFFSET2IDX(letoh32(*(r->r_off)));
	u_int32_t cons = r->r_cons, cnt = r->r_cnt;
	struct txp_tx_desc *txd = r->r_desc + cons;
	struct txp_swdesc *sd = sc->sc_txd + cons;
	struct mbuf *m;

	while (cons != idx) {
		if (cnt == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    cons * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc),
		    BUS_DMASYNC_POSTWRITE);

		/* only data descriptors carry an mbuf to release */
		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
		    TX_FLAGS_TYPE_DATA) {
			bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
			m = sd->sd_mbuf;
			if (m != NULL) {
				m_freem(m);
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
			}
		}
		/* we freed a slot, so the queue can make progress again */
		ifq_clr_oactive(&ifp->if_snd);

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}

		cnt--;
	}

	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		ifp->if_timer = 0;	/* nothing in flight; stop watchdog */
}
842 :
843 : int
844 0 : txp_alloc_rings(struct txp_softc *sc)
845 : {
846 : struct txp_boot_record *boot;
847 0 : struct txp_swdesc *sd;
848 : u_int32_t r;
849 : int i, j;
850 :
851 : /* boot record */
852 0 : if (txp_dma_malloc(sc, sizeof(struct txp_boot_record), &sc->sc_boot_dma,
853 : BUS_DMA_COHERENT)) {
854 0 : printf("can't allocate boot record\n");
855 0 : return (-1);
856 : }
857 0 : boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
858 0 : bzero(boot, sizeof(*boot));
859 0 : sc->sc_boot = boot;
860 :
861 : /* host variables */
862 0 : if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
863 : BUS_DMA_COHERENT)) {
864 0 : printf("can't allocate host ring\n");
865 0 : goto bail_boot;
866 : }
867 0 : bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar));
868 0 : boot->br_hostvar_lo = htole32(sc->sc_host_dma.dma_paddr & 0xffffffff);
869 0 : boot->br_hostvar_hi = htole32(sc->sc_host_dma.dma_paddr >> 32);
870 0 : sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;
871 :
872 : /* high priority tx ring */
873 0 : if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
874 0 : &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
875 0 : printf("can't allocate high tx ring\n");
876 0 : goto bail_host;
877 : }
878 0 : bzero(sc->sc_txhiring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
879 0 : boot->br_txhipri_lo = htole32(sc->sc_txhiring_dma.dma_paddr & 0xffffffff);
880 0 : boot->br_txhipri_hi = htole32(sc->sc_txhiring_dma.dma_paddr >> 32);
881 0 : boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
882 0 : sc->sc_txhir.r_reg = TXP_H2A_1;
883 0 : sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
884 0 : sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
885 0 : sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
886 0 : for (i = 0; i < TX_ENTRIES; i++) {
887 0 : if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
888 : TXP_MAXTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
889 0 : &sc->sc_txd[i].sd_map) != 0) {
890 0 : for (j = 0; j < i; j++) {
891 0 : bus_dmamap_destroy(sc->sc_dmat,
892 : sc->sc_txd[j].sd_map);
893 0 : sc->sc_txd[j].sd_map = NULL;
894 : }
895 : goto bail_txhiring;
896 : }
897 : }
898 :
899 : /* low priority tx ring */
900 0 : if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
901 0 : &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
902 0 : printf("can't allocate low tx ring\n");
903 0 : goto bail_txhiring;
904 : }
905 0 : bzero(sc->sc_txloring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
906 0 : boot->br_txlopri_lo = htole32(sc->sc_txloring_dma.dma_paddr & 0xffffffff);
907 0 : boot->br_txlopri_hi = htole32(sc->sc_txloring_dma.dma_paddr >> 32);
908 0 : boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
909 0 : sc->sc_txlor.r_reg = TXP_H2A_3;
910 0 : sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
911 0 : sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
912 0 : sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
913 :
914 : /* high priority rx ring */
915 0 : if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
916 0 : &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
917 0 : printf("can't allocate high rx ring\n");
918 0 : goto bail_txloring;
919 : }
920 0 : bzero(sc->sc_rxhiring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
921 0 : boot->br_rxhipri_lo = htole32(sc->sc_rxhiring_dma.dma_paddr & 0xffffffff);
922 0 : boot->br_rxhipri_hi = htole32(sc->sc_rxhiring_dma.dma_paddr >> 32);
923 0 : boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
924 0 : sc->sc_rxhir.r_desc =
925 0 : (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
926 0 : sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
927 0 : sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
928 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
929 : 0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
930 :
931 : /* low priority ring */
932 0 : if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
933 0 : &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
934 0 : printf("can't allocate low rx ring\n");
935 0 : goto bail_rxhiring;
936 : }
937 0 : bzero(sc->sc_rxloring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
938 0 : boot->br_rxlopri_lo = htole32(sc->sc_rxloring_dma.dma_paddr & 0xffffffff);
939 0 : boot->br_rxlopri_hi = htole32(sc->sc_rxloring_dma.dma_paddr >> 32);
940 0 : boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
941 0 : sc->sc_rxlor.r_desc =
942 0 : (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
943 0 : sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
944 0 : sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
945 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
946 : 0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
947 :
948 : /* command ring */
949 0 : if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
950 0 : &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
951 0 : printf("can't allocate command ring\n");
952 0 : goto bail_rxloring;
953 : }
954 0 : bzero(sc->sc_cmdring_dma.dma_vaddr, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
955 0 : boot->br_cmd_lo = htole32(sc->sc_cmdring_dma.dma_paddr & 0xffffffff);
956 0 : boot->br_cmd_hi = htole32(sc->sc_cmdring_dma.dma_paddr >> 32);
957 0 : boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
958 0 : sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
959 0 : sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
960 0 : sc->sc_cmdring.lastwrite = 0;
961 :
962 : /* response ring */
963 0 : if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
964 0 : &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
965 0 : printf("can't allocate response ring\n");
966 0 : goto bail_cmdring;
967 : }
968 0 : bzero(sc->sc_rspring_dma.dma_vaddr, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
969 0 : boot->br_resp_lo = htole32(sc->sc_rspring_dma.dma_paddr & 0xffffffff);
970 0 : boot->br_resp_hi = htole32(sc->sc_rspring_dma.dma_paddr >> 32);
971 0 : boot->br_resp_siz = htole32(CMD_ENTRIES * sizeof(struct txp_rsp_desc));
972 0 : sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
973 0 : sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
974 0 : sc->sc_rspring.lastwrite = 0;
975 :
976 : /* receive buffer ring */
977 0 : if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
978 0 : &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
979 0 : printf("can't allocate rx buffer ring\n");
980 0 : goto bail_rspring;
981 : }
982 0 : bzero(sc->sc_rxbufring_dma.dma_vaddr, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
983 0 : boot->br_rxbuf_lo = htole32(sc->sc_rxbufring_dma.dma_paddr & 0xffffffff);
984 0 : boot->br_rxbuf_hi = htole32(sc->sc_rxbufring_dma.dma_paddr >> 32);
985 0 : boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
986 0 : sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
987 0 : for (i = 0; i < RXBUF_ENTRIES; i++) {
988 0 : sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
989 : M_DEVBUF, M_NOWAIT);
990 :
991 : /* stash away pointer */
992 0 : bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd));
993 :
994 0 : if (sd == NULL)
995 : break;
996 :
997 0 : MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
998 0 : if (sd->sd_mbuf == NULL) {
999 : goto bail_rxbufring;
1000 : }
1001 :
1002 0 : MCLGET(sd->sd_mbuf, M_DONTWAIT);
1003 0 : if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
1004 : goto bail_rxbufring;
1005 : }
1006 0 : sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1007 0 : if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
1008 : TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) {
1009 : goto bail_rxbufring;
1010 : }
1011 0 : if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
1012 : BUS_DMA_NOWAIT)) {
1013 0 : bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
1014 0 : goto bail_rxbufring;
1015 : }
1016 0 : bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
1017 : sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1018 :
1019 0 : sc->sc_rxbufs[i].rb_paddrlo =
1020 0 : ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
1021 0 : sc->sc_rxbufs[i].rb_paddrhi =
1022 0 : ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
1023 : }
1024 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
1025 : 0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
1026 : BUS_DMASYNC_PREWRITE);
1027 0 : sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) *
1028 : sizeof(struct txp_rxbuf_desc));
1029 :
1030 : /* zero dma */
1031 0 : if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
1032 : BUS_DMA_COHERENT)) {
1033 0 : printf("can't allocate response ring\n");
1034 0 : goto bail_rxbufring;
1035 : }
1036 0 : bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t));
1037 0 : boot->br_zero_lo = htole32(sc->sc_zero_dma.dma_paddr & 0xffffffff);
1038 0 : boot->br_zero_hi = htole32(sc->sc_zero_dma.dma_paddr >> 32);
1039 :
1040 : /* See if it's waiting for boot, and try to boot it */
1041 0 : for (i = 0; i < 10000; i++) {
1042 0 : r = READ_REG(sc, TXP_A2H_0);
1043 0 : if (r == STAT_WAITING_FOR_BOOT)
1044 : break;
1045 0 : DELAY(50);
1046 : }
1047 0 : if (r != STAT_WAITING_FOR_BOOT) {
1048 0 : printf("not waiting for boot\n");
1049 0 : goto bail;
1050 : }
1051 0 : WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
1052 0 : WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
1053 0 : WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
1054 :
1055 : /* See if it booted */
1056 0 : for (i = 0; i < 10000; i++) {
1057 0 : r = READ_REG(sc, TXP_A2H_0);
1058 0 : if (r == STAT_RUNNING)
1059 : break;
1060 0 : DELAY(50);
1061 : }
1062 0 : if (r != STAT_RUNNING) {
1063 0 : printf("fw not running\n");
1064 0 : goto bail;
1065 : }
1066 :
1067 : /* Clear TX and CMD ring write registers */
1068 0 : WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
1069 0 : WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
1070 0 : WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
1071 0 : WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
1072 :
1073 0 : return (0);
1074 :
1075 : bail:
1076 0 : txp_dma_free(sc, &sc->sc_zero_dma);
1077 : bail_rxbufring:
1078 0 : for (i = 0; i < RXBUF_ENTRIES; i++) {
1079 0 : bcopy((u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, &sd, sizeof(sd));
1080 0 : if (sd)
1081 0 : free(sd, M_DEVBUF, 0);
1082 : }
1083 0 : txp_dma_free(sc, &sc->sc_rxbufring_dma);
1084 : bail_rspring:
1085 0 : txp_dma_free(sc, &sc->sc_rspring_dma);
1086 : bail_cmdring:
1087 0 : txp_dma_free(sc, &sc->sc_cmdring_dma);
1088 : bail_rxloring:
1089 0 : txp_dma_free(sc, &sc->sc_rxloring_dma);
1090 : bail_rxhiring:
1091 0 : txp_dma_free(sc, &sc->sc_rxhiring_dma);
1092 : bail_txloring:
1093 0 : txp_dma_free(sc, &sc->sc_txloring_dma);
1094 : bail_txhiring:
1095 0 : txp_dma_free(sc, &sc->sc_txhiring_dma);
1096 : bail_host:
1097 0 : txp_dma_free(sc, &sc->sc_host_dma);
1098 : bail_boot:
1099 0 : txp_dma_free(sc, &sc->sc_boot_dma);
1100 0 : return (-1);
1101 0 : }
1102 :
int
txp_dma_malloc(struct txp_softc *sc, bus_size_t size,
    struct txp_dma_alloc *dma, int mapflags)
{
	int r;

	/*
	 * Allocate a single physically-contiguous, page-aligned DMA
	 * region of `size' bytes, map it into kernel VA, and load it
	 * into a DMA map.  On success fills in dma->dma_{seg,nseg,
	 * vaddr,map,paddr}; on failure unwinds exactly the steps that
	 * completed and returns the bus_dma error code (0 on success).
	 * mapflags (e.g. BUS_DMA_COHERENT) is passed to bus_dmamem_map.
	 */
	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	/* one segment only: the NIC gets a single base address per ring */
	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	/* cache the bus address; callers split it into lo/hi 32-bit halves */
	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	return (r);
}
1137 :
void
txp_dma_free(struct txp_softc *sc, struct txp_dma_alloc *dma)
{
	/*
	 * Release a region created by txp_dma_malloc().  Order matters:
	 * unload before unmap/free, and the map size must be read
	 * (dm_mapsize) before the map itself is destroyed.
	 */
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}
1146 :
int
txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	/*
	 * Interface ioctl handler.  Runs at splnet() for the whole
	 * dispatch; anything not handled here falls through to
	 * ether_ioctl().  Returns 0 or an errno.
	 */
	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		txp_init(sc);
		break;

	case SIOCSIFFLAGS:
		/* UP toggles a full reinit; DOWN stops only if running */
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/*
	 * ENETRESET from ether_ioctl means the multicast list changed:
	 * reprogram the rx filter if the chip is up, then report success.
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			txp_set_filter(sc);
		error = 0;
	}

	splx(s);
	return(error);
}
1189 :
void
txp_init(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	/*
	 * (Re)initialize the interface: quiesce first, then program the
	 * rx filter, enable tx/rx on the firmware, unmask interrupts,
	 * mark the interface running and arm the 1-second stats tick.
	 */
	txp_stop(sc);

	s = splnet();

	txp_set_filter(sc);

	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	/* enable everything in IER, then mask all but A2H_3 in IMR */
	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (!timeout_pending(&sc->sc_tick))
		timeout_add_sec(&sc->sc_tick, 1);

	splx(s);
}
1220 :
void
txp_tick(void *vsc)
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	/*
	 * Once-a-second housekeeping: replenish rx buffers, then pull
	 * the firmware statistics block and fold it into the interface
	 * counters.  Reschedules itself unconditionally.
	 */
	s = splnet();
	txp_rxbuf_reclaim(sc);

	/* stats response must carry exactly 6 extension descriptors */
	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	/* clear on read so the next tick gets deltas, not totals */
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	/*
	 * ext[n].ext_m indices map to firmware counter slots; the
	 * groupings below (rx errors, tx errors, collisions) follow the
	 * Typhoon statistics layout.
	 */
	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF, 0);

	splx(s);
	timeout_add_sec(&sc->sc_tick, 1);
}
1257 :
void
txp_start(struct ifnet *ifp)
{
	struct txp_softc *sc = ifp->if_softc;
	struct txp_tx_ring *r = &sc->sc_txhir;	/* tx uses the high-priority ring */
	struct txp_tx_desc *txd;
	int txdidx;
	struct txp_frag_desc *fxd;
	struct mbuf *m;
	struct txp_swdesc *sd;
	u_int32_t prod, cnt, i;

	/*
	 * Transmit start routine: drain the send queue into the tx
	 * ring.  Each packet takes one txp_tx_desc header followed by
	 * one txp_frag_desc per DMA segment.
	 */
	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	prod = r->r_prod;
	cnt = r->r_cnt;

	while (1) {
		/* leave headroom for a worst-case packet plus slack */
		if (cnt >= TX_ENTRIES - TXP_MAXTXSEGS - 4)
			goto oactive;

		/*
		 * NOTE(review): `cnt' is loaded above and stored back on
		 * exit but never incremented in this loop — presumably
		 * txp_tx_reclaim() maintains r_cnt; confirm, otherwise
		 * the oactive threshold above can never trigger.
		 */
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		sd = sc->sc_txd + prod;
		sd->sd_mbuf = m;

		/* on EFBIG, compact the chain once and retry the load */
		switch (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
		    BUS_DMA_NOWAIT)) {
		case 0:
			break;
		case EFBIG:
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
			    BUS_DMA_NOWAIT) == 0)
				break;
		default:
			m_freem(m);
			continue;
		}

		/* build the packet header descriptor (validated last) */
		txd = r->r_desc + prod;
		txdidx = prod;
		txd->tx_flags = TX_FLAGS_TYPE_DATA;
		txd->tx_numdesc = 0;
		txd->tx_addrlo = 0;
		txd->tx_addrhi = 0;
		txd->tx_totlen = m->m_pkthdr.len;
		txd->tx_pflags = 0;
		txd->tx_numdesc = sd->sd_map->dm_nsegs;

		if (++prod == TX_ENTRIES)
			prod = 0;

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			txd->tx_pflags = TX_PFLAGS_VLAN |
			    (htons(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S);
		}
#endif

		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
#endif
#ifdef TRY_TX_UDP_CSUM
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* one fragment descriptor per DMA segment, synced each */
		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
		for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG |
			    FRAG_FLAGS_VALID;
			fxd->frag_rsvd1 = 0;
			fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
			fxd->frag_addrlo =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
			    0xffffffff;
			fxd->frag_addrhi =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
			    32;
			fxd->frag_rsvd2 = 0;

			bus_dmamap_sync(sc->sc_dmat,
			    sc->sc_txhiring_dma.dma_map,
			    prod * sizeof(struct txp_frag_desc),
			    sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE);

			if (++prod == TX_ENTRIES) {
				fxd = (struct txp_frag_desc *)r->r_desc;
				prod = 0;
			} else
				fxd++;

		}

		ifp->if_timer = 5;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Set VALID on the header only after all fragments are
		 * written and synced, so the NIC never sees a partial
		 * packet; then sync the header descriptor itself.
		 */
		txd->tx_flags |= TX_FLAGS_VALID;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
		    txdidx * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);

#if 0
		{
			struct mbuf *mx;
			int i;

			printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
			    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
			    txd->tx_pflags);
			for (mx = m; mx != NULL; mx = mx->m_next) {
				for (i = 0; i < mx->m_len; i++) {
					printf(":%02x",
					    (u_int8_t)m->m_data[i]);
				}
			}
			printf("\n");
		}
#endif

		/* kick the NIC: advance its producer offset */
		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
	}

	r->r_prod = prod;
	r->r_cnt = cnt;
	return;

oactive:
	ifq_set_oactive(&ifp->if_snd);
	r->r_prod = prod;
	r->r_cnt = cnt;
}
1404 :
1405 : /*
1406 : * Handle simple commands sent to the typhoon
1407 : */
1408 : int
1409 0 : txp_command(struct txp_softc *sc, u_int16_t id, u_int16_t in1,
1410 : u_int32_t in2, u_int32_t in3, u_int16_t *out1, u_int32_t *out2,
1411 : u_int32_t *out3, int wait)
1412 : {
1413 0 : struct txp_rsp_desc *rsp = NULL;
1414 :
1415 0 : if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
1416 0 : return (-1);
1417 :
1418 0 : if (!wait)
1419 0 : return (0);
1420 :
1421 0 : if (out1 != NULL)
1422 0 : *out1 = letoh16(rsp->rsp_par1);
1423 0 : if (out2 != NULL)
1424 0 : *out2 = letoh32(rsp->rsp_par2);
1425 0 : if (out3 != NULL)
1426 0 : *out3 = letoh32(rsp->rsp_par3);
1427 0 : free(rsp, M_DEVBUF, 0);
1428 0 : return (0);
1429 0 : }
1430 :
1431 : int
1432 0 : txp_command2(struct txp_softc *sc, u_int16_t id, u_int16_t in1,
1433 : u_int32_t in2, u_int32_t in3, struct txp_ext_desc *in_extp,
1434 : u_int8_t in_extn,struct txp_rsp_desc **rspp, int wait)
1435 : {
1436 0 : struct txp_hostvar *hv = sc->sc_hostvar;
1437 : struct txp_cmd_desc *cmd;
1438 : struct txp_ext_desc *ext;
1439 : u_int32_t idx, i;
1440 : u_int16_t seq;
1441 :
1442 0 : if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
1443 0 : printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
1444 0 : return (-1);
1445 : }
1446 :
1447 0 : idx = sc->sc_cmdring.lastwrite;
1448 0 : cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
1449 0 : bzero(cmd, sizeof(*cmd));
1450 :
1451 0 : cmd->cmd_numdesc = in_extn;
1452 0 : seq = sc->sc_seq++;
1453 0 : cmd->cmd_seq = htole16(seq);
1454 0 : cmd->cmd_id = htole16(id);
1455 0 : cmd->cmd_par1 = htole16(in1);
1456 0 : cmd->cmd_par2 = htole32(in2);
1457 0 : cmd->cmd_par3 = htole32(in3);
1458 0 : cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
1459 0 : (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
1460 :
1461 0 : idx += sizeof(struct txp_cmd_desc);
1462 0 : if (idx == sc->sc_cmdring.size)
1463 : idx = 0;
1464 :
1465 0 : for (i = 0; i < in_extn; i++) {
1466 0 : ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
1467 0 : bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
1468 0 : in_extp++;
1469 0 : idx += sizeof(struct txp_cmd_desc);
1470 0 : if (idx == sc->sc_cmdring.size)
1471 : idx = 0;
1472 : }
1473 :
1474 0 : sc->sc_cmdring.lastwrite = idx;
1475 :
1476 0 : WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
1477 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
1478 : sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
1479 :
1480 0 : if (!wait)
1481 0 : return (0);
1482 :
1483 0 : for (i = 0; i < 10000; i++) {
1484 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
1485 : sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
1486 0 : idx = letoh32(hv->hv_resp_read_idx);
1487 0 : if (idx != letoh32(hv->hv_resp_write_idx)) {
1488 0 : *rspp = NULL;
1489 0 : if (txp_response(sc, idx, id, seq, rspp))
1490 0 : return (-1);
1491 0 : if (*rspp != NULL)
1492 : break;
1493 : }
1494 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
1495 : sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
1496 0 : DELAY(50);
1497 : }
1498 0 : if (i == 1000 || (*rspp) == NULL) {
1499 0 : printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
1500 0 : return (-1);
1501 : }
1502 :
1503 0 : return (0);
1504 0 : }
1505 :
int
txp_response(struct txp_softc *sc, u_int32_t ridx, u_int16_t id,
    u_int16_t seq, struct txp_rsp_desc **rspp)
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	/*
	 * Walk the response ring from `ridx' to the firmware's write
	 * index.  The response matching (id, seq) is copied (including
	 * its extension descriptors) into a malloc'd buffer returned
	 * via *rspp; all other responses encountered are consumed in
	 * place.  Returns 0 (also when no match is found before the
	 * write index), -1 only on allocation failure.
	 */
	while (ridx != letoh32(hv->hv_resp_write_idx)) {
		rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);

		if (id == letoh16(rsp->rsp_id) && letoh16(rsp->rsp_seq) == seq) {
			/* +1: the response descriptor itself plus its exts */
			*rspp = mallocarray(rsp->rsp_numdesc + 1,
			    sizeof(struct txp_rsp_desc), M_DEVBUF, M_NOWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			printf("%s: response error: id 0x%x\n",
			    TXP_DEVNAME(sc), letoh16(rsp->rsp_id));
			/* consume it; fixup advanced hv_resp_read_idx */
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = letoh32(hv->hv_resp_read_idx);
			continue;
		}

		/* unsolicited responses the firmware may emit */
		switch (letoh16(rsp->rsp_id)) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			printf("%s: hello\n", TXP_DEVNAME(sc));
			break;
		default:
			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
			    letoh16(rsp->rsp_id));
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = letoh32(hv->hv_resp_read_idx);
		/*
		 * NOTE(review): stores letoh32(ridx) where htole32(ridx)
		 * reads more naturally; the two are the same byte swap,
		 * and this round-trips the value fixup just wrote.
		 */
		hv->hv_resp_read_idx = letoh32(ridx);
	}

	return (0);
}
1552 :
void
txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
    struct txp_rsp_desc *dst)
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t i, ridx;

	/*
	 * Consume a response (the descriptor plus its rsp_numdesc
	 * extensions) from the response ring, handling wrap-around.
	 * If `dst' is non-NULL the descriptors are copied out to it;
	 * either way the host read index (and lastwrite) advance past
	 * the response.
	 */
	ridx = letoh32(hv->hv_resp_read_idx);

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			/* wrap: continue copying from the ring base */
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = ridx;
		hv->hv_resp_read_idx = htole32(ridx);
	}

	/* redundant with the per-iteration store above; kept as-is */
	hv->hv_resp_read_idx = htole32(ridx);
}
1578 :
1579 : int
1580 0 : txp_cmd_desc_numfree(struct txp_softc *sc)
1581 : {
1582 0 : struct txp_hostvar *hv = sc->sc_hostvar;
1583 0 : struct txp_boot_record *br = sc->sc_boot;
1584 : u_int32_t widx, ridx, nfree;
1585 :
1586 0 : widx = sc->sc_cmdring.lastwrite;
1587 0 : ridx = letoh32(hv->hv_cmd_read_idx);
1588 :
1589 0 : if (widx == ridx) {
1590 : /* Ring is completely free */
1591 0 : nfree = letoh32(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
1592 0 : } else {
1593 0 : if (widx > ridx)
1594 0 : nfree = letoh32(br->br_cmd_siz) -
1595 0 : (widx - ridx + sizeof(struct txp_cmd_desc));
1596 : else
1597 0 : nfree = ridx - widx - sizeof(struct txp_cmd_desc);
1598 : }
1599 :
1600 0 : return (nfree / sizeof(struct txp_cmd_desc));
1601 : }
1602 :
void
txp_stop(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Quiesce the interface: stop the stats tick, clear the
	 * running/oactive state and watchdog, then tell the firmware
	 * to disable tx and rx (waiting for each to complete).
	 */
	timeout_del(&sc->sc_tick);

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
}
1618 :
void
txp_watchdog(struct ifnet *ifp)
{
	/* Intentionally empty: no watchdog recovery is implemented. */
}
1623 :
1624 : int
1625 0 : txp_ifmedia_upd(struct ifnet *ifp)
1626 : {
1627 0 : struct txp_softc *sc = ifp->if_softc;
1628 0 : struct ifmedia *ifm = &sc->sc_ifmedia;
1629 : u_int16_t new_xcvr;
1630 :
1631 0 : if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1632 0 : return (EINVAL);
1633 :
1634 0 : if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
1635 0 : if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1636 0 : new_xcvr = TXP_XCVR_10_FDX;
1637 : else
1638 : new_xcvr = TXP_XCVR_10_HDX;
1639 0 : } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
1640 0 : if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1641 0 : new_xcvr = TXP_XCVR_100_FDX;
1642 : else
1643 : new_xcvr = TXP_XCVR_100_HDX;
1644 0 : } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1645 : new_xcvr = TXP_XCVR_AUTO;
1646 : } else
1647 0 : return (EINVAL);
1648 :
1649 : /* nothing to do */
1650 0 : if (sc->sc_xcvr == new_xcvr)
1651 0 : return (0);
1652 :
1653 0 : txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
1654 : NULL, NULL, NULL, 0);
1655 0 : sc->sc_xcvr = new_xcvr;
1656 :
1657 0 : return (0);
1658 0 : }
1659 :
void
txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anar, anlpar;

	/*
	 * Report current media status by reading the PHY registers
	 * through firmware commands and decoding them like a MII
	 * driver would.  On any command failure, report no media and
	 * mark the status invalid.
	 */
	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/*
	 * BMSR is read twice — presumably because link status is
	 * latched and the first read can return stale state (standard
	 * MII idiom); confirm against the PHY documentation.
	 */
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
	    &anar, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	/* isolated PHY: no media at all */
	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (bmcr & BMCR_AUTOEN) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			/* autonegotiation still in progress */
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		/* intersect our abilities with the link partner's */
		anlpar &= anar;
		if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4|IFM_HDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX|IFM_HDX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T|IFM_HDX;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		/* forced media: report what was configured */
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}
1728 :
void
txp_show_descriptor(void *d)
{
	/*
	 * Debug helper: decode and print a ring descriptor.  All four
	 * descriptor types share the same size and a common flags byte,
	 * so the type is taken from cmd_flags and the matching view of
	 * `d' is printed.
	 */
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, letoh16(cmd->cmd_id),
		    letoh16(cmd->cmd_seq), letoh16(cmd->cmd_par1),
		    letoh32(cmd->cmd_par2), letoh32(cmd->cmd_par3));
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, letoh16(rsp->rsp_id),
		    letoh16(rsp->rsp_seq), letoh16(rsp->rsp_par1),
		    letoh32(rsp->rsp_par2), letoh32(rsp->rsp_par3));
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
		break;
	default:
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, letoh16(cmd->cmd_id),
		    letoh16(cmd->cmd_seq), letoh16(cmd->cmd_par1),
		    letoh32(cmd->cmd_par2), letoh32(cmd->cmd_par3));
		break;
	}
}
1773 :
void
txp_set_filter(struct txp_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t hashbit, hash[2];
	u_int16_t filter;
	int mcnt = 0;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Program the receive filter from the interface flags and the
	 * multicast list: promiscuous overrides everything; otherwise
	 * direct + broadcast, plus either all-multicast or a 64-bit
	 * CRC-based multicast hash uploaded to the firmware.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

	/* address ranges can't be hashed; fall back to all-multicast */
	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;

	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= TXP_RXFILT_ALLMULTI;
	else {
		hash[0] = hash[1] = 0;

		/* big-endian CRC of each address selects one of 64 bits */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			mcnt++;
			hashbit = (u_int16_t)(ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
			ETHER_NEXT_MULTI(step, enm);
		}

		if (mcnt > 0) {
			filter |= TXP_RXFILT_HASHMULTI;
			/* upload the two 32-bit hash words (no wait) */
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}
1823 :
void
txp_capabilities(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	/*
	 * Query the firmware's offload capabilities, intersect them
	 * with what this driver supports, advertise the result via
	 * ifp->if_capabilities, and write the chosen tx/rx offload
	 * masks back to the firmware.
	 */
	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	/* current tx/rx offload state reported by the firmware */
	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* par2/par3 carry the supported tx/rx offload feature bits */
#if NVLAN > 0
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		sc->sc_rx_capability |= OFFLOAD_VLAN;
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	}
#endif

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

	/* tx TCP/UDP checksum offload only when explicitly enabled */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4;
#endif
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
#ifdef TRY_TX_UDP_CSUM
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4;
#endif
	}

	/* goto falls through to `out' either way; kept for symmetry */
	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF, 0);
}
|