1 : /* $OpenBSD: if_bce.c,v 1.52 2017/01/22 10:17:38 dlg Exp $ */
2 : /* $NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $ */
3 :
4 : /*
5 : * Copyright (c) 2003 Clifford Wright. All rights reserved.
6 : *
7 : * Redistribution and use in source and binary forms, with or without
8 : * modification, are permitted provided that the following conditions
9 : * are met:
10 : * 1. Redistributions of source code must retain the above copyright
11 : * notice, this list of conditions and the following disclaimer.
12 : * 2. Redistributions in binary form must reproduce the above copyright
13 : * notice, this list of conditions and the following disclaimer in the
14 : * documentation and/or other materials provided with the distribution.
15 : * 3. The name of the author may not be used to endorse or promote products
16 : * derived from this software without specific prior written permission.
17 : *
18 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 : * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 : * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 : * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 : * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 : * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 : * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 : * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 : * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 : * SUCH DAMAGE.
29 : */
30 :
31 : /*
32 : * Broadcom BCM440x 10/100 ethernet (broadcom.com)
33 : * SiliconBackplane is technology from Sonics, Inc. (sonicsinc.com)
34 : *
35 : * Cliff Wright cliff@snipe444.org
36 : */
37 :
38 : #include "bpfilter.h"
39 :
40 : #include <sys/param.h>
41 : #include <sys/systm.h>
42 : #include <sys/timeout.h>
43 : #include <sys/sockio.h>
44 : #include <sys/mbuf.h>
45 : #include <sys/malloc.h>
46 : #include <sys/kernel.h>
47 : #include <sys/device.h>
48 : #include <sys/socket.h>
49 :
50 : #include <net/if.h>
51 : #include <net/if_media.h>
52 :
53 : #include <netinet/in.h>
54 : #include <netinet/if_ether.h>
55 : #if NBPFILTER > 0
56 : #include <net/bpf.h>
57 : #endif
58 :
59 : #include <dev/pci/pcireg.h>
60 : #include <dev/pci/pcivar.h>
61 : #include <dev/pci/pcidevs.h>
62 :
63 : #include <dev/mii/mii.h>
64 : #include <dev/mii/miivar.h>
65 : #include <dev/mii/miidevs.h>
66 :
67 : #include <dev/pci/if_bcereg.h>
68 :
69 : #include <uvm/uvm.h>
70 :
71 : /* ring descriptor */
72 : struct bce_dma_slot {
73 : u_int32_t ctrl;
74 : u_int32_t addr;
75 : };
76 : #define CTRL_BC_MASK 0x1fff /* buffer byte count */
77 : #define CTRL_EOT 0x10000000 /* end of descriptor table */
78 : #define CTRL_IOC 0x20000000 /* interrupt on completion */
79 : #define CTRL_EOF 0x40000000 /* end of frame */
80 : #define CTRL_SOF 0x80000000 /* start of frame */
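/*
 * Illustrative sketch (an assumption, not part of the original driver):
 * how a one-fragment transmit descriptor could be composed from the
 * control bits above, mirroring what bce_start() below does.  The
 * 0x40000000 offset is the same "MAGIC" adjustment the driver applies
 * to every DMA address handed to the chip.
 */
#ifdef notyet
static void
bce_example_fill_txslot(struct bce_dma_slot *dp, u_int32_t busaddr,
    int len, int is_last_slot)
{
	u_int32_t ctrl;

	/* one buffer carries the whole frame: SOF and EOF on the same slot */
	ctrl = (len & CTRL_BC_MASK) | CTRL_SOF | CTRL_EOF | CTRL_IOC;
	if (is_last_slot)
		ctrl |= CTRL_EOT;	/* chip wraps back to slot 0 */
	dp->ctrl = htole32(ctrl);
	dp->addr = htole32(busaddr + 0x40000000);	/* MAGIC */
}
#endif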
81 :
82 : #define BCE_RXBUF_LEN (MCLBYTES - 4)
83 :
84 : /* Packet status is returned in a pre-packet header */
85 : struct rx_pph {
86 : u_int16_t len;
87 : u_int16_t flags;
88 : u_int16_t pad[12];
89 : };
90 :
91 : #define BCE_PREPKT_HEADER_SIZE 30
92 :
93 : /* packet status flags bits */
94 : #define RXF_NO 0x8 /* odd number of nibbles */
95 : #define RXF_RXER 0x4 /* receive symbol error */
96 : #define RXF_CRC 0x2 /* crc error */
97 : #define RXF_OV 0x1 /* fifo overflow */
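/*
 * Illustrative sketch (an assumption, not part of the original driver):
 * validating the pre-packet header the chip writes at the head of a
 * receive buffer, mirroring the checks bce_rxintr() performs.  Returns
 * the payload length with the trailing CRC removed, or -1 if the frame
 * is bad or the slot has not been filled in yet.
 */
#ifdef notyet
static int
bce_example_check_pph(struct rx_pph *pph)
{
	int len;

	if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV))
		return (-1);			/* bad frame, recycle buffer */
	len = pph->len;
	if (len == 0)
		return (-1);			/* nothing received yet */
	return (len - ETHER_CRC_LEN);		/* chip includes the CRC */
}
#endif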
98 :
99 : /* number of descriptors used in a ring */
100 : #define BCE_NRXDESC 64
101 : #define BCE_NTXDESC 64
102 :
103 : #define BCE_TIMEOUT 100 /* number of 10us waits for an mii read/write */
104 :
105 : struct bce_softc {
106 : struct device bce_dev;
107 : bus_space_tag_t bce_btag;
108 : bus_space_handle_t bce_bhandle;
109 : bus_dma_tag_t bce_dmatag;
110 : struct arpcom bce_ac; /* interface info */
111 : void *bce_intrhand;
112 : struct pci_attach_args bce_pa;
113 : struct mii_data bce_mii;
114 : u_int32_t bce_phy; /* eeprom indicated phy */
115 : struct bce_dma_slot *bce_rx_ring; /* receive ring */
116 : struct bce_dma_slot *bce_tx_ring; /* transmit ring */
117 : caddr_t bce_data;
118 : bus_dmamap_t bce_ring_map;
119 : bus_dmamap_t bce_rxdata_map;
120 : bus_dmamap_t bce_txdata_map;
121 : u_int32_t bce_intmask; /* current intr mask */
122 : u_int32_t bce_rxin; /* last rx descriptor seen */
123 : u_int32_t bce_txin; /* last tx descriptor seen */
124 : int bce_txsfree; /* no. tx slots available */
125 : int bce_txsnext; /* next available tx slot */
126 : struct timeout bce_timeout;
127 : };
128 :
129 : int bce_probe(struct device *, void *, void *);
130 : void bce_attach(struct device *, struct device *, void *);
131 : int bce_activate(struct device *, int);
132 : int bce_ioctl(struct ifnet *, u_long, caddr_t);
133 : void bce_start(struct ifnet *);
134 : void bce_watchdog(struct ifnet *);
135 : int bce_intr(void *);
136 : void bce_rxintr(struct bce_softc *);
137 : void bce_txintr(struct bce_softc *);
138 : int bce_init(struct ifnet *);
139 : void bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
140 : void bce_add_rxbuf(struct bce_softc *, int);
141 : void bce_stop(struct ifnet *);
142 : void bce_reset(struct bce_softc *);
143 : void bce_iff(struct ifnet *);
144 : int bce_mii_read(struct device *, int, int);
145 : void bce_mii_write(struct device *, int, int, int);
146 : void bce_statchg(struct device *);
147 : int bce_mediachange(struct ifnet *);
148 : void bce_mediastatus(struct ifnet *, struct ifmediareq *);
149 : void bce_tick(void *);
150 :
151 : #ifdef BCE_DEBUG
152 : #define DPRINTF(x) do { \
153 : if (bcedebug) \
154 : printf x; \
155 : } while (/* CONSTCOND */ 0)
156 : #define DPRINTFN(n,x) do { \
157 : if (bcedebug >= (n)) \
158 : printf x; \
159 : } while (/* CONSTCOND */ 0)
160 : int bcedebug = 0;
161 : #else
162 : #define DPRINTF(x)
163 : #define DPRINTFN(n,x)
164 : #endif
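/*
 * Example usage (sketch): in a kernel built with BCE_DEBUG and with
 * bcedebug set to 2 or more,
 *	DPRINTFN(2, ("%s: tx slot %d\n", sc->bce_dev.dv_xname, slot));
 * prints the message; without BCE_DEBUG the macro expands to nothing.
 */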
165 :
166 : struct cfattach bce_ca = {
167 : sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate
168 : };
169 : struct cfdriver bce_cd = {
170 : NULL, "bce", DV_IFNET
171 : };
172 :
173 : const struct pci_matchid bce_devices[] = {
174 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
175 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
176 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
177 : };
178 :
179 : int
180 0 : bce_probe(struct device *parent, void *match, void *aux)
181 : {
182 0 : return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
183 : nitems(bce_devices)));
184 : }
185 :
186 : void
187 0 : bce_attach(struct device *parent, struct device *self, void *aux)
188 : {
189 0 : struct bce_softc *sc = (struct bce_softc *) self;
190 0 : struct pci_attach_args *pa = aux;
191 0 : pci_chipset_tag_t pc = pa->pa_pc;
192 0 : pci_intr_handle_t ih;
193 : const char *intrstr = NULL;
194 0 : caddr_t kva;
195 0 : bus_dma_segment_t seg;
196 0 : int rseg;
197 : struct ifnet *ifp;
198 : pcireg_t memtype;
199 0 : bus_addr_t memaddr;
200 0 : bus_size_t memsize;
201 0 : int pmreg;
202 : pcireg_t pmode;
203 : int error;
204 :
205 0 : sc->bce_pa = *pa;
206 0 : sc->bce_dmatag = pa->pa_dmat;
207 :
208 : /*
209 : * Map control/status registers.
210 : */
211 0 : memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
212 0 : if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
213 0 : &sc->bce_bhandle, &memaddr, &memsize, 0)) {
214 0 : printf(": unable to find mem space\n");
215 0 : return;
216 : }
217 :
218 : /* Get it out of power save mode if needed. */
219 0 : if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
220 0 : pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
221 0 : if (pmode == 3) {
222 : /*
223 : * The card has lost all configuration data in
224 : * this state, so punt.
225 : */
226 0 : printf(": unable to wake up from power state D3\n");
227 0 : return;
228 : }
229 0 : if (pmode != 0) {
230 0 : printf(": waking up from power state D%d\n",
231 : pmode);
232 0 : pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
233 0 : }
234 : }
235 :
236 0 : if (pci_intr_map(pa, &ih)) {
237 0 : printf(": couldn't map interrupt\n");
238 0 : return;
239 : }
240 :
241 0 : intrstr = pci_intr_string(pc, ih);
242 0 : sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
243 0 : self->dv_xname);
244 0 : if (sc->bce_intrhand == NULL) {
245 0 : printf(": couldn't establish interrupt");
246 0 : if (intrstr != NULL)
247 0 : printf(" at %s", intrstr);
248 0 : printf("\n");
249 0 : return;
250 : }
251 :
252 : /* reset the chip */
253 0 : bce_reset(sc);
254 :
255 : /* Create the data DMA region and maps. */
256 0 : if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map,
257 0 : uvm.kernel_object, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0,
258 0 : UVM_KMF_NOWAIT, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL) {
259 0 : printf(": unable to alloc space for ring");
260 0 : return;
261 : }
262 :
263 : /* create a dma map for the RX ring */
264 0 : if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES,
265 : 1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
266 : &sc->bce_rxdata_map))) {
267 0 : printf(": unable to create ring DMA map, error = %d\n", error);
268 0 : uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
269 : (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
270 0 : return;
271 : }
272 :
273 : /* connect the ring space to the dma map */
274 0 : if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,
275 : BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) {
276 0 : printf(": unable to load rx ring DMA map\n");
277 0 : uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
278 : (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
279 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
280 0 : return;
281 : }
282 :
283 : /* create a dma map for the TX ring */
284 0 : if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES,
285 : 1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
286 : &sc->bce_txdata_map))) {
287 0 : printf(": unable to create ring DMA map, error = %d\n", error);
288 0 : uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
289 : (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
290 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
291 0 : return;
292 : }
293 :
294 : /* connect the ring space to the dma map */
295 0 : if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map,
296 : sc->bce_data + BCE_NRXDESC * MCLBYTES,
297 : BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) {
298 0 : printf(": unable to load tx ring DMA map\n");
299 0 : uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
300 : (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
301 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
302 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
303 0 : return;
304 : }
305 :
306 :
307 : /*
308 : * Allocate DMA-safe memory for ring descriptors.
309 : * The receive and transmit rings cannot share the same 4k space;
310 : * both are, however, allocated at once here.
311 : */
312 : /*
313 : * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but each ring
314 : * gets its own page because of the 4k-boundary limitation above.
315 : */
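	/*
	 * For scale: each ring holds 64 eight-byte descriptors, i.e. 512
	 * bytes actually used out of every 4k page allocated here; the
	 * page-per-ring layout simply guarantees the two rings never share
	 * a 4k region.
	 */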
316 0 : if ((error = bus_dmamem_alloc_range(sc->bce_dmatag, 2 * PAGE_SIZE,
317 : PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT,
318 : (bus_addr_t)0, (bus_addr_t)0x3fffffff))) {
319 0 : printf(": unable to alloc space for ring descriptors, "
320 : "error = %d\n", error);
321 0 : uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
322 : (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
323 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
324 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
325 0 : return;
326 : }
327 :
328 : /* map ring space to kernel */
329 0 : if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
330 : 2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
331 0 : printf(": unable to map DMA buffers, error = %d\n", error);
332 0 : uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
333 : (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
334 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
335 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
336 0 : bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
337 0 : return;
338 : }
339 :
340 : /* create a dma map for the ring */
341 0 : if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,
342 : 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) {
343 0 : printf(": unable to create ring DMA map, error = %d\n", error);
344 0 : uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
345 : (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
346 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
347 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
348 0 : bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
349 0 : return;
350 : }
351 :
352 : /* connect the ring space to the dma map */
353 0 : if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
354 : 2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
355 0 : printf(": unable to load ring DMA map\n");
356 0 : uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
357 : (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
358 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
359 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
360 0 : bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
361 0 : bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
362 0 : return;
363 : }
364 :
365 : /* save the ring space in softc */
366 0 : sc->bce_rx_ring = (struct bce_dma_slot *)kva;
367 0 : sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE);
368 :
369 : /* Set up ifnet structure */
370 0 : ifp = &sc->bce_ac.ac_if;
371 0 : strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
372 0 : ifp->if_softc = sc;
373 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
374 0 : ifp->if_ioctl = bce_ioctl;
375 0 : ifp->if_start = bce_start;
376 0 : ifp->if_watchdog = bce_watchdog;
377 :
378 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
379 :
380 : /* MAC address */
381 0 : sc->bce_ac.ac_enaddr[0] =
382 0 : bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
383 0 : sc->bce_ac.ac_enaddr[1] =
384 0 : bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
385 0 : sc->bce_ac.ac_enaddr[2] =
386 0 : bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
387 0 : sc->bce_ac.ac_enaddr[3] =
388 0 : bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
389 0 : sc->bce_ac.ac_enaddr[4] =
390 0 : bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
391 0 : sc->bce_ac.ac_enaddr[5] =
392 0 : bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);
393 :
394 0 : printf(": %s, address %s\n", intrstr,
395 0 : ether_sprintf(sc->bce_ac.ac_enaddr));
396 :
397 : /* Initialize our media structures and probe the MII. */
398 0 : sc->bce_mii.mii_ifp = ifp;
399 0 : sc->bce_mii.mii_readreg = bce_mii_read;
400 0 : sc->bce_mii.mii_writereg = bce_mii_write;
401 0 : sc->bce_mii.mii_statchg = bce_statchg;
402 0 : ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
403 : bce_mediastatus);
404 0 : mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
405 : MII_OFFSET_ANY, 0);
406 0 : if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
407 0 : ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
408 0 : ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
409 0 : } else
410 0 : ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
411 :
412 : /* get the phy */
413 0 : sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
414 0 : BCE_PHY) & 0x1f;
415 :
416 : /*
417 : * Enable activity led.
418 : * XXX This should be in a phy driver, but is not currently.
419 : */
420 0 : bce_mii_write((struct device *) sc, 1, 26, /* MAGIC */
421 0 : bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */
422 :
423 : /* enable traffic meter led mode */
424 0 : bce_mii_write((struct device *) sc, 1, 27, /* MAGIC */
425 0 : bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */
426 :
427 : /* Attach the interface */
428 0 : if_attach(ifp);
429 0 : ether_ifattach(ifp);
430 :
431 0 : timeout_set(&sc->bce_timeout, bce_tick, sc);
432 0 : }
433 :
434 : int
435 0 : bce_activate(struct device *self, int act)
436 : {
437 0 : struct bce_softc *sc = (struct bce_softc *)self;
438 0 : struct ifnet *ifp = &sc->bce_ac.ac_if;
439 :
440 0 : switch (act) {
441 : case DVACT_SUSPEND:
442 0 : if (ifp->if_flags & IFF_RUNNING)
443 0 : bce_stop(ifp);
444 : break;
445 : case DVACT_RESUME:
446 0 : if (ifp->if_flags & IFF_UP) {
447 0 : bce_init(ifp);
448 0 : bce_start(ifp);
449 0 : }
450 : break;
451 : }
452 :
453 0 : return (0);
454 : }
455 :
456 : /* handle media and ethernet ioctl requests */
457 : int
458 0 : bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
459 : {
460 0 : struct bce_softc *sc = ifp->if_softc;
461 0 : struct ifreq *ifr = (struct ifreq *) data;
462 : int s, error = 0;
463 :
464 0 : s = splnet();
465 :
466 0 : switch (cmd) {
467 : case SIOCSIFADDR:
468 0 : ifp->if_flags |= IFF_UP;
469 0 : if (!(ifp->if_flags & IFF_RUNNING))
470 0 : bce_init(ifp);
471 : break;
472 :
473 : case SIOCSIFFLAGS:
474 0 : if (ifp->if_flags & IFF_UP) {
475 0 : if (ifp->if_flags & IFF_RUNNING)
476 0 : error = ENETRESET;
477 : else
478 0 : bce_init(ifp);
479 : } else {
480 0 : if (ifp->if_flags & IFF_RUNNING)
481 0 : bce_stop(ifp);
482 : }
483 : break;
484 :
485 : case SIOCSIFMEDIA:
486 : case SIOCGIFMEDIA:
487 0 : error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
488 0 : break;
489 :
490 : default:
491 0 : error = ether_ioctl(ifp, &sc->bce_ac, cmd, data);
492 0 : }
493 :
494 0 : if (error == ENETRESET) {
495 0 : if (ifp->if_flags & IFF_RUNNING)
496 0 : bce_iff(ifp);
497 : error = 0;
498 0 : }
499 :
500 0 : splx(s);
501 0 : return error;
502 : }
503 :
504 : /* Start packet transmission on the interface. */
505 : void
506 0 : bce_start(struct ifnet *ifp)
507 : {
508 0 : struct bce_softc *sc = ifp->if_softc;
509 : struct mbuf *m0;
510 : u_int32_t ctrl;
511 : int txstart;
512 : int txsfree;
513 : int newpkts = 0;
514 :
515 : /*
516 : * Do not start output if the interface is not running or if output
517 : * is already active (no more tx descriptor slots are available).
518 : */
519 0 : if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
520 0 : return;
521 :
522 : /* determine number of descriptors available */
523 0 : if (sc->bce_txsnext >= sc->bce_txin)
524 0 : txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
525 : else
526 0 : txsfree = sc->bce_txin - sc->bce_txsnext - 1;
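	/*
	 * Worked example: with BCE_NTXDESC = 64, bce_txin = 10 and
	 * bce_txsnext = 12, txsfree = 64 - 1 + 10 - 12 = 61.  One slot is
	 * always kept unused so a completely full ring can be told apart
	 * from an empty one.
	 */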
527 :
528 : /*
529 : * Loop through the send queue, setting up transmit descriptors
530 : * until we drain the queue, or use up all available transmit
531 : * descriptors.
532 : */
533 0 : while (txsfree > 0) {
534 :
535 : /* Grab a packet off the queue. */
536 0 : IFQ_DEQUEUE(&ifp->if_snd, m0);
537 0 : if (m0 == NULL)
538 : break;
539 :
540 : /*
541 : * copy mbuf chain into DMA memory buffer.
542 : */
543 0 : m_copydata(m0, 0, m0->m_pkthdr.len, sc->bce_data +
544 0 : (sc->bce_txsnext + BCE_NRXDESC) * MCLBYTES);
545 0 : ctrl = m0->m_pkthdr.len & CTRL_BC_MASK;
546 0 : ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC;
547 :
548 : #if NBPFILTER > 0
549 : /* Pass the packet to any BPF listeners. */
550 0 : if (ifp->if_bpf)
551 0 : bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
552 : #endif
553 : /* mbuf no longer needed */
554 0 : m_freem(m0);
555 :
556 : /* Sync the data DMA map. */
557 0 : bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
558 : sc->bce_txsnext * MCLBYTES, MCLBYTES, BUS_DMASYNC_PREWRITE);
559 :
560 : /* Initialize the transmit descriptor(s). */
561 0 : txstart = sc->bce_txsnext;
562 :
563 0 : if (sc->bce_txsnext == BCE_NTXDESC - 1)
564 0 : ctrl |= CTRL_EOT;
565 0 : sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
566 0 : sc->bce_tx_ring[sc->bce_txsnext].addr =
567 0 : htole32(sc->bce_txdata_map->dm_segs[0].ds_addr +
568 : sc->bce_txsnext * MCLBYTES + 0x40000000); /* MAGIC */
569 0 : if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
570 0 : sc->bce_txsnext = 0;
571 : else
572 0 : sc->bce_txsnext++;
573 0 : txsfree--;
574 :
575 : /* sync descriptors being used */
576 0 : bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
577 : sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
578 : sizeof(struct bce_dma_slot),
579 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
580 :
581 : /* Give the packet to the chip. */
582 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
583 : sc->bce_txsnext * sizeof(struct bce_dma_slot));
584 :
585 0 : newpkts++;
586 : }
587 0 : if (txsfree == 0) {
588 : /* No more slots left; notify upper layer. */
589 0 : ifq_set_oactive(&ifp->if_snd);
590 0 : }
591 0 : if (newpkts) {
592 : /* Set a watchdog timer in case the chip flakes out. */
593 0 : ifp->if_timer = 5;
594 0 : }
595 0 : }
596 :
597 : /* Watchdog timer handler. */
598 : void
599 0 : bce_watchdog(struct ifnet *ifp)
600 : {
601 0 : struct bce_softc *sc = ifp->if_softc;
602 :
603 0 : printf("%s: device timeout\n", sc->bce_dev.dv_xname);
604 0 : ifp->if_oerrors++;
605 :
606 0 : (void) bce_init(ifp);
607 :
608 : /* Try to get more packets going. */
609 0 : bce_start(ifp);
610 0 : }
611 :
612 : int
613 0 : bce_intr(void *xsc)
614 : {
615 : struct bce_softc *sc;
616 : struct ifnet *ifp;
617 : u_int32_t intstatus;
618 : int wantinit;
619 : int handled = 0;
620 :
621 0 : sc = xsc;
622 0 : ifp = &sc->bce_ac.ac_if;
623 :
624 :
625 0 : for (wantinit = 0; wantinit == 0;) {
626 0 : intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
627 : BCE_INT_STS);
628 :
629 : /* ignore interrupts that are not ours or are unsolicited */
630 0 : intstatus &= sc->bce_intmask;
631 0 : if (intstatus == 0)
632 : break;
633 :
634 : handled = 1;
635 :
636 : /* Ack interrupt */
637 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
638 : intstatus);
639 :
640 : /* Receive interrupts. */
641 0 : if (intstatus & I_RI)
642 0 : bce_rxintr(sc);
643 : /* Transmit interrupts. */
644 0 : if (intstatus & I_XI)
645 0 : bce_txintr(sc);
646 : /* Error interrupts */
647 0 : if (intstatus & ~(I_RI | I_XI)) {
648 0 : if (intstatus & I_XU)
649 0 : printf("%s: transmit fifo underflow\n",
650 0 : sc->bce_dev.dv_xname);
651 0 : if (intstatus & I_RO) {
652 0 : printf("%s: receive fifo overflow\n",
653 0 : sc->bce_dev.dv_xname);
654 0 : ifp->if_ierrors++;
655 0 : }
656 0 : if (intstatus & I_RU)
657 0 : printf("%s: receive descriptor underflow\n",
658 0 : sc->bce_dev.dv_xname);
659 0 : if (intstatus & I_DE)
660 0 : printf("%s: descriptor protocol error\n",
661 0 : sc->bce_dev.dv_xname);
662 0 : if (intstatus & I_PD)
663 0 : printf("%s: data error\n",
664 0 : sc->bce_dev.dv_xname);
665 0 : if (intstatus & I_PC)
666 0 : printf("%s: descriptor error\n",
667 0 : sc->bce_dev.dv_xname);
668 0 : if (intstatus & I_TO)
669 0 : printf("%s: general purpose timeout\n",
670 0 : sc->bce_dev.dv_xname);
671 : wantinit = 1;
672 0 : }
673 : }
674 :
675 0 : if (handled) {
676 0 : if (wantinit)
677 0 : bce_init(ifp);
678 : /* Try to get more packets going. */
679 0 : bce_start(ifp);
680 0 : }
681 0 : return (handled);
682 : }
683 :
684 : /* Receive interrupt handler */
685 : void
686 0 : bce_rxintr(struct bce_softc *sc)
687 : {
688 0 : struct ifnet *ifp = &sc->bce_ac.ac_if;
689 0 : struct mbuf_list ml = MBUF_LIST_INITIALIZER();
690 : struct rx_pph *pph;
691 : struct mbuf *m;
692 : int curr;
693 : int len;
694 : int i;
695 :
696 : /* get pointer to active receive slot */
697 0 : curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
698 0 : & RS_CD_MASK;
699 0 : curr = curr / sizeof(struct bce_dma_slot);
700 0 : if (curr >= BCE_NRXDESC)
701 : curr = BCE_NRXDESC - 1;
702 :
703 : /* process packets up to, but not including, the one currently being worked on */
704 0 : for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC) {
705 : /* complete any post dma memory ops on packet */
706 0 : bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map,
707 : i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD);
708 :
709 : /*
710 : * If the packet had an error, simply recycle the buffer,
711 : * resetting the len, and flags.
712 : */
713 0 : pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES);
714 0 : if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
715 0 : ifp->if_ierrors++;
716 0 : pph->len = 0;
717 0 : pph->flags = 0;
718 0 : continue;
719 : }
720 : /* receive the packet */
721 0 : len = pph->len;
722 0 : if (len == 0)
723 : continue; /* no packet if empty */
724 0 : pph->len = 0;
725 0 : pph->flags = 0;
726 :
727 : /*
728 : * The chip includes the CRC with every packet. Trim
729 : * it off here.
730 : */
731 0 : len -= ETHER_CRC_LEN;
732 :
733 0 : m = m_devget(sc->bce_data + i * MCLBYTES +
734 : BCE_PREPKT_HEADER_SIZE, len, ETHER_ALIGN);
735 :
736 0 : ml_enqueue(&ml, m);
737 :
738 : /* re-check current in case it changed */
739 0 : curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
740 0 : BCE_DMA_RXSTATUS) & RS_CD_MASK) /
741 : sizeof(struct bce_dma_slot);
742 0 : if (curr >= BCE_NRXDESC)
743 : curr = BCE_NRXDESC - 1;
744 0 : }
745 :
746 0 : if_input(ifp, &ml);
747 :
748 0 : sc->bce_rxin = curr;
749 0 : }
750 :
751 : /* Transmit interrupt handler */
752 : void
753 0 : bce_txintr(struct bce_softc *sc)
754 : {
755 0 : struct ifnet *ifp = &sc->bce_ac.ac_if;
756 : int curr;
757 : int i;
758 :
759 0 : ifq_clr_oactive(&ifp->if_snd);
760 :
761 : /*
762 : * Go through the Tx list and free mbufs for those
763 : * frames which have been transmitted.
764 : */
765 0 : curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
766 0 : BCE_DMA_TXSTATUS) & RS_CD_MASK;
767 0 : curr = curr / sizeof(struct bce_dma_slot);
768 0 : if (curr >= BCE_NTXDESC)
769 : curr = BCE_NTXDESC - 1;
770 0 : for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC) {
771 : /* do any post dma memory ops on transmit data */
772 0 : bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
773 : i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE);
774 : }
775 0 : sc->bce_txin = curr;
776 :
777 : /*
778 : * If there are no more pending transmissions, cancel the watchdog
779 : * timer
780 : */
781 0 : if (sc->bce_txsnext == sc->bce_txin)
782 0 : ifp->if_timer = 0;
783 0 : }
784 :
785 : /* initialize the interface */
786 : int
787 0 : bce_init(struct ifnet *ifp)
788 : {
789 0 : struct bce_softc *sc = ifp->if_softc;
790 : u_int32_t reg_win;
791 : int i;
792 :
793 : /* Cancel any pending I/O. */
794 0 : bce_stop(ifp);
795 :
796 : /* enable pci inerrupts, bursts, and prefetch */
797 :
798 : /* remap the pci registers to the Sonics config registers */
799 :
800 : /* save the current map, so it can be restored */
801 0 : reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
802 : BCE_REG_WIN);
803 :
804 : /* set register window to Sonics registers */
805 0 : pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
806 : BCE_SONICS_WIN);
807 :
808 : /* enable SB to PCI interrupt */
809 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
810 : bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
811 : SBIV_ENET0);
812 :
813 : /* enable prefetch and bursts for sonics-to-pci translation 2 */
814 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
815 : bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
816 : SBTOPCI_PREF | SBTOPCI_BURST);
817 :
818 : /* restore to ethernet register space */
819 0 : pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
820 : reg_win);
821 :
822 : /* Reset the chip to a known state. */
823 0 : bce_reset(sc);
824 :
825 : /* Initialize transmit descriptors */
826 0 : memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
827 0 : sc->bce_txsnext = 0;
828 0 : sc->bce_txin = 0;
829 :
830 : /* enable crc32 generation and set proper LED modes */
831 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
832 : bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
833 : BCE_EMC_CRC32_ENAB | BCE_EMC_LED);
834 :
835 : /* reset or clear powerdown control bit */
836 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
837 : bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
838 : ~BCE_EMC_PDOWN);
839 :
840 : /* setup DMA interrupt control */
841 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24); /* MAGIC */
842 :
843 : /* program promiscuous mode and multicast filters */
844 0 : bce_iff(ifp);
845 :
846 : /* set max frame length, account for possible VLAN tag */
847 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
848 : ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
849 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
850 : ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
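	/*
	 * ETHER_MAX_LEN is 1518 (CRC included); adding ETHER_VLAN_ENCAP_LEN
	 * (4) lets a maximum-sized frame carrying an 802.1Q tag, 1522 bytes,
	 * pass in either direction.
	 */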
851 :
852 : /* set tx watermark */
853 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);
854 :
855 : /* enable transmit */
856 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
857 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
858 : sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000); /* MAGIC */
859 :
860 : /*
861 : * Give the receive ring to the chip, and
862 : * start the receive DMA engine.
863 : */
864 0 : sc->bce_rxin = 0;
865 :
866 : /* clear the rx descriptor ring */
867 0 : memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
868 : /* enable receive */
869 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
870 : BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
871 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
872 : sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000); /* MAGIC */
873 :
874 : /* Initialize receive descriptors */
875 0 : for (i = 0; i < BCE_NRXDESC; i++)
876 0 : bce_add_rxbuf(sc, i);
877 :
878 : /* Enable interrupts */
879 0 : sc->bce_intmask =
880 : I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
881 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
882 : sc->bce_intmask);
883 :
884 : /* start the receive dma */
885 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
886 : BCE_NRXDESC * sizeof(struct bce_dma_slot));
887 :
888 : /* set media */
889 0 : mii_mediachg(&sc->bce_mii);
890 :
891 : /* turn on the ethernet mac */
892 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
893 : bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
894 : BCE_ENET_CTL) | EC_EE);
895 :
896 : /* start timer */
897 0 : timeout_add_sec(&sc->bce_timeout, 1);
898 :
899 : /* mark as running, and no outputs active */
900 0 : ifp->if_flags |= IFF_RUNNING;
901 0 : ifq_clr_oactive(&ifp->if_snd);
902 :
903 0 : return 0;
904 : }
905 :
906 : /* add a mac address to packet filter */
907 : void
908 0 : bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
909 : {
910 : int i;
911 : u_int32_t rval;
912 :
913 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
914 : mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
915 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
916 : mac[0] << 8 | mac[1] | 0x10000); /* MAGIC */
917 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
918 : idx << 16 | 8); /* MAGIC */
919 : /* wait for write to complete */
920 0 : for (i = 0; i < 100; i++) {
921 0 : rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
922 : BCE_FILT_CTL);
923 0 : if (!(rval & 0x80000000)) /* MAGIC */
924 : break;
925 0 : delay(10);
926 : }
927 0 : if (i == 100) {
928 0 : printf("%s: timed out writing pkt filter ctl\n",
929 0 : sc->bce_dev.dv_xname);
930 0 : }
931 0 : }
932 :
933 : /* Add a receive buffer to the indicated descriptor. */
934 : void
935 0 : bce_add_rxbuf(struct bce_softc *sc, int idx)
936 : {
937 0 : struct bce_dma_slot *bced = &sc->bce_rx_ring[idx];
938 :
939 0 : bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES,
940 : MCLBYTES, BUS_DMASYNC_PREREAD);
941 :
942 0 : *(u_int32_t *)(sc->bce_data + idx * MCLBYTES) = 0;
943 0 : bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr +
944 : idx * MCLBYTES + 0x40000000);
945 0 : if (idx != (BCE_NRXDESC - 1))
946 0 : bced->ctrl = htole32(BCE_RXBUF_LEN);
947 : else
948 0 : bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);
949 :
950 0 : bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
951 : sizeof(struct bce_dma_slot) * idx,
952 : sizeof(struct bce_dma_slot),
953 : BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
954 :
955 0 : }
956 :
957 : /* Stop transmission on the interface */
958 : void
959 0 : bce_stop(struct ifnet *ifp)
960 : {
961 0 : struct bce_softc *sc = ifp->if_softc;
962 : int i;
963 : u_int32_t val;
964 :
965 : /* Stop the 1 second timer */
966 0 : timeout_del(&sc->bce_timeout);
967 :
968 : /* Mark the interface down and cancel the watchdog timer. */
969 0 : ifp->if_flags &= ~IFF_RUNNING;
970 0 : ifq_clr_oactive(&ifp->if_snd);
971 0 : ifp->if_timer = 0;
972 :
973 : /* Down the MII. */
974 0 : mii_down(&sc->bce_mii);
975 :
976 : /* Disable interrupts. */
977 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
978 0 : sc->bce_intmask = 0;
979 0 : delay(10);
980 :
981 : /* Disable emac */
982 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
983 0 : for (i = 0; i < 200; i++) {
984 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
985 : BCE_ENET_CTL);
986 0 : if (!(val & EC_ED))
987 : break;
988 0 : delay(10);
989 : }
990 :
991 : /* Stop the DMA */
992 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
993 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
994 0 : delay(10);
995 0 : }
996 :
997 : /* reset the chip */
998 : void
999 0 : bce_reset(struct bce_softc *sc)
1000 : {
1001 : u_int32_t val;
1002 : u_int32_t sbval;
1003 : int i;
1004 :
1005 : /* if SB core is up */
1006 0 : sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1007 : BCE_SBTMSTATELOW);
1008 0 : if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
1009 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
1010 : 0);
1011 :
1012 : /* disable emac */
1013 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
1014 : EC_ED);
1015 0 : for (i = 0; i < 200; i++) {
1016 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1017 : BCE_ENET_CTL);
1018 0 : if (!(val & EC_ED))
1019 : break;
1020 0 : delay(10);
1021 : }
1022 0 : if (i == 200)
1023 0 : printf("%s: timed out disabling ethernet mac\n",
1024 0 : sc->bce_dev.dv_xname);
1025 :
1026 : /* reset the dma engines */
1027 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
1028 : 0);
1029 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1030 : BCE_DMA_RXSTATUS);
1031 : /* if error on receive, wait to go idle */
1032 0 : if (val & RS_ERROR) {
1033 0 : for (i = 0; i < 100; i++) {
1034 0 : val = bus_space_read_4(sc->bce_btag,
1035 : sc->bce_bhandle, BCE_DMA_RXSTATUS);
1036 0 : if (val & RS_DMA_IDLE)
1037 : break;
1038 0 : delay(10);
1039 : }
1040 0 : if (i == 100)
1041 0 : printf("%s: receive dma did not go idle after"
1042 0 : " error\n", sc->bce_dev.dv_xname);
1043 : }
1044 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
1045 : BCE_DMA_RXSTATUS, 0);
1046 :
1047 : /* reset ethernet mac */
1048 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
1049 : EC_ES);
1050 0 : for (i = 0; i < 200; i++) {
1051 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1052 : BCE_ENET_CTL);
1053 0 : if (!(val & EC_ES))
1054 : break;
1055 0 : delay(10);
1056 : }
1057 0 : if (i == 200)
1058 0 : printf("%s: timed out resetting ethernet mac\n",
1059 0 : sc->bce_dev.dv_xname);
1060 : } else {
1061 : u_int32_t reg_win;
1062 :
1063 : /* remap the pci registers to the Sonics config registers */
1064 :
1065 : /* save the current map, so it can be restored */
1066 0 : reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
1067 : BCE_REG_WIN);
1068 : /* set register window to Sonics registers */
1069 0 : pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
1070 : BCE_REG_WIN, BCE_SONICS_WIN);
1071 :
1072 : /* enable SB to PCI interrupt */
1073 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
1074 : bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1075 : BCE_SBINTVEC) | SBIV_ENET0);
1076 :
1077 : /* enable prefetch and bursts for sonics-to-pci translation 2 */
1078 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
1079 : bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1080 : BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);
1081 :
1082 : /* restore to ethernet register space */
1083 0 : pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
1084 : reg_win);
1085 : }
1086 :
1087 : /* disable SB core if not in reset */
1088 0 : if (!(sbval & SBTML_RESET)) {
1089 :
1090 : /* set the reject bit */
1091 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
1092 : BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
1093 0 : for (i = 0; i < 200; i++) {
1094 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1095 : BCE_SBTMSTATELOW);
1096 0 : if (val & SBTML_REJ)
1097 : break;
1098 0 : delay(1);
1099 : }
1100 0 : if (i == 200)
1101 0 : printf("%s: while resetting core, reject did not set\n",
1102 0 : sc->bce_dev.dv_xname);
1103 : /* wait until busy is clear */
1104 0 : for (i = 0; i < 200; i++) {
1105 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1106 : BCE_SBTMSTATEHI);
1107 0 : if (!(val & 0x4))
1108 : break;
1109 0 : delay(1);
1110 : }
1111 0 : if (i == 200)
1112 0 : printf("%s: while resetting core, busy did not clear\n",
1113 0 : sc->bce_dev.dv_xname);
1114 : /* set reset and reject while enabling the clocks */
1115 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
1116 : BCE_SBTMSTATELOW,
1117 : SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
1118 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1119 : BCE_SBTMSTATELOW);
1120 0 : delay(10);
1121 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
1122 : BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
1123 0 : delay(1);
1124 0 : }
1125 : /* enable clock */
1126 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
1127 : SBTML_FGC | SBTML_CLK | SBTML_RESET);
1128 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
1129 0 : delay(1);
1130 :
1131 : /* clear any error bits that may be on */
1132 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
1133 0 : if (val & 1)
1134 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
1135 : 0);
1136 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
1137 0 : if (val & SBIM_ERRORBITS)
1138 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
1139 : val & ~SBIM_ERRORBITS);
1140 :
1141 : /* clear reset and allow it to propagate throughout the core */
1142 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
1143 : SBTML_FGC | SBTML_CLK);
1144 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
1145 0 : delay(1);
1146 :
1147 : /* leave clock enabled */
1148 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
1149 : SBTML_CLK);
1150 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
1151 0 : delay(1);
1152 :
1153 : /* initialize MDC preamble, frequency */
1154 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d); /* MAGIC */
1155 :
1156 : /* enable the phy; the procedure differs for internal and external phys */
1157 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
1158 0 : if (!(val & BCE_DC_IP)) {
1159 : /* select external phy */
1160 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
1161 : EC_EP);
1162 0 : } else if (val & BCE_DC_ER) { /* internal, clear reset bit if on */
1163 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
1164 : val & ~BCE_DC_ER);
1165 0 : delay(100);
1166 0 : }
1167 0 : }
1168 :
1169 : /* Set up the receive filter. */
1170 : void
1171 0 : bce_iff(struct ifnet *ifp)
1172 : {
1173 0 : struct bce_softc *sc = ifp->if_softc;
1174 0 : struct arpcom *ac = &sc->bce_ac;
1175 : u_int32_t rxctl;
1176 :
1177 0 : rxctl = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL);
1178 0 : rxctl &= ~(ERC_AM | ERC_DB | ERC_PE);
1179 0 : ifp->if_flags &= ~IFF_ALLMULTI;
1180 :
1181 : /* disable the filter */
1182 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 0);
1183 :
1184 : /* add our own address */
1185 0 : bce_add_mac(sc, ac->ac_enaddr, 0);
1186 :
1187 0 : if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
1188 0 : ifp->if_flags |= IFF_ALLMULTI;
1189 0 : if (ifp->if_flags & IFF_PROMISC)
1190 0 : rxctl |= ERC_PE;
1191 : else
1192 0 : rxctl |= ERC_AM;
1193 : }
1194 :
1195 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, rxctl);
1196 :
1197 : /* enable the filter */
1198 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
1199 : bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL) | 1);
1200 0 : }
1201 :
1202 : /* Read a PHY register on the MII. */
1203 : int
1204 0 : bce_mii_read(struct device *self, int phy, int reg)
1205 : {
1206 0 : struct bce_softc *sc = (struct bce_softc *) self;
1207 : int i;
1208 : u_int32_t val;
1209 :
1210 : /* clear mii_int */
1211 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1212 : BCE_MIINTR);
1213 :
1214 : /* Read the PHY register */
1215 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1216 : (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) | /* MAGIC */
1217 : (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg)); /* MAGIC */
1218 :
1219 0 : for (i = 0; i < BCE_TIMEOUT; i++) {
1220 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1221 : BCE_MI_STS);
1222 0 : if (val & BCE_MIINTR)
1223 : break;
1224 0 : delay(10);
1225 : }
1226 0 : val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1227 0 : if (i == BCE_TIMEOUT) {
1228 0 : printf("%s: PHY read timed out reading phy %d, reg %d, val = "
1229 0 : "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1230 0 : return (0);
1231 : }
1232 0 : return (val & BCE_MICOMM_DATA);
1233 0 : }
1234 :
1235 : /* Write a PHY register on the MII */
1236 : void
1237 0 : bce_mii_write(struct device *self, int phy, int reg, int val)
1238 : {
1239 0 : struct bce_softc *sc = (struct bce_softc *) self;
1240 : int i;
1241 : u_int32_t rval;
1242 :
1243 : /* clear mii_int */
1244 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1245 : BCE_MIINTR);
1246 :
1247 : /* Write the PHY register */
1248 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1249 : (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) | /* MAGIC */
1250 : (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) | /* MAGIC */
1251 : BCE_MIPHY(phy) | BCE_MIREG(reg));
1252 :
1253 : /* wait for write to complete */
1254 0 : for (i = 0; i < BCE_TIMEOUT; i++) {
1255 0 : rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1256 : BCE_MI_STS);
1257 0 : if (rval & BCE_MIINTR)
1258 : break;
1259 0 : delay(10);
1260 : }
1261 0 : rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1262 0 : if (i == BCE_TIMEOUT) {
1263 0 : printf("%s: PHY timed out writing phy %d, reg %d, val "
1264 0 : "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1265 0 : }
1266 0 : }
1267 :
1268 : /* sync hardware duplex mode to software state */
1269 : void
1270 0 : bce_statchg(struct device *self)
1271 : {
1272 0 : struct bce_softc *sc = (struct bce_softc *) self;
1273 : u_int32_t reg;
1274 :
1275 : /* if needed, change register to match duplex mode */
1276 0 : reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
1277 0 : if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
1278 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1279 : reg | EXC_FD);
1280 0 : else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
1281 0 : bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1282 : reg & ~EXC_FD);
1283 :
1284 : /*
1285 : * Enable activity led.
1286 : * XXX This should be in a phy driver, but is not currently.
1287 : */
1288 0 : bce_mii_write((struct device *) sc, 1, 26, /* MAGIC */
1289 0 : bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */
1290 : /* enable traffic meter led mode */
1291 0 : bce_mii_write((struct device *) sc, 1, 27, /* MAGIC */
1292 0 : bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */
1293 0 : }
1294 :
1295 : /* Set hardware to newly-selected media */
1296 : int
1297 0 : bce_mediachange(struct ifnet *ifp)
1298 : {
1299 0 : struct bce_softc *sc = ifp->if_softc;
1300 :
1301 0 : if (ifp->if_flags & IFF_UP)
1302 0 : mii_mediachg(&sc->bce_mii);
1303 0 : return (0);
1304 : }
1305 :
1306 : /* Get the current interface media status */
1307 : void
1308 0 : bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1309 : {
1310 0 : struct bce_softc *sc = ifp->if_softc;
1311 :
1312 0 : mii_pollstat(&sc->bce_mii);
1313 0 : ifmr->ifm_active = sc->bce_mii.mii_media_active;
1314 0 : ifmr->ifm_status = sc->bce_mii.mii_media_status;
1315 0 : }
1316 :
1317 : /* One second timer, checks link status */
1318 : void
1319 0 : bce_tick(void *v)
1320 : {
1321 0 : struct bce_softc *sc = v;
1322 : int s;
1323 :
1324 0 : s = splnet();
1325 0 : mii_tick(&sc->bce_mii);
1326 0 : splx(s);
1327 :
1328 0 : timeout_add_sec(&sc->bce_timeout, 1);
1329 0 : }