/* $OpenBSD: if_ale.c,v 1.46 2017/09/08 05:36:52 deraadt Exp $ */
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ale/if_ale.c,v 1.3 2008/12/03 09:01:12 yongari Exp $
 */

/* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alereg.h>

int ale_match(struct device *, void *, void *);
void ale_attach(struct device *, struct device *, void *);
int ale_detach(struct device *, int);
int ale_activate(struct device *, int);

int ale_miibus_readreg(struct device *, int, int);
void ale_miibus_writereg(struct device *, int, int, int);
void ale_miibus_statchg(struct device *);

int ale_init(struct ifnet *);
void ale_start(struct ifnet *);
int ale_ioctl(struct ifnet *, u_long, caddr_t);
void ale_watchdog(struct ifnet *);
int ale_mediachange(struct ifnet *);
void ale_mediastatus(struct ifnet *, struct ifmediareq *);

int ale_intr(void *);
int ale_rxeof(struct ale_softc *sc);
void ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
    uint32_t, uint32_t *);
void ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
void ale_txeof(struct ale_softc *);

int ale_dma_alloc(struct ale_softc *);
void ale_dma_free(struct ale_softc *);
int ale_encap(struct ale_softc *, struct mbuf *);
void ale_init_rx_pages(struct ale_softc *);
void ale_init_tx_ring(struct ale_softc *);

void ale_stop(struct ale_softc *);
void ale_tick(void *);
void ale_get_macaddr(struct ale_softc *);
void ale_mac_config(struct ale_softc *);
void ale_phy_reset(struct ale_softc *);
void ale_reset(struct ale_softc *);
void ale_iff(struct ale_softc *);
void ale_rxvlan(struct ale_softc *);
void ale_stats_clear(struct ale_softc *);
void ale_stats_update(struct ale_softc *);
void ale_stop_mac(struct ale_softc *);

const struct pci_matchid ale_devices[] = {
    { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1E }
};

struct cfattach ale_ca = {
    sizeof (struct ale_softc), ale_match, ale_attach, NULL,
    ale_activate
};

struct cfdriver ale_cd = {
    NULL, "ale", DV_IFNET
};

int aledebug = 0;
#define DPRINTF(x) do { if (aledebug) printf x; } while (0)

#define ALE_CSUM_FEATURES (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)
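/*
 * ALE_CSUM_FEATURES selects which outbound mbuf checksum flags
 * ale_encap() steers to the chip's single "custom checksum" engine;
 * see the comment in ale_encap() for the trade-offs involved.
 */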

int
ale_miibus_readreg(struct device *dev, int phy, int reg)
{
    struct ale_softc *sc = (struct ale_softc *)dev;
    uint32_t v;
    int i;

    if (phy != sc->ale_phyaddr)
        return (0);

    if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0 &&
        reg == MII_EXTSR)
        return (0);

    CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
        MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
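    /* Poll until the MDIO controller clears the EXECUTE/BUSY bits. */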
    for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
        DELAY(5);
        v = CSR_READ_4(sc, ALE_MDIO);
        if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
            break;
    }

    if (i == 0) {
        printf("%s: phy read timeout: phy %d, reg %d\n",
            sc->sc_dev.dv_xname, phy, reg);
        return (0);
    }

    return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

void
ale_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
    struct ale_softc *sc = (struct ale_softc *)dev;
    uint32_t v;
    int i;

    if (phy != sc->ale_phyaddr)
        return;

    CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
        (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
        MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
    for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
        DELAY(5);
        v = CSR_READ_4(sc, ALE_MDIO);
        if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
            break;
    }

    if (i == 0)
        printf("%s: phy write timeout: phy %d, reg %d\n",
            sc->sc_dev.dv_xname, phy, reg);
}

void
ale_miibus_statchg(struct device *dev)
{
    struct ale_softc *sc = (struct ale_softc *)dev;
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    struct mii_data *mii = &sc->sc_miibus;
    uint32_t reg;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
        return;

    sc->ale_flags &= ~ALE_FLAG_LINK;
    if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
        (IFM_ACTIVE | IFM_AVALID)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
            sc->ale_flags |= ALE_FLAG_LINK;
            break;

        case IFM_1000_T:
            if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
                sc->ale_flags |= ALE_FLAG_LINK;
            break;

        default:
            break;
        }
    }

    /* Stop Rx/Tx MACs. */
    ale_stop_mac(sc);

    /* Program MACs with resolved speed/duplex/flow-control. */
    if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
        ale_mac_config(sc);
        /* Reenable Tx/Rx MACs. */
        reg = CSR_READ_4(sc, ALE_MAC_CFG);
        reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
        CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
    }
}

void
ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct ale_softc *sc = ifp->if_softc;
    struct mii_data *mii = &sc->sc_miibus;

    if ((ifp->if_flags & IFF_UP) == 0)
        return;

    mii_pollstat(mii);
    ifmr->ifm_status = mii->mii_media_status;
    ifmr->ifm_active = mii->mii_media_active;
}

int
ale_mediachange(struct ifnet *ifp)
{
    struct ale_softc *sc = ifp->if_softc;
    struct mii_data *mii = &sc->sc_miibus;
    int error;

    if (mii->mii_instance != 0) {
        struct mii_softc *miisc;

        LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
            mii_phy_reset(miisc);
    }
    error = mii_mediachg(mii);

    return (error);
}

int
ale_match(struct device *dev, void *match, void *aux)
{
    return pci_matchbyid((struct pci_attach_args *)aux, ale_devices,
        sizeof (ale_devices) / sizeof (ale_devices[0]));
}

void
ale_get_macaddr(struct ale_softc *sc)
{
    uint32_t ea[2], reg;
    int i, vpdc;

    reg = CSR_READ_4(sc, ALE_SPI_CTRL);
    if ((reg & SPI_VPD_ENB) != 0) {
        reg &= ~SPI_VPD_ENB;
        CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
    }

    if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, PCI_CAP_VPD,
        &vpdc, NULL)) {
        /*
         * PCI VPD capability found; let the TWSI reload the EEPROM.
         * This sets the controller's Ethernet address.
         */
        CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
            TWSI_CTRL_SW_LD_START);
        for (i = 100; i > 0; i--) {
            DELAY(1000);
            reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
            if ((reg & TWSI_CTRL_SW_LD_START) == 0)
                break;
        }
        if (i == 0)
            printf("%s: reloading EEPROM timeout!\n",
                sc->sc_dev.dv_xname);
    } else {
        if (aledebug)
            printf("%s: PCI VPD capability not found!\n",
                sc->sc_dev.dv_xname);
    }

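    /*
     * PAR1 holds the first two bytes of the station address and PAR0
     * the remaining four; unpack them into wire order.
     */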
    ea[0] = CSR_READ_4(sc, ALE_PAR0);
    ea[1] = CSR_READ_4(sc, ALE_PAR1);
    sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
    sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
    sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
    sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
    sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
    sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

void
ale_phy_reset(struct ale_softc *sc)
{
    /* Reset magic from Linux. */
    CSR_WRITE_2(sc, ALE_GPHY_CTRL,
        GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
        GPHY_CTRL_PHY_PLL_ON);
    DELAY(1000);
    CSR_WRITE_2(sc, ALE_GPHY_CTRL,
        GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
        GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
    DELAY(1000);

#define ATPHY_DBG_ADDR 0x1D
#define ATPHY_DBG_DATA 0x1E

    /* Enable hibernation mode. */
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_ADDR, 0x0B);
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_DATA, 0xBC00);
    /* Set Class A/B for all modes. */
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_ADDR, 0x00);
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_DATA, 0x02EF);
    /* Enable 10BT power saving. */
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_ADDR, 0x12);
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_DATA, 0x4C04);
    /* Adjust 1000T power. */
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_ADDR, 0x04);
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_DATA, 0x8BBB);
    /* 10BT center tap voltage. */
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_ADDR, 0x05);
    ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
        ATPHY_DBG_DATA, 0x2C46);

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
    DELAY(1000);
}

void
ale_attach(struct device *parent, struct device *self, void *aux)
{
    struct ale_softc *sc = (struct ale_softc *)self;
    struct pci_attach_args *pa = aux;
    pci_chipset_tag_t pc = pa->pa_pc;
    pci_intr_handle_t ih;
    const char *intrstr;
    struct ifnet *ifp;
    pcireg_t memtype;
    int mii_flags, error = 0;
    uint32_t rxf_len, txf_len;
    const char *chipname;

    /*
     * Allocate IO memory
     */
    memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALE_PCIR_BAR);
    if (pci_mapreg_map(pa, ALE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
        &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
        printf(": can't map mem space\n");
        return;
    }

    if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
        printf(": can't map interrupt\n");
        goto fail;
    }

    /*
     * Allocate IRQ
     */
    intrstr = pci_intr_string(pc, ih);
    sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, ale_intr, sc,
        sc->sc_dev.dv_xname);
    if (sc->sc_irq_handle == NULL) {
        printf(": could not establish interrupt");
        if (intrstr != NULL)
            printf(" at %s", intrstr);
        printf("\n");
        goto fail;
    }

    sc->sc_dmat = pa->pa_dmat;
    sc->sc_pct = pa->pa_pc;
    sc->sc_pcitag = pa->pa_tag;

    /* Set PHY address. */
    sc->ale_phyaddr = ALE_PHY_ADDR;

    /* Reset PHY. */
    ale_phy_reset(sc);

    /* Reset the ethernet controller. */
    ale_reset(sc);

    /* Get PCI and chip id/revision. */
    sc->ale_rev = PCI_REVISION(pa->pa_class);
    if (sc->ale_rev >= 0xF0) {
        /* L2E Rev. B. AR8114 */
        sc->ale_flags |= ALE_FLAG_FASTETHER;
        chipname = "AR8114";
    } else {
        if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
            /* L1E AR8121 */
            sc->ale_flags |= ALE_FLAG_JUMBO;
            chipname = "AR8121";
        } else {
            /* L2E Rev. A. AR8113 */
            sc->ale_flags |= ALE_FLAG_FASTETHER;
            chipname = "AR8113";
        }
    }

    printf(": %s, %s", chipname, intrstr);

    /*
     * All known controllers seem to require 4-byte alignment of Tx
     * buffers to make Tx checksum offload with the custom checksum
     * generation method work.
     */
    sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;

    /*
     * All known controllers seem to have issues with Rx checksum
     * offload for fragmented IP datagrams.
     */
    sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;

    /*
     * Don't use the Tx CMB.  It is known to cause RRS update failures
     * under certain circumstances.  A typical symptom of the issue is
     * an unexpected sequence number in the Rx handler.
     */
    sc->ale_flags |= ALE_FLAG_TXCMB_BUG;
    sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
        MASTER_CHIP_REV_SHIFT;
    if (aledebug) {
        printf("%s: PCI device revision : 0x%04x\n",
            sc->sc_dev.dv_xname, sc->ale_rev);
        printf("%s: Chip id/revision : 0x%04x\n",
            sc->sc_dev.dv_xname, sc->ale_chip_rev);
    }

    /*
     * Uninitialized hardware returns an invalid chip id/revision
     * as well as 0xFFFFFFFF for Tx/Rx fifo length.
     */
    txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
    rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
    if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
        rxf_len == 0xFFFFFFFF) {
        printf("%s: chip revision : 0x%04x, %u Tx FIFO "
            "%u Rx FIFO -- not initialized?\n", sc->sc_dev.dv_xname,
            sc->ale_chip_rev, txf_len, rxf_len);
        goto fail;
    }

    if (aledebug) {
        printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname,
            txf_len, rxf_len);
    }

    /* Set max allowable DMA size. */
    sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
    sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;

    error = ale_dma_alloc(sc);
    if (error)
        goto fail;

    /* Load station address. */
    ale_get_macaddr(sc);

    ifp = &sc->sc_arpcom.ac_if;
    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ale_ioctl;
    ifp->if_start = ale_start;
    ifp->if_watchdog = ale_watchdog;
    IFQ_SET_MAXLEN(&ifp->if_snd, ALE_TX_RING_CNT - 1);
    bcopy(sc->ale_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
    bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

    ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef ALE_CHECKSUM
    ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
        IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

    printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

    /* Set up MII bus. */
    sc->sc_miibus.mii_ifp = ifp;
    sc->sc_miibus.mii_readreg = ale_miibus_readreg;
    sc->sc_miibus.mii_writereg = ale_miibus_writereg;
    sc->sc_miibus.mii_statchg = ale_miibus_statchg;

    ifmedia_init(&sc->sc_miibus.mii_media, 0, ale_mediachange,
        ale_mediastatus);
    mii_flags = 0;
    if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
        mii_flags |= MIIF_DOPAUSE;
    mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
        MII_OFFSET_ANY, mii_flags);

    if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
        printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
        ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
            0, NULL);
        ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
    } else
        ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

    if_attach(ifp);
    ether_ifattach(ifp);

    timeout_set(&sc->ale_tick_ch, ale_tick, sc);

    return;
fail:
    ale_dma_free(sc);
    if (sc->sc_irq_handle != NULL)
        pci_intr_disestablish(pc, sc->sc_irq_handle);
    if (sc->sc_mem_size)
        bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
ale_detach(struct device *self, int flags)
{
    struct ale_softc *sc = (struct ale_softc *)self;
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    int s;

    s = splnet();
    ale_stop(sc);
    splx(s);

    mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

    /* Delete all remaining media. */
    ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

    ether_ifdetach(ifp);
    if_detach(ifp);
    ale_dma_free(sc);

    if (sc->sc_irq_handle != NULL) {
        pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
        sc->sc_irq_handle = NULL;
    }

    return (0);
}

int
ale_activate(struct device *self, int act)
{
    struct ale_softc *sc = (struct ale_softc *)self;
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    int rv = 0;

    switch (act) {
    case DVACT_SUSPEND:
        if (ifp->if_flags & IFF_RUNNING)
            ale_stop(sc);
        rv = config_activate_children(self, act);
        break;
    case DVACT_RESUME:
        if (ifp->if_flags & IFF_UP)
            ale_init(ifp);
        break;
    default:
        rv = config_activate_children(self, act);
        break;
    }
    return (rv);
}

int
ale_dma_alloc(struct ale_softc *sc)
{
    struct ale_txdesc *txd;
    int nsegs, error, guard_size, i;

    if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
        guard_size = ALE_JUMBO_FRAMELEN;
    else
        guard_size = ALE_MAX_FRAMELEN;
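    /*
     * Each Rx page is sized as ALE_RX_PAGE_SZ plus a guard area of
     * one maximum frame, presumably so a frame that starts near the
     * end of a page can spill past the nominal page size without
     * corrupting adjacent memory.
     */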
    sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
        ALE_RX_PAGE_ALIGN);

    /*
     * Create DMA map for Tx ring.
     */
    error = bus_dmamap_create(sc->sc_dmat, ALE_TX_RING_SZ, 1,
        ALE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_ring_map);
    if (error)
        return (ENOBUFS);

    /* Allocate DMA'able memory for TX ring */
    error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_RING_SZ,
        ETHER_ALIGN, 0, &sc->ale_cdata.ale_tx_ring_seg, 1,
        &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
    if (error) {
        printf("%s: could not allocate DMA'able memory for Tx ring.\n",
            sc->sc_dev.dv_xname);
        return error;
    }

    error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_ring_seg,
        nsegs, ALE_TX_RING_SZ, (caddr_t *)&sc->ale_cdata.ale_tx_ring,
        BUS_DMA_NOWAIT);
    if (error)
        return (ENOBUFS);

    /* Load the DMA map for Tx ring. */
    error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map,
        sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
    if (error) {
        printf("%s: could not load DMA'able memory for Tx ring.\n",
            sc->sc_dev.dv_xname);
        bus_dmamem_free(sc->sc_dmat,
            (bus_dma_segment_t *)&sc->ale_cdata.ale_tx_ring, 1);
        return error;
    }
    sc->ale_cdata.ale_tx_ring_paddr =
        sc->ale_cdata.ale_tx_ring_map->dm_segs[0].ds_addr;

    for (i = 0; i < ALE_RX_PAGES; i++) {
        /*
         * Create DMA map for this Rx page.
         */
        error = bus_dmamap_create(sc->sc_dmat, sc->ale_pagesize, 1,
            sc->ale_pagesize, 0, BUS_DMA_NOWAIT,
            &sc->ale_cdata.ale_rx_page[i].page_map);
        if (error)
            return (ENOBUFS);

        /* Allocate DMA'able memory for RX pages */
        error = bus_dmamem_alloc(sc->sc_dmat, sc->ale_pagesize,
            ETHER_ALIGN, 0, &sc->ale_cdata.ale_rx_page[i].page_seg,
            1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
        if (error) {
            printf("%s: could not allocate DMA'able memory for "
                "Rx ring.\n", sc->sc_dev.dv_xname);
            return error;
        }
        error = bus_dmamem_map(sc->sc_dmat,
            &sc->ale_cdata.ale_rx_page[i].page_seg, nsegs,
            sc->ale_pagesize,
            (caddr_t *)&sc->ale_cdata.ale_rx_page[i].page_addr,
            BUS_DMA_NOWAIT);
        if (error)
            return (ENOBUFS);

        /* Load the DMA map for Rx pages. */
        error = bus_dmamap_load(sc->sc_dmat,
            sc->ale_cdata.ale_rx_page[i].page_map,
            sc->ale_cdata.ale_rx_page[i].page_addr,
            sc->ale_pagesize, NULL, BUS_DMA_WAITOK);
        if (error) {
            printf("%s: could not load DMA'able memory for "
                "Rx pages.\n", sc->sc_dev.dv_xname);
            bus_dmamem_free(sc->sc_dmat,
                (bus_dma_segment_t *)sc->ale_cdata.ale_rx_page[i].page_addr, 1);
            return error;
        }
        sc->ale_cdata.ale_rx_page[i].page_paddr =
            sc->ale_cdata.ale_rx_page[i].page_map->dm_segs[0].ds_addr;
    }

    /*
     * Create DMA map for Tx CMB.
     */
    error = bus_dmamap_create(sc->sc_dmat, ALE_TX_CMB_SZ, 1,
        ALE_TX_CMB_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_cmb_map);
    if (error)
        return (ENOBUFS);

    /* Allocate DMA'able memory for Tx CMB. */
    error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_CMB_SZ, ETHER_ALIGN, 0,
        &sc->ale_cdata.ale_tx_cmb_seg, 1, &nsegs,
        BUS_DMA_WAITOK | BUS_DMA_ZERO);

    if (error) {
        printf("%s: could not allocate DMA'able memory for Tx CMB.\n",
            sc->sc_dev.dv_xname);
        return error;
    }

    error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_cmb_seg,
        nsegs, ALE_TX_CMB_SZ, (caddr_t *)&sc->ale_cdata.ale_tx_cmb,
        BUS_DMA_NOWAIT);
    if (error)
        return (ENOBUFS);

    /* Load the DMA map for Tx CMB. */
    error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map,
        sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ, NULL, BUS_DMA_WAITOK);
    if (error) {
        printf("%s: could not load DMA'able memory for Tx CMB.\n",
            sc->sc_dev.dv_xname);
        bus_dmamem_free(sc->sc_dmat,
            (bus_dma_segment_t *)&sc->ale_cdata.ale_tx_cmb, 1);
        return error;
    }

    sc->ale_cdata.ale_tx_cmb_paddr =
        sc->ale_cdata.ale_tx_cmb_map->dm_segs[0].ds_addr;

    for (i = 0; i < ALE_RX_PAGES; i++) {
        /*
         * Create DMA map for this Rx CMB.
         */
        error = bus_dmamap_create(sc->sc_dmat, ALE_RX_CMB_SZ, 1,
            ALE_RX_CMB_SZ, 0, BUS_DMA_NOWAIT,
            &sc->ale_cdata.ale_rx_page[i].cmb_map);
        if (error)
            return (ENOBUFS);

        /* Allocate DMA'able memory for Rx CMB */
        error = bus_dmamem_alloc(sc->sc_dmat, ALE_RX_CMB_SZ,
            ETHER_ALIGN, 0, &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1,
            &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
        if (error) {
            printf("%s: could not allocate DMA'able memory for "
                "Rx CMB\n", sc->sc_dev.dv_xname);
            return error;
        }
        error = bus_dmamem_map(sc->sc_dmat,
            &sc->ale_cdata.ale_rx_page[i].cmb_seg, nsegs,
            ALE_RX_CMB_SZ,
            (caddr_t *)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
            BUS_DMA_NOWAIT);
        if (error)
            return (ENOBUFS);

        /* Load the DMA map for Rx CMB */
        error = bus_dmamap_load(sc->sc_dmat,
            sc->ale_cdata.ale_rx_page[i].cmb_map,
            sc->ale_cdata.ale_rx_page[i].cmb_addr,
            ALE_RX_CMB_SZ, NULL, BUS_DMA_WAITOK);
        if (error) {
            printf("%s: could not load DMA'able memory for Rx CMB\n",
                sc->sc_dev.dv_xname);
            bus_dmamem_free(sc->sc_dmat,
                (bus_dma_segment_t *)&sc->ale_cdata.ale_rx_page[i].cmb_addr, 1);
            return error;
        }
        sc->ale_cdata.ale_rx_page[i].cmb_paddr =
            sc->ale_cdata.ale_rx_page[i].cmb_map->dm_segs[0].ds_addr;
    }

    /* Create DMA maps for Tx buffers. */
    for (i = 0; i < ALE_TX_RING_CNT; i++) {
        txd = &sc->ale_cdata.ale_txdesc[i];
        txd->tx_m = NULL;
        txd->tx_dmamap = NULL;
        error = bus_dmamap_create(sc->sc_dmat, ALE_TSO_MAXSIZE,
            ALE_MAXTXSEGS, ALE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
            &txd->tx_dmamap);
        if (error) {
            printf("%s: could not create Tx dmamap.\n",
                sc->sc_dev.dv_xname);
            return error;
        }
    }

    return (0);
}

void
ale_dma_free(struct ale_softc *sc)
{
    struct ale_txdesc *txd;
    int i;

    /* Tx buffers. */
    for (i = 0; i < ALE_TX_RING_CNT; i++) {
        txd = &sc->ale_cdata.ale_txdesc[i];
        if (txd->tx_dmamap != NULL) {
            bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
            txd->tx_dmamap = NULL;
        }
    }

    /* Tx descriptor ring. */
    if (sc->ale_cdata.ale_tx_ring_map != NULL)
        bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map);
    if (sc->ale_cdata.ale_tx_ring_map != NULL &&
        sc->ale_cdata.ale_tx_ring != NULL)
        bus_dmamem_free(sc->sc_dmat,
            (bus_dma_segment_t *)sc->ale_cdata.ale_tx_ring, 1);
    sc->ale_cdata.ale_tx_ring = NULL;
    sc->ale_cdata.ale_tx_ring_map = NULL;

    /* Rx page block. */
    for (i = 0; i < ALE_RX_PAGES; i++) {
        if (sc->ale_cdata.ale_rx_page[i].page_map != NULL)
            bus_dmamap_unload(sc->sc_dmat,
                sc->ale_cdata.ale_rx_page[i].page_map);
        if (sc->ale_cdata.ale_rx_page[i].page_map != NULL &&
            sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
            bus_dmamem_free(sc->sc_dmat,
                (bus_dma_segment_t *)sc->ale_cdata.ale_rx_page[i].page_addr, 1);
        sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
        sc->ale_cdata.ale_rx_page[i].page_map = NULL;
    }

    /* Rx CMB. */
    for (i = 0; i < ALE_RX_PAGES; i++) {
        if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL)
            bus_dmamap_unload(sc->sc_dmat,
                sc->ale_cdata.ale_rx_page[i].cmb_map);
        if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL &&
            sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
            bus_dmamem_free(sc->sc_dmat,
                (bus_dma_segment_t *)sc->ale_cdata.ale_rx_page[i].cmb_addr, 1);
        sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
        sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
    }

    /* Tx CMB. */
    if (sc->ale_cdata.ale_tx_cmb_map != NULL)
        bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map);
    if (sc->ale_cdata.ale_tx_cmb_map != NULL &&
        sc->ale_cdata.ale_tx_cmb != NULL)
        bus_dmamem_free(sc->sc_dmat,
            (bus_dma_segment_t *)&sc->ale_cdata.ale_tx_cmb, 1);
    sc->ale_cdata.ale_tx_cmb = NULL;
    sc->ale_cdata.ale_tx_cmb_map = NULL;
}

int
ale_encap(struct ale_softc *sc, struct mbuf *m)
{
    struct ale_txdesc *txd, *txd_last;
    struct tx_desc *desc;
    bus_dmamap_t map;
    uint32_t cflags, poff, vtag;
    int error, i, prod;

    cflags = vtag = 0;
    poff = 0;

    prod = sc->ale_cdata.ale_tx_prod;
    txd = &sc->ale_cdata.ale_txdesc[prod];
    txd_last = txd;
    map = txd->tx_dmamap;

    error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
    if (error != 0 && error != EFBIG)
        goto drop;
    if (error != 0) {
        if (m_defrag(m, M_DONTWAIT)) {
            error = ENOBUFS;
            goto drop;
        }
        error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
            BUS_DMA_NOWAIT);
        if (error != 0)
            goto drop;
    }

    bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
        BUS_DMASYNC_PREWRITE);

    /* Configure Tx checksum offload. */
    if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
        /*
         * The AR81xx supports a Tx custom checksum offload feature
         * that computes a single 16-bit checksum, so you can choose
         * one of IP, TCP or UDP.  Normally the driver would set the
         * checksum start/insertion position from the TCP/UDP header,
         * as the TCP/UDP checksum takes more time to compute than
         * the IP one.  However, custom checksum offload seems to
         * require 4-byte-aligned Tx buffers due to a hardware bug.
         * The AR81xx also supports explicit Tx checksum computation
         * if it is told the size of the IP header and the TCP header
         * (for UDP the header size does not matter because it is
         * fixed length).  With that scheme TSO does not work, so you
         * have to choose either TSO or explicit Tx checksum offload.
         * I chose TSO plus custom checksum offload with a
         * work-around, which covers the most common usage for this
         * consumer ethernet controller.  The work-around burns a lot
         * of CPU cycles when a Tx buffer is not aligned on a 4-byte
         * boundary, though.
         */
        cflags |= ALE_TD_CXSUM;
        /* Set checksum start offset. */
        cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
    }

#if NVLAN > 0
    /* Configure VLAN hardware tag insertion. */
    if (m->m_flags & M_VLANTAG) {
        vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
        vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
        cflags |= ALE_TD_INSERT_VLAN_TAG;
    }
#endif

    desc = NULL;
    for (i = 0; i < map->dm_nsegs; i++) {
        desc = &sc->ale_cdata.ale_tx_ring[prod];
        desc->addr = htole64(map->dm_segs[i].ds_addr);
        desc->len =
            htole32(ALE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
        desc->flags = htole32(cflags);
        sc->ale_cdata.ale_tx_cnt++;
        ALE_DESC_INC(prod, ALE_TX_RING_CNT);
    }

    /* Update producer index. */
    sc->ale_cdata.ale_tx_prod = prod;

    /* Finally set EOP on the last descriptor. */
    prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
    desc = &sc->ale_cdata.ale_tx_ring[prod];
    desc->flags |= htole32(ALE_TD_EOP);

    /*
     * Swap the dmamaps of the first and last descriptors so the
     * loaded map and the mbuf both sit in the EOP slot, where
     * ale_txeof() unloads and frees them together.
     */
    txd = &sc->ale_cdata.ale_txdesc[prod];
    map = txd_last->tx_dmamap;
    txd_last->tx_dmamap = txd->tx_dmamap;
    txd->tx_dmamap = map;
    txd->tx_m = m;

    /* Sync descriptors. */
    bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
        sc->ale_cdata.ale_tx_ring_map->dm_mapsize,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);

drop:
    m_freem(m);
    return (error);
}

void
ale_start(struct ifnet *ifp)
{
    struct ale_softc *sc = ifp->if_softc;
    struct mbuf *m;
    int enq;

    /* Reclaim transmitted frames. */
    if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
        ale_txeof(sc);

    if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
        return;
    if ((sc->ale_flags & ALE_FLAG_LINK) == 0)
        return;
    if (IFQ_IS_EMPTY(&ifp->if_snd))
        return;

    enq = 0;
    for (;;) {
        /* Check descriptor overrun. */
        if (sc->ale_cdata.ale_tx_cnt + ALE_MAXTXSEGS >=
            ALE_TX_RING_CNT - 2) {
            ifq_set_oactive(&ifp->if_snd);
            break;
        }

        IFQ_DEQUEUE(&ifp->if_snd, m);
        if (m == NULL)
            break;

        /*
         * Pack the data into the transmit ring. If we
         * don't have room, set the OACTIVE flag and wait
         * for the NIC to drain the ring.
         */
        if (ale_encap(sc, m) != 0) {
            ifp->if_oerrors++;
            continue;
        }

        enq = 1;

#if NBPFILTER > 0
        /*
         * If there's a BPF listener, bounce a copy of this frame
         * to him.
         */
        if (ifp->if_bpf != NULL)
            bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
    }

    if (enq) {
        /* Kick. */
        CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
            sc->ale_cdata.ale_tx_prod);

        /* Set a timeout in case the chip goes out to lunch. */
        ifp->if_timer = ALE_TX_TIMEOUT;
    }
}

void
ale_watchdog(struct ifnet *ifp)
{
    struct ale_softc *sc = ifp->if_softc;

    if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
        printf("%s: watchdog timeout (missed link)\n",
            sc->sc_dev.dv_xname);
        ifp->if_oerrors++;
        ale_init(ifp);
        return;
    }

    printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
    ifp->if_oerrors++;
    ale_init(ifp);
    ale_start(ifp);
}

int
ale_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct ale_softc *sc = ifp->if_softc;
    struct mii_data *mii = &sc->sc_miibus;
    struct ifreq *ifr = (struct ifreq *)data;
    int s, error = 0;

    s = splnet();

    switch (cmd) {
    case SIOCSIFADDR:
        ifp->if_flags |= IFF_UP;
        if (!(ifp->if_flags & IFF_RUNNING))
            ale_init(ifp);
        break;

    case SIOCSIFFLAGS:
        if (ifp->if_flags & IFF_UP) {
            if (ifp->if_flags & IFF_RUNNING)
                error = ENETRESET;
            else
                ale_init(ifp);
        } else {
            if (ifp->if_flags & IFF_RUNNING)
                ale_stop(sc);
        }
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;

    default:
        error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
        break;
    }

    if (error == ENETRESET) {
        if (ifp->if_flags & IFF_RUNNING)
            ale_iff(sc);
        error = 0;
    }

    splx(s);
    return (error);
}

void
ale_mac_config(struct ale_softc *sc)
{
    struct mii_data *mii;
    uint32_t reg;

    mii = &sc->sc_miibus;
    reg = CSR_READ_4(sc, ALE_MAC_CFG);
    reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
        MAC_CFG_SPEED_MASK);
    /* Reprogram MAC with resolved speed/duplex. */
    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_10_T:
    case IFM_100_TX:
        reg |= MAC_CFG_SPEED_10_100;
        break;
    case IFM_1000_T:
        reg |= MAC_CFG_SPEED_1000;
        break;
    }
    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
        reg |= MAC_CFG_FULL_DUPLEX;
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
            reg |= MAC_CFG_TX_FC;
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
            reg |= MAC_CFG_RX_FC;
    }
    CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

void
ale_stats_clear(struct ale_softc *sc)
{
    struct smb sb;
    uint32_t *reg;
    int i;

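    /*
     * Read and discard every MIB counter, walking the registers in
     * struct smb layout order; the counters appear to be
     * clear-on-read.
     */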
    for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
        CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
        i += sizeof(uint32_t);
    }
    /* Read Tx statistics. */
    for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
        CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
        i += sizeof(uint32_t);
    }
}

void
ale_stats_update(struct ale_softc *sc)
{
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    struct ale_hw_stats *stat;
    struct smb sb, *smb;
    uint32_t *reg;
    int i;

    stat = &sc->ale_stats;
    smb = &sb;

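    /*
     * The hardware MIB counters are laid out in register space in the
     * same order as the members of struct smb, so step the register
     * offset while walking the struct members.
     */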
    /* Read Rx statistics. */
    for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
        *reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
        i += sizeof(uint32_t);
    }
    /* Read Tx statistics. */
    for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
        *reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
        i += sizeof(uint32_t);
    }

    /* Rx stats. */
    stat->rx_frames += smb->rx_frames;
    stat->rx_bcast_frames += smb->rx_bcast_frames;
    stat->rx_mcast_frames += smb->rx_mcast_frames;
    stat->rx_pause_frames += smb->rx_pause_frames;
    stat->rx_control_frames += smb->rx_control_frames;
    stat->rx_crcerrs += smb->rx_crcerrs;
    stat->rx_lenerrs += smb->rx_lenerrs;
    stat->rx_bytes += smb->rx_bytes;
    stat->rx_runts += smb->rx_runts;
    stat->rx_fragments += smb->rx_fragments;
    stat->rx_pkts_64 += smb->rx_pkts_64;
    stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
    stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
    stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
    stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
    stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
    stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
    stat->rx_pkts_truncated += smb->rx_pkts_truncated;
    stat->rx_fifo_oflows += smb->rx_fifo_oflows;
    stat->rx_rrs_errs += smb->rx_rrs_errs;
    stat->rx_alignerrs += smb->rx_alignerrs;
    stat->rx_bcast_bytes += smb->rx_bcast_bytes;
    stat->rx_mcast_bytes += smb->rx_mcast_bytes;
    stat->rx_pkts_filtered += smb->rx_pkts_filtered;

    /* Tx stats. */
    stat->tx_frames += smb->tx_frames;
    stat->tx_bcast_frames += smb->tx_bcast_frames;
    stat->tx_mcast_frames += smb->tx_mcast_frames;
    stat->tx_pause_frames += smb->tx_pause_frames;
    stat->tx_excess_defer += smb->tx_excess_defer;
    stat->tx_control_frames += smb->tx_control_frames;
    stat->tx_deferred += smb->tx_deferred;
    stat->tx_bytes += smb->tx_bytes;
    stat->tx_pkts_64 += smb->tx_pkts_64;
    stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
    stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
    stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
    stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
    stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
    stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
    stat->tx_single_colls += smb->tx_single_colls;
    stat->tx_multi_colls += smb->tx_multi_colls;
    stat->tx_late_colls += smb->tx_late_colls;
    stat->tx_excess_colls += smb->tx_excess_colls;
    stat->tx_underrun += smb->tx_underrun;
    stat->tx_desc_underrun += smb->tx_desc_underrun;
    stat->tx_lenerrs += smb->tx_lenerrs;
    stat->tx_pkts_truncated += smb->tx_pkts_truncated;
    stat->tx_bcast_bytes += smb->tx_bcast_bytes;
    stat->tx_mcast_bytes += smb->tx_mcast_bytes;

    ifp->if_collisions += smb->tx_single_colls +
        smb->tx_multi_colls * 2 + smb->tx_late_colls +
        smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

    ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
        smb->tx_underrun + smb->tx_pkts_truncated;

    ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
        smb->rx_runts + smb->rx_pkts_truncated +
        smb->rx_fifo_oflows + smb->rx_rrs_errs +
        smb->rx_alignerrs;
}

int
ale_intr(void *xsc)
{
    struct ale_softc *sc = xsc;
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    uint32_t status;

    status = CSR_READ_4(sc, ALE_INTR_STATUS);
    if ((status & ALE_INTRS) == 0)
        return (0);

    /* Acknowledge and disable interrupts. */
    CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);
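    /*
     * The status bits appear to be write-1-to-clear; INTR_DIS_INT
     * keeps further interrupts masked until the re-enable below.
     */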

    if (ifp->if_flags & IFF_RUNNING) {
        int error;

        error = ale_rxeof(sc);
        if (error) {
            sc->ale_stats.reset_brk_seq++;
            ale_init(ifp);
            return (0);
        }

        if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
            if (status & INTR_DMA_RD_TO_RST)
                printf("%s: DMA read error! -- resetting\n",
                    sc->sc_dev.dv_xname);
            if (status & INTR_DMA_WR_TO_RST)
                printf("%s: DMA write error! -- resetting\n",
                    sc->sc_dev.dv_xname);
            ale_init(ifp);
            return (0);
        }

        ale_txeof(sc);
        ale_start(ifp);
    }

    /* Re-enable interrupts. */
    CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
    return (1);
}

void
ale_txeof(struct ale_softc *sc)
{
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    struct ale_txdesc *txd;
    uint32_t cons, prod;
    int prog;

    if (sc->ale_cdata.ale_tx_cnt == 0)
        return;

    bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
        sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
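    /*
     * Fetch how far the hardware has consumed, from the Tx CMB when
     * it is usable or from the TPD consumer-index register otherwise.
     */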
    if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
        bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0,
            sc->ale_cdata.ale_tx_cmb_map->dm_mapsize,
            BUS_DMASYNC_POSTREAD);
        prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
    } else
        prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
    cons = sc->ale_cdata.ale_tx_cons;
    /*
     * Go through our Tx list and free mbufs for those
     * frames which have been transmitted.
     */
    for (prog = 0; cons != prod; prog++,
        ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
        if (sc->ale_cdata.ale_tx_cnt <= 0)
            break;
        prog++;
        ifq_clr_oactive(&ifp->if_snd);
        sc->ale_cdata.ale_tx_cnt--;
        txd = &sc->ale_cdata.ale_txdesc[cons];
        if (txd->tx_m != NULL) {
            /* Reclaim transmitted mbufs. */
            bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
                txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
            m_freem(txd->tx_m);
            txd->tx_m = NULL;
        }
    }

    if (prog > 0) {
        sc->ale_cdata.ale_tx_cons = cons;
        /*
         * Disarm the watchdog timer only when no Tx descriptors
         * remain pending in the queue.
         */
        if (sc->ale_cdata.ale_tx_cnt == 0)
            ifp->if_timer = 0;
    }
}

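/*
 * Rx uses two fixed pages per queue instead of a descriptor ring: the
 * hardware appends frames to the current page and posts the producer
 * offset through the per-page Rx CMB.  Once the driver has consumed a
 * whole page it hands the page back by marking it valid again and
 * flips to the alternate page.
 */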
void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
    struct ale_rx_page *rx_page;

    rx_page = *page;
    /* Update consumer position. */
    rx_page->cons += roundup(length + sizeof(struct rx_rs),
        ALE_RX_PAGE_ALIGN);
    if (rx_page->cons >= ALE_RX_PAGE_SZ) {
        /*
         * End of Rx page reached, let hardware reuse
         * this page.
         */
        rx_page->cons = 0;
        *rx_page->cmb_addr = 0;
        bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
            rx_page->cmb_map->dm_mapsize,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
            RXF_VALID);
        /* Switch to alternate Rx page. */
        sc->ale_cdata.ale_rx_curp ^= 1;
        rx_page = *page =
            &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
        /* Page flipped, sync CMB and Rx page. */
        bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
            rx_page->page_map->dm_mapsize,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
            rx_page->cmb_map->dm_mapsize,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        /* Sync completed, cache updated producer index. */
        *prod = *rx_page->cmb_addr;
    }
}

/*
 * It seems that the AR81xx controller can compute a partial checksum.
 * The partial checksum value could be used to accelerate checksum
 * computation for fragmented TCP/UDP packets; the upper network stack
 * already takes advantage of partial checksum values in the IP
 * reassembly stage.  But I'm not sure about the correctness of the
 * partial hardware checksum assistance due to the lack of a data
 * sheet.  In addition, the controller's Rx scheme, which requires a
 * copy for every frame, effectively nullifies one of the nicest
 * offload capabilities of the controller.
 */
void
ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
{
    struct ip *ip;
    char *p;

    if ((status & ALE_RD_IPCSUM_NOK) == 0)
        m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

    if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
        if (((status & ALE_RD_IPV4_FRAG) == 0) &&
            ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
            ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) {
            m->m_pkthdr.csum_flags |=
                M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
        }
    } else {
        if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 &&
            (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) {
            p = mtod(m, char *);
            p += ETHER_HDR_LEN;
            if ((status & ALE_RD_802_3) != 0)
                p += LLC_SNAPFRAMELEN;
#if NVLAN > 0
            if (status & ALE_RD_VLAN)
                p += EVL_ENCAPLEN;
#endif
            ip = (struct ip *)p;
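            /*
             * ip_off carries the MF flag and the fragment offset; a
             * nonzero value with DF clear means the datagram is
             * fragmented, and the hardware checksum status cannot be
             * trusted for fragments, so leave the flags unset.
             */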
            if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0)
                return;
            m->m_pkthdr.csum_flags |=
                M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
        }
    }
    /*
     * Don't mark a bad checksum for TCP/UDP frames, as fragmented
     * frames may always have the bad-checksum bit set in the frame
     * status.
     */
}

/* Process received frames. */
int
ale_rxeof(struct ale_softc *sc)
{
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    struct mbuf_list ml = MBUF_LIST_INITIALIZER();
    struct ale_rx_page *rx_page;
    struct rx_rs *rs;
    struct mbuf *m;
    uint32_t length, prod, seqno, status;
    int prog;

    rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
    bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
        rx_page->cmb_map->dm_mapsize,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
        rx_page->page_map->dm_mapsize,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    /*
     * Don't access the producer index directly, as the hardware may
     * update it while the Rx handler is in progress.  It would be
     * even better if there were a way to let the hardware know how
     * far the driver has processed its received frames.
     * Alternatively, the hardware could provide a way to disable
     * CMB updates until the driver acknowledges the end of CMB
     * access.
     */
    prod = *rx_page->cmb_addr;
    for (prog = 0; ; prog++) {
        if (rx_page->cons >= prod)
            break;
        rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
        seqno = ALE_RX_SEQNO(letoh32(rs->seqno));
        if (sc->ale_cdata.ale_rx_seqno != seqno) {
            /*
             * Normally this should not happen except in the case
             * of a severe driver bug or corrupted memory.  However,
             * it seems to happen under certain conditions triggered
             * by abrupt Rx events such as a remote host initiating
             * a bulk transfer.  It's not easy to reproduce, and I
             * doubt it is related to a hardware FIFO overflow or to
             * Tx CMB update activity.  I have also seen similar
             * behaviour on the Realtek 8139, which uses a resembling
             * Rx scheme.
             */
            if (aledebug)
                printf("%s: garbled seq: %u, expected: %u -- "
                    "resetting!\n", sc->sc_dev.dv_xname,
                    seqno, sc->ale_cdata.ale_rx_seqno);
            return (EIO);
        }
        /* Frame received. */
        sc->ale_cdata.ale_rx_seqno++;
        length = ALE_RX_BYTES(letoh32(rs->length));
        status = letoh32(rs->flags);
        if (status & ALE_RD_ERROR) {
            /*
             * We want to pass the following frames to the upper
             * layer regardless of the error bits in the Rx return
             * status:
             *
             * o frames with a bad IP/TCP/UDP checksum.
             * o frames whose frame length and protocol-specific
             *   length do not match.
             */
            if (status & (ALE_RD_CRC | ALE_RD_CODE |
                ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
                ALE_RD_TRUNC)) {
                ale_rx_update_page(sc, &rx_page, length, &prod);
                continue;
            }
        }
        /*
         * m_devget(9) is the major bottleneck of ale(4) (it comes
         * from a hardware limitation).  For jumbo frames we could
         * get slightly better performance if the driver used
         * m_getjcl(9) with a proper buffer size argument, but that
         * would make the code more complicated, and I don't think
         * users expect good Rx performance numbers from this
         * low-end consumer ethernet controller.
         */
        m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
            ETHER_ALIGN);
        if (m == NULL) {
            ifp->if_iqdrops++;
            ale_rx_update_page(sc, &rx_page, length, &prod);
            continue;
        }
        if (status & ALE_RD_IPV4)
            ale_rxcsum(sc, m, status);
#if NVLAN > 0
        if (status & ALE_RD_VLAN) {
            uint32_t vtags = ALE_RX_VLAN(letoh32(rs->vtags));
            m->m_pkthdr.ether_vtag = ALE_RX_VLAN_TAG(vtags);
            m->m_flags |= M_VLANTAG;
        }
#endif

        ml_enqueue(&ml, m);

        ale_rx_update_page(sc, &rx_page, length, &prod);
    }

    if_input(ifp, &ml);

    return 0;
}

void
ale_tick(void *xsc)
{
    struct ale_softc *sc = xsc;
    struct mii_data *mii = &sc->sc_miibus;
    int s;

    s = splnet();
    mii_tick(mii);
    ale_stats_update(sc);

    timeout_add_sec(&sc->ale_tick_ch, 1);
    splx(s);
}

void
ale_reset(struct ale_softc *sc)
{
    uint32_t reg;
    int i;

    /* Initialize PCIe module. From Linux. */
    CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);

    CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
    for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
        DELAY(10);
        if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
            break;
    }
    if (i == 0)
        printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname);

    for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
        if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
            break;
        DELAY(10);
    }

    if (i == 0)
        printf("%s: reset timeout (0x%08x)!\n", sc->sc_dev.dv_xname,
            reg);
}

int
ale_init(struct ifnet *ifp)
{
    struct ale_softc *sc = ifp->if_softc;
    struct mii_data *mii;
    uint8_t eaddr[ETHER_ADDR_LEN];
    bus_addr_t paddr;
    uint32_t reg, rxf_hi, rxf_lo;

    /*
     * Cancel any pending I/O.
     */
    ale_stop(sc);

    /*
     * Reset the chip to a known state.
     */
    ale_reset(sc);

    /* Initialize Tx descriptors, DMA memory blocks. */
    ale_init_rx_pages(sc);
    ale_init_tx_ring(sc);

    /* Reprogram the station address. */
    bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
    CSR_WRITE_4(sc, ALE_PAR0,
        eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
    CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);

    /*
     * Clear the WOL status and disable all WOL features, as WOL
     * would interfere with Rx operation under normal conditions.
     */
    CSR_READ_4(sc, ALE_WOL_CFG);
    CSR_WRITE_4(sc, ALE_WOL_CFG, 0);

    /*
     * Set Tx descriptor/RXF0/CMB base addresses. They share
     * the same high address part of DMAable region.
     */
    paddr = sc->ale_cdata.ale_tx_ring_paddr;
    CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
    CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
    CSR_WRITE_4(sc, ALE_TPD_CNT,
        (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);

    /* Set Rx page base address, note we use single queue. */
    paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
    CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
    paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
    CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));

    /* Set Tx/Rx CMB addresses. */
    paddr = sc->ale_cdata.ale_tx_cmb_paddr;
    CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
    paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
    CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
    paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
    CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));

    /* Mark RXF0 as valid. */
    CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
    CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
    /*
     * No need to initialize RXF1/RXF2/RXF3. We don't use
     * multi-queue yet.
     */

    /* Set Rx page size, excluding guard frame size. */
    CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);

    /* Tell hardware that we're ready to load DMA blocks. */
    CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);

    /* Set Rx/Tx interrupt trigger threshold. */
    CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
        (4 << INT_TRIG_TX_THRESH_SHIFT));
    /*
     * XXX
     * Set the interrupt trigger timer; its purpose and its relation
     * to the interrupt moderation mechanism are not clear yet.
     */
    CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
        ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
        (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));

    /* Configure interrupt moderation timer. */
    sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
    sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
    reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
    reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
    CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
    reg = CSR_READ_4(sc, ALE_MASTER_CFG);
    reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
    reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
    if (ALE_USECS(sc->ale_int_rx_mod) != 0)
        reg |= MASTER_IM_RX_TIMER_ENB;
    if (ALE_USECS(sc->ale_int_tx_mod) != 0)
        reg |= MASTER_IM_TX_TIMER_ENB;
    CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
    CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));

    /* Set the maximum frame size of the controller. */
    if (ifp->if_mtu < ETHERMTU)
        sc->ale_max_frame_size = ETHERMTU;
    else
        sc->ale_max_frame_size = ifp->if_mtu;
    sc->ale_max_frame_size += ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN;
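    /*
     * ale_max_frame_size now covers the Ethernet header, one VLAN
     * tag and the CRC on top of the MTU; program it as the
     * controller's frame-size limit.
     */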
1693 0 : CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure Tx jumbo frame parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		if (ifp->if_mtu < ETHERMTU)
			reg = sc->ale_max_frame_size;
		else if (ifp->if_mtu < 6 * 1024)
			reg = (sc->ale_max_frame_size * 2) / 3;
		else
			reg = sc->ale_max_frame_size / 2;
		CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
		    roundup(reg, TX_JUMBO_THRESH_UNIT) >>
		    TX_JUMBO_THRESH_UNIT_SHIFT);
	}
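	/*
	 * Note (assumption about the register encoding): the jumbo
	 * threshold register appears to count in units of
	 * TX_JUMBO_THRESH_UNIT bytes, so the byte value is rounded up
	 * to that unit and scaled down by the matching shift before
	 * being written.
	 */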

	/* Configure TxQ. */
	reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
	    << TXQ_CFG_TX_FIFO_BURST_SHIFT;
	reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);

	/* Configure Rx jumbo frame & flow control parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
		CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
		    (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
		    RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
		    ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
		    RX_JUMBO_LKAH_MASK));
		reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 7) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}
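	/*
	 * The thresholds above presumably make the MAC emit an XOFF
	 * pause frame once the Rx FIFO fills past 70% and an XON once
	 * it drains below 30%.
	 */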

	/* Disable RSS. */
	CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALE_RSS_CPU, 0);

	/* Configure RxQ. */
	CSR_WRITE_4(sc, ALE_RXQ_CFG,
	    RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure DMA parameters. */
	reg = 0;
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
		reg |= DMA_CFG_TXCMB_ENB;
	CSR_WRITE_4(sc, ALE_DMA_CFG,
	    DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
	    sc->ale_dma_rd_burst | reg |
	    sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
	    ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK) |
	    ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK));

	/*
	 * The hardware can be configured to raise an SMB interrupt at
	 * a programmed interval. Since the driver already runs a
	 * callout once per second, we use that instead of relying on
	 * the periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));

	/* Clear MAC statistics. */
	ale_stats_clear(sc);

	/*
	 * Configure the Tx/Rx MACs:
	 * - Auto-padding for short frames.
	 * - Enable CRC generation.
	 * The MAC is reconfigured for the resolved speed/duplex after
	 * link establishment has been detected. The AR81xx always
	 * performs Rx checksum computation regardless of the
	 * MAC_CFG_RXCSUM_ENB bit; in fact, setting the bit causes Rx
	 * handling problems for fragmented IP datagrams due to a
	 * silicon bug.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	/* Set up the receive filter. */
	ale_iff(sc);

	ale_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);
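	/*
	 * Assumption about the status register semantics: writing all
	 * ones acknowledges every latched cause, and the subsequent
	 * write of 0 leaves the register clean before the first real
	 * interrupt arrives.
	 */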

	sc->ale_flags &= ~ALE_FLAG_LINK;

	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->ale_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

void
ale_stop(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ale_txdesc *txd;
	uint32_t reg;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	timeout_del(&sc->ale_tick_ch);
	sc->ale_flags &= ~ALE_FLAG_LINK;

	ale_stats_update(sc);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/* Disable queue processing and DMA. */
	reg = CSR_READ_4(sc, ALE_TXQ_CFG);
	reg &= ~TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_RXQ_CFG);
	reg &= ~RXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_DMA_CFG);
	reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
	CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
	DELAY(1000);

	/* Stop the Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* XXX Clear interrupt status again? */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Free any TX mbufs still in the queues.
	 */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

void
ale_stop_mac(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}

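	/*
	 * Poll until the controller reports fully idle, bounded by
	 * ALE_TIMEOUT iterations of a 10 microsecond delay.
	 */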
	for (i = ALE_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: could not disable Tx/Rx MAC(0x%08x)!\n",
		    sc->sc_dev.dv_xname, reg);
}

void
ale_init_tx_ring(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	sc->ale_cdata.ale_tx_prod = 0;
	sc->ale_cdata.ale_tx_cons = 0;
	sc->ale_cdata.ale_tx_cnt = 0;

	bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ);
	bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ);
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
	}
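	/*
	 * Clear the Tx CMB (coalescing message block), a word in host
	 * memory which the controller is assumed to update with its
	 * Tx consumer index so completions can be reaped without
	 * register reads.
	 */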
	*sc->ale_cdata.ale_tx_cmb = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0,
	    sc->ale_cdata.ale_tx_cmb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

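/*
 * The AR81xx receives frames into large shared "pages" rather than
 * into per-descriptor mbufs; each page has a companion CMB where the
 * chip is assumed to post its current write position. Reset both
 * pages and their CMBs to a clean state here.
 */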
void
ale_init_rx_pages(struct ale_softc *sc)
{
	struct ale_rx_page *rx_page;
	int i;

	sc->ale_cdata.ale_rx_seqno = 0;
	sc->ale_cdata.ale_rx_curp = 0;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		rx_page = &sc->ale_cdata.ale_rx_page[i];
		bzero(rx_page->page_addr, sc->ale_pagesize);
		bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ);
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
		    rx_page->page_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
ale_rxvlan(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

void
ale_iff(struct ale_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program the new filter. */
		bzero(mchash, sizeof(mchash));

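		/*
		 * Hash scheme: fold the big-endian CRC32 of each
		 * multicast address into a 64-bit table; CRC bit 31
		 * selects MAR0 vs. MAR1 and bits 30-26 select the bit
		 * within the chosen 32-bit register.
		 */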
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
}