/*	$OpenBSD: if_age.c,v 1.35 2017/09/08 05:36:52 deraadt Exp $	*/

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

int	age_match(struct device *, void *, void *);
void	age_attach(struct device *, struct device *, void *);
int	age_detach(struct device *, int);

int	age_miibus_readreg(struct device *, int, int);
void	age_miibus_writereg(struct device *, int, int, int);
void	age_miibus_statchg(struct device *);

int	age_init(struct ifnet *);
int	age_ioctl(struct ifnet *, u_long, caddr_t);
void	age_start(struct ifnet *);
void	age_watchdog(struct ifnet *);
void	age_mediastatus(struct ifnet *, struct ifmediareq *);
int	age_mediachange(struct ifnet *);

int	age_intr(void *);
int	age_dma_alloc(struct age_softc *);
void	age_dma_free(struct age_softc *);
void	age_get_macaddr(struct age_softc *);
void	age_phy_reset(struct age_softc *);

int	age_encap(struct age_softc *, struct mbuf *);
void	age_init_tx_ring(struct age_softc *);
int	age_init_rx_ring(struct age_softc *);
void	age_init_rr_ring(struct age_softc *);
void	age_init_cmb_block(struct age_softc *);
void	age_init_smb_block(struct age_softc *);
int	age_newbuf(struct age_softc *, struct age_rxdesc *);
void	age_mac_config(struct age_softc *);
void	age_txintr(struct age_softc *, int);
void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
void	age_rxintr(struct age_softc *, int);
void	age_tick(void *);
void	age_reset(struct age_softc *);
void	age_stop(struct age_softc *);
void	age_stats_update(struct age_softc *);
void	age_stop_txmac(struct age_softc *);
void	age_stop_rxmac(struct age_softc *);
void	age_rxvlan(struct age_softc *sc);
void	age_iff(struct age_softc *);

const struct pci_matchid age_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1 }
};

struct cfattach age_ca = {
	sizeof (struct age_softc), age_match, age_attach
};

struct cfdriver age_cd = {
	NULL, "age", DV_IFNET
};

int agedebug = 0;
#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)
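/*
 * Usage note: the macro passes its argument list straight to printf,
 * so calls need double parentheses, e.g.:
 *
 *	DPRINTF(("%s: Tx ring full\n", sc->sc_dev.dv_xname));
 */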

#define AGE_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)

int
age_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, age_devices,
	    sizeof (age_devices) / sizeof (age_devices[0]));
}

void
age_attach(struct device *parent, struct device *self, void *aux)
{
	struct age_softc *sc = (struct age_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	/*
	 * Map I/O memory.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, AGE_PCIR_BAR);
	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Establish the interrupt handler.
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, age_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the Ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (agedebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->age_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->age_chip_rev);
	}

	if (agedebug) {
		printf("%s: %d Tx FIFO, %d Rx FIFO\n", sc->sc_dev.dv_xname,
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA resources. */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_watchdog = age_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	bcopy(sc->age_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = age_miibus_readreg;
	sc->sc_miibus.mii_writereg = age_miibus_writereg;
	sc->sc_miibus.mii_statchg = age_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
	    age_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->age_tick_ch, age_tick, sc);

	return;
fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
age_detach(struct device *self, int flags)
{
	struct age_softc *sc = (struct age_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	age_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

/*
 * Read a PHY register on the MII of the L1.
 */
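/*
 * Both MDIO accessors below use the same protocol: a single write to
 * AGE_MDIO starts the transfer, and the MDIO_OP_EXECUTE/MDIO_OP_BUSY
 * bits are then polled (1us per iteration, up to AGE_PHY_TIMEOUT
 * iterations) until the PHY completes the cycle.
 */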
int
age_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct age_softc *sc = (struct age_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the L1.
 */
void
age_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct age_softc *sc = (struct age_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
void
age_miibus_statchg(struct device *dev)
{
	struct age_softc *sc = (struct age_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 * Get the current interface media status.
 */
void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cmb = sc->age_rdata.age_cmb_block;
	status = letoh32(cmb->intr_status);
	if ((status & AGE_INTRS) == 0)
		goto back;

	sc->age_tpd_cons = (letoh32(cmb->tpd_cons) & TPD_CONS_MASK) >>
	    TPD_CONS_SHIFT;
	sc->age_rr_prod = (letoh32(cmb->rprod_cons) & RRD_PROD_MASK) >>
	    RRD_PROD_SHIFT;
	/* Let hardware know CMB was served. */
	cmb->intr_status = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_CMB_RX)
			age_rxintr(sc, sc->age_rr_prod);

		if (status & INTR_CMB_TX)
			age_txintr(sc, sc->age_tpd_cons);

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			age_init(ifp);
		}

		age_start(ifp);

		if (status & INTR_SMB)
			age_stats_update(sc);
	}

	/* Check whether CMB was updated while serving the Tx/Rx/SMB handlers. */
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);

	return (1);
}

void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found; let TWSI reload the EEPROM.
		 * This sets the Ethernet address of the controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: EEPROM reload timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    sc->sc_dev.dv_xname);
	}

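	/*
	 * The station address is stored across two registers: AGE_PAR1
	 * holds the two most significant bytes and AGE_PAR0 the remaining
	 * four, so unpack them in network order.
	 */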
	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);
	sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E
#define	ATPHY_CDTC		0x16
#define	PHY_CDTC_ENB		0x0001
#define	PHY_CDTC_POFF		8
#define	ATPHY_CDTS		0x1C
#define	PHY_CDTS_STAT_OK	0x0000
#define	PHY_CDTS_STAT_SHORT	0x0100
#define	PHY_CDTS_STAT_OPEN	0x0200
#define	PHY_CDTS_STAT_INVAL	0x0300
#define	PHY_CDTS_STAT_MASK	0x0300
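/*
 * These appear to be the Atheros PHY cable-diagnostic registers:
 * writing ATPHY_CDTC starts a test on the wire pair selected via
 * PHY_CDTC_POFF, and ATPHY_CDTS then reports open/short status.
 * The loop below treats an "open" result on every pair as a hint
 * that no link partner is attached.
 */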

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
#undef	ATPHY_CDTC
#undef	PHY_CDTC_ENB
#undef	PHY_CDTC_POFF
#undef	ATPHY_CDTS
#undef	PHY_CDTS_STAT_OK
#undef	PHY_CDTS_STAT_SHORT
#undef	PHY_CDTS_STAT_OPEN
#undef	PHY_CDTS_STAT_INVAL
#undef	PHY_CDTS_STAT_MASK
}

int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA resources for the Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the Tx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (caddr_t *)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_tx_ring, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the Rx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (caddr_t *)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx return ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the Rx return ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (caddr_t *)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring."
		    "\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_rr_ring, 1);
		return error;
	}

	sc->age_rdata.age_rr_ring_paddr =
	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the CMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the CMB block. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
	    nsegs, AGE_CMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_cmb_block, 1);
		return error;
	}

	sc->age_rdata.age_cmb_block_paddr =
	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the SMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_smb_block_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the SMB block. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
	    nsegs, AGE_SMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_smb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the SMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_smb_block, 1);
		return error;
	}

	sc->age_rdata.age_smb_block_paddr =
	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	return (0);
}

void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_tx_ring, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rr_ring, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_cmb_block, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_smb_block, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		if (sc->age_cdata.age_tx_cnt + AGE_MAXTXSEGS >=
		    AGE_TX_RING_CNT - 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}
		enq = 1;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	age_init(ifp);
	age_start(ifp);
}

int
age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			age_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				age_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				age_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

int
age_encap(struct age_softc *sc, struct mbuf *m)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, prod;

	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}
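	/*
	 * The EFBIG retry above handles chains with more segments than
	 * the DMA map allows: m_defrag() compacts the mbuf chain and
	 * the load is attempted once more before the frame is dropped.
	 */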

	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (m->m_flags & M_VLANTAG) {
		vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	for (i = 0; i < map->dm_nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

drop:
	m_freem(m);
	return (error);
}

void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_txdesc *txd;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifq_clr_oactive(&ifp->if_snd);
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clearing the Tx descriptors is not required, but it
		 * helps debugging in case of Tx issues.
		 */
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
		    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm the watchdog timer only when there are no
		 * pending Tx descriptors in the queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;

		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
		    sc->age_cdata.age_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/* Receive a frame. */
void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = letoh32(rxrd->flags);
	index = letoh32(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

	sc->age_cdata.age_rxlen = AGE_RX_BYTES(letoh32(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 *  o frames whose IP/TCP/UDP checksum is bad.
		 *  o frames whose frame length and protocol specific
		 *    length do not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(letoh32(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/*
			 * The L1 controller appears to have no way to
			 * tell the hardware to strip the CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}
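			/*
			 * Example: if the last segment held 4 bytes or
			 * fewer, the CRC straddled two mbufs; the tail
			 * mbuf was freed above and the leftover CRC
			 * bytes were trimmed from the previous one.
			 */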

			m = sc->age_cdata.age_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * The L1 controller appears to be able to
			 * compute partial checksums. A partial checksum
			 * could be used to accelerate checksum
			 * computation for fragmented TCP/UDP packets,
			 * and the upper network stack already takes
			 * advantage of such values in the IP reassembly
			 * stage. But the correctness of the partial
			 * hardware checksum assistance is unverified
			 * due to lack of a data sheet. If it is proven
			 * to work on the L1 it can be enabled.
			 */
			if (status & AGE_RRD_IPV4) {
				if ((status & AGE_RRD_IPCSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
				/*
				 * Don't mark a bad checksum for TCP/UDP
				 * frames, as fragmented frames may
				 * always have the bad-checksum bit set
				 * in the descriptor status.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				u_int32_t vtag = AGE_RX_VLAN(letoh32(rxrd->vtags));
				m->m_pkthdr.ether_vtag =
				    AGE_RX_VLAN_TAG(vtag);
				m->m_flags |= M_VLANTAG;
			}
#endif

			ml_enqueue(&ml, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if_input(ifp, &ml);

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(letoh32(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check the number of segments against the received
		 * bytes. A non-matching value would indicate that the
		 * hardware is still trying to update the Rx return
		 * descriptors. It is unclear whether this check is
		 * really needed.
		 */
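		/*
		 * With the usual 2KB clusters (MCLBYTES == 2048 and
		 * ETHER_ALIGN == 2) each segment carries at most 2046
		 * bytes, so e.g. a 3000-byte frame must be reported as
		 * exactly howmany(3000, 2046) == 2 segments.
		 */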
		pktlen = AGE_RX_BYTES(letoh32(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
		    sc->age_cdata.age_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Notify the hardware of the availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->age_tick_ch, 1);
	splx(s);
}

void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout (0x%08x)!\n", sc->sc_dev.dv_xname,
		    reg);

	/* Initialize the PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		age_stop(sc);
		return (error);
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it is
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, the driver could have
	 * independent Tx/Rx handlers, and the Rx handler in turn could
	 * run without any locking.
	 */
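	/*
	 * In practice this means a single AGE_COMMIT_MBOX() write
	 * publishes the Tx producer index together with the Rx
	 * return consumer/Rx producer indices, which is why both the
	 * Tx start and Rx refill paths end with that macro.
	 */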
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    sc->sc_dev.dv_xname, sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/*
	 * Set the maximum frame size, but don't let the MTU drop
	 * below ETHERMTU.
	 */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
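	/*
	 * With the standard 1500-byte MTU this programs
	 * 1500 + 14 (Ethernet header) + 18 (VLAN-tagged header) +
	 * 4 (CRC) = 1536 bytes.
	 */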
1628 :
1629 : /* Configure jumbo frame. */
1630 0 : fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
1631 0 : CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
1632 : (((fsize / sizeof(uint64_t)) <<
1633 : RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
1634 : ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
1635 : RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
1636 : ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
1637 : RXQ_JUMBO_CFG_RRD_TIMER_MASK));
1638 :
1639 : /* Configure flow-control parameters. From Linux. */
1640 0 : if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
1641 : /*
1642 : * Magic workaround for old-L1.
1643 : * Don't know which hw revision requires this magic.
1644 : */
1645 0 : CSR_WRITE_4(sc, 0x12FC, 0x6500);
1646 : /*
1647 : * Another magic workaround for flow-control mode
1648 : * change. From Linux.
1649 : */
1650 0 : CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1651 0 : }
1652 : /*
1653 : * TODO
1654 : * Should understand pause parameter relationships between FIFO
1655 : * size and number of Rx descriptors and Rx return descriptors.
1656 : *
1657 : * Magic parameters came from Linux.
1658 : */
1659 0 : switch (sc->age_chip_rev) {
1660 : case 0x8001:
1661 : case 0x9001:
1662 : case 0x9002:
1663 : case 0x9003:
1664 : rxf_hi = AGE_RX_RING_CNT / 16;
1665 : rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
1666 : rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
1667 : rrd_lo = AGE_RR_RING_CNT / 16;
1668 0 : break;
1669 : default:
1670 0 : reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
1671 0 : rxf_lo = reg / 16;
1672 0 : if (rxf_lo < 192)
1673 : rxf_lo = 192;
1674 0 : rxf_hi = (reg * 7) / 8;
1675 0 : if (rxf_hi < rxf_lo)
1676 0 : rxf_hi = rxf_lo + 16;
1677 0 : reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
1678 0 : rrd_lo = reg / 8;
1679 0 : rrd_hi = (reg * 7) / 8;
1680 0 : if (rrd_lo < 2)
1681 0 : rrd_lo = 2;
1682 0 : if (rrd_hi < rrd_lo)
1683 0 : rrd_hi = rrd_lo + 3;
1684 : break;
1685 : }
1686 0 : CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
1687 : ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
1688 : RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
1689 : ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
1690 : RXQ_FIFO_PAUSE_THRESH_HI_MASK));
1691 0 : CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
1692 : ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
1693 : RXQ_RRD_PAUSE_THRESH_LO_MASK) |
1694 : ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
1695 : RXQ_RRD_PAUSE_THRESH_HI_MASK));
1696 :
1697 : /* Configure RxQ. */
1698 0 : CSR_WRITE_4(sc, AGE_RXQ_CFG,
1699 : ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
1700 : RXQ_CFG_RD_BURST_MASK) |
1701 : ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
1702 : RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
1703 : ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
1704 : RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
1705 : RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
1706 :
1707 : /* Configure TxQ. */
1708 0 : CSR_WRITE_4(sc, AGE_TXQ_CFG,
1709 : ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
1710 : TXQ_CFG_TPD_BURST_MASK) |
1711 : ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
1712 : TXQ_CFG_TX_FIFO_BURST_MASK) |
1713 : ((TXQ_CFG_TPD_FETCH_DEFAULT <<
1714 : TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
1715 : TXQ_CFG_ENB);
1716 :
1717 : /* Configure DMA parameters. */
1718 0 : CSR_WRITE_4(sc, AGE_DMA_CFG,
1719 : DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
1720 : sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
1721 : sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
1722 :
1723 : /* Configure CMB DMA write threshold. */
1724 0 : CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
1725 : ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
1726 : CMB_WR_THRESH_RRD_MASK) |
1727 : ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
1728 : CMB_WR_THRESH_TPD_MASK));
1729 :
1730 : /* Set CMB/SMB timer and enable them. */
1731 0 : CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
1732 : ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
1733 : ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
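     : /*
     :  * Note: the CMB write timers bound interrupt coalescing;
     :  * with AGE_USECS(2) the coalescing message block is flushed
     :  * to host memory after at most roughly 2us per direction.
     :  */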
1734 :
1735 : /* Request SMB updates every second. */
1736 0 : CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
1737 0 : CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
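     : /*
     :  * Note: AGE_USECS(1000 * 1000) is one second, matching the
     :  * one-second tick armed via timeout_add_sec() below, which
     :  * presumably calls age_stats_update() to harvest the block.
     :  */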
1738 :
1739 : /*
1740 : * Disable all WOL bits, as WOL can interfere with normal Rx
1741 : * operation.
1742 : */
1743 0 : CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
1744 :
1745 : /*
1746 : * Configure Tx/Rx MACs.
1747 : * - Auto-padding for short frames.
1748 : * - Enable CRC generation.
1749 : * Start with full-duplex/1000Mbps media. The MAC is actually
1750 : * reconfigured after the link has been established.
1751 : */
1752 0 : CSR_WRITE_4(sc, AGE_MAC_CFG,
1753 : MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
1754 : MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
1755 : ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1756 : MAC_CFG_PREAMBLE_MASK));
1757 :
1758 : /* Set up the receive filter. */
1759 0 : age_iff(sc);
1760 :
1761 0 : age_rxvlan(sc);
1762 :
1763 0 : reg = CSR_READ_4(sc, AGE_MAC_CFG);
1764 0 : reg |= MAC_CFG_RXCSUM_ENB;
1765 :
1766 : /* Ack all pending interrupts and clear them. */
1767 0 : CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1768 0 : CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1769 :
1770 : /* Finally enable Tx/Rx MAC. */
1771 0 : CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1772 :
1773 0 : sc->age_flags &= ~AGE_FLAG_LINK;
1774 :
1775 : /* Switch to the current media. */
1776 0 : mii_mediachg(mii);
1777 :
1778 0 : timeout_add_sec(&sc->age_tick_ch, 1);
1779 :
1780 0 : ifp->if_flags |= IFF_RUNNING;
1781 0 : ifq_clr_oactive(&ifp->if_snd);
1782 :
1783 0 : return (0);
1784 0 : }
1785 :
1786 : void
1787 0 : age_stop(struct age_softc *sc)
1788 : {
1789 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1790 : struct age_txdesc *txd;
1791 : struct age_rxdesc *rxd;
1792 : uint32_t reg;
1793 : int i;
1794 :
1795 : /*
1796 : * Mark the interface down and cancel the watchdog timer.
1797 : */
1798 0 : ifp->if_flags &= ~IFF_RUNNING;
1799 0 : ifq_clr_oactive(&ifp->if_snd);
1800 0 : ifp->if_timer = 0;
1801 :
1802 0 : sc->age_flags &= ~AGE_FLAG_LINK;
1803 0 : timeout_del(&sc->age_tick_ch);
1804 :
1805 : /*
1806 : * Disable interrupts.
1807 : */
1808 0 : CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1809 0 : CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1810 :
1811 : /* Stop CMB/SMB updates. */
1812 0 : CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1813 :
1814 : /* Stop Rx/Tx MAC. */
1815 0 : age_stop_rxmac(sc);
1816 0 : age_stop_txmac(sc);
1817 :
1818 : /* Stop DMA. */
1819 0 : CSR_WRITE_4(sc, AGE_DMA_CFG,
1820 : CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1821 :
1822 : /* Stop TxQ/RxQ. */
1823 0 : CSR_WRITE_4(sc, AGE_TXQ_CFG,
1824 : CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1825 0 : CSR_WRITE_4(sc, AGE_RXQ_CFG,
1826 : CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
1827 0 : for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1828 0 : if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1829 : break;
1830 0 : DELAY(10);
1831 : }
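     : /*
     :  * Note: the loop above polls the idle status for up to
     :  * AGE_RESET_TIMEOUT iterations with a 10us delay each, so the
     :  * worst-case busy-wait is AGE_RESET_TIMEOUT * 10us before the
     :  * failure below is reported.
     :  */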
1832 0 : if (i == 0)
1833 0 : printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
1834 0 : sc->sc_dev.dv_xname, reg);
1835 :
1836 : /* Reclaim Rx buffers that have been processed. */
1837 0 : if (sc->age_cdata.age_rxhead != NULL)
1838 0 : m_freem(sc->age_cdata.age_rxhead);
1839 0 : AGE_RXCHAIN_RESET(sc);
1840 :
1841 : /*
1842 : * Free RX and TX mbufs still in the queues.
1843 : */
1844 0 : for (i = 0; i < AGE_RX_RING_CNT; i++) {
1845 0 : rxd = &sc->age_cdata.age_rxdesc[i];
1846 0 : if (rxd->rx_m != NULL) {
1847 0 : bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
1848 : rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1849 0 : bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1850 0 : m_freem(rxd->rx_m);
1851 0 : rxd->rx_m = NULL;
1852 0 : }
1853 : }
1854 0 : for (i = 0; i < AGE_TX_RING_CNT; i++) {
1855 0 : txd = &sc->age_cdata.age_txdesc[i];
1856 0 : if (txd->tx_m != NULL) {
1857 0 : bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
1858 : txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1859 0 : bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1860 0 : m_freem(txd->tx_m);
1861 0 : txd->tx_m = NULL;
1862 0 : }
1863 : }
1864 0 : }
1865 :
1866 : void
1867 0 : age_stats_update(struct age_softc *sc)
1868 : {
1869 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1870 : struct age_stats *stat;
1871 : struct smb *smb;
1872 :
1873 0 : stat = &sc->age_stat;
1874 :
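     : /*
     :  * Note: the POSTREAD|POSTWRITE sync below hands the SMB back
     :  * to the CPU so the statistics snapshot the hardware has DMA'd
     :  * is visible before it is read; the matching PREREAD|PREWRITE
     :  * sync at the end returns the block to the device.
     :  */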
1875 0 : bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
1876 : sc->age_cdata.age_smb_block_map->dm_mapsize,
     : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1877 :
1878 0 : smb = sc->age_rdata.age_smb_block;
1879 0 : if (smb->updated == 0)
1880 0 : return;
1881 :
1882 : /* Rx stats. */
1883 0 : stat->rx_frames += smb->rx_frames;
1884 0 : stat->rx_bcast_frames += smb->rx_bcast_frames;
1885 0 : stat->rx_mcast_frames += smb->rx_mcast_frames;
1886 0 : stat->rx_pause_frames += smb->rx_pause_frames;
1887 0 : stat->rx_control_frames += smb->rx_control_frames;
1888 0 : stat->rx_crcerrs += smb->rx_crcerrs;
1889 0 : stat->rx_lenerrs += smb->rx_lenerrs;
1890 0 : stat->rx_bytes += smb->rx_bytes;
1891 0 : stat->rx_runts += smb->rx_runts;
1892 0 : stat->rx_fragments += smb->rx_fragments;
1893 0 : stat->rx_pkts_64 += smb->rx_pkts_64;
1894 0 : stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
1895 0 : stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
1896 0 : stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
1897 0 : stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
1898 0 : stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
1899 0 : stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
1900 0 : stat->rx_pkts_truncated += smb->rx_pkts_truncated;
1901 0 : stat->rx_fifo_oflows += smb->rx_fifo_oflows;
1902 0 : stat->rx_desc_oflows += smb->rx_desc_oflows;
1903 0 : stat->rx_alignerrs += smb->rx_alignerrs;
1904 0 : stat->rx_bcast_bytes += smb->rx_bcast_bytes;
1905 0 : stat->rx_mcast_bytes += smb->rx_mcast_bytes;
1906 0 : stat->rx_pkts_filtered += smb->rx_pkts_filtered;
1907 :
1908 : /* Tx stats. */
1909 0 : stat->tx_frames += smb->tx_frames;
1910 0 : stat->tx_bcast_frames += smb->tx_bcast_frames;
1911 0 : stat->tx_mcast_frames += smb->tx_mcast_frames;
1912 0 : stat->tx_pause_frames += smb->tx_pause_frames;
1913 0 : stat->tx_excess_defer += smb->tx_excess_defer;
1914 0 : stat->tx_control_frames += smb->tx_control_frames;
1915 0 : stat->tx_deferred += smb->tx_deferred;
1916 0 : stat->tx_bytes += smb->tx_bytes;
1917 0 : stat->tx_pkts_64 += smb->tx_pkts_64;
1918 0 : stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
1919 0 : stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
1920 0 : stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
1921 0 : stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
1922 0 : stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
1923 0 : stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
1924 0 : stat->tx_single_colls += smb->tx_single_colls;
1925 0 : stat->tx_multi_colls += smb->tx_multi_colls;
1926 0 : stat->tx_late_colls += smb->tx_late_colls;
1927 0 : stat->tx_excess_colls += smb->tx_excess_colls;
1928 0 : stat->tx_underrun += smb->tx_underrun;
1929 0 : stat->tx_desc_underrun += smb->tx_desc_underrun;
1930 0 : stat->tx_lenerrs += smb->tx_lenerrs;
1931 0 : stat->tx_pkts_truncated += smb->tx_pkts_truncated;
1932 0 : stat->tx_bcast_bytes += smb->tx_bcast_bytes;
1933 0 : stat->tx_mcast_bytes += smb->tx_mcast_bytes;
1934 :
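     : /*
     :  * Note: frames dropped for excessive collisions are weighted
     :  * by HDPX_CFG_RETRY_DEFAULT below, presumably because each
     :  * such frame was retried that many times before being given up.
     :  */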
1935 0 : ifp->if_collisions += smb->tx_single_colls +
1936 0 : smb->tx_multi_colls + smb->tx_late_colls +
1937 0 : smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
1938 :
1939 0 : ifp->if_oerrors += smb->tx_excess_colls +
1940 0 : smb->tx_late_colls + smb->tx_underrun +
1941 0 : smb->tx_pkts_truncated;
1942 :
1943 0 : ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
1944 0 : smb->rx_runts + smb->rx_pkts_truncated +
1945 0 : smb->rx_fifo_oflows + smb->rx_desc_oflows +
1946 0 : smb->rx_alignerrs;
1947 :
1948 : /* Update done, clear. */
1949 0 : smb->updated = 0;
1950 :
1951 0 : bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
1952 : sc->age_cdata.age_smb_block_map->dm_mapsize,
1953 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1954 0 : }
1955 :
1956 : void
1957 0 : age_stop_txmac(struct age_softc *sc)
1958 : {
1959 : uint32_t reg;
1960 : int i;
1961 :
1962 0 : reg = CSR_READ_4(sc, AGE_MAC_CFG);
1963 0 : if ((reg & MAC_CFG_TX_ENB) != 0) {
1964 0 : reg &= ~MAC_CFG_TX_ENB;
1965 0 : CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
1966 0 : }
1967 : /* Stop Tx DMA engine. */
1968 0 : reg = CSR_READ_4(sc, AGE_DMA_CFG);
1969 0 : if ((reg & DMA_CFG_RD_ENB) != 0) {
1970 0 : reg &= ~DMA_CFG_RD_ENB;
1971 0 : CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
1972 0 : }
1973 0 : for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1974 0 : if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
1975 0 : (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
1976 : break;
1977 0 : DELAY(10);
1978 : }
1979 0 : if (i == 0)
1980 0 : printf("%s: stopping TxMAC timed out!\n", sc->sc_dev.dv_xname);
1981 0 : }
1982 :
1983 : void
1984 0 : age_stop_rxmac(struct age_softc *sc)
1985 : {
1986 : uint32_t reg;
1987 : int i;
1988 :
1989 0 : reg = CSR_READ_4(sc, AGE_MAC_CFG);
1990 0 : if ((reg & MAC_CFG_RX_ENB) != 0) {
1991 0 : reg &= ~MAC_CFG_RX_ENB;
1992 0 : CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
1993 0 : }
1994 : /* Stop Rx DMA engine. */
1995 0 : reg = CSR_READ_4(sc, AGE_DMA_CFG);
1996 0 : if ((reg & DMA_CFG_WR_ENB) != 0) {
1997 0 : reg &= ~DMA_CFG_WR_ENB;
1998 0 : CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
1999 0 : }
2000 0 : for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2001 0 : if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2002 0 : (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2003 : break;
2004 0 : DELAY(10);
2005 : }
2006 0 : if (i == 0)
2007 0 : printf("%s: stopping RxMAC timed out!\n", sc->sc_dev.dv_xname);
2008 0 : }
2009 :
2010 : void
2011 0 : age_init_tx_ring(struct age_softc *sc)
2012 : {
2013 : struct age_ring_data *rd;
2014 : struct age_txdesc *txd;
2015 : int i;
2016 :
2017 0 : sc->age_cdata.age_tx_prod = 0;
2018 0 : sc->age_cdata.age_tx_cons = 0;
2019 0 : sc->age_cdata.age_tx_cnt = 0;
2020 :
2021 0 : rd = &sc->age_rdata;
2022 0 : bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
2023 0 : for (i = 0; i < AGE_TX_RING_CNT; i++) {
2024 0 : txd = &sc->age_cdata.age_txdesc[i];
2025 0 : txd->tx_desc = &rd->age_tx_ring[i];
2026 0 : txd->tx_m = NULL;
2027 : }
2028 :
2029 0 : bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2030 : sc->age_cdata.age_tx_ring_map->dm_mapsize,
2031 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2032 0 : }
2033 :
2034 : int
2035 0 : age_init_rx_ring(struct age_softc *sc)
2036 : {
2037 : struct age_ring_data *rd;
2038 : struct age_rxdesc *rxd;
2039 : int i;
2040 :
2041 0 : sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2042 0 : rd = &sc->age_rdata;
2043 0 : bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
2044 0 : for (i = 0; i < AGE_RX_RING_CNT; i++) {
2045 0 : rxd = &sc->age_cdata.age_rxdesc[i];
2046 0 : rxd->rx_m = NULL;
2047 0 : rxd->rx_desc = &rd->age_rx_ring[i];
2048 0 : if (age_newbuf(sc, rxd) != 0)
2049 0 : return (ENOBUFS);
2050 : }
2051 :
2052 0 : bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2053 : sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2054 :
2055 0 : return (0);
2056 0 : }
2057 :
2058 : void
2059 0 : age_init_rr_ring(struct age_softc *sc)
2060 : {
2061 : struct age_ring_data *rd;
2062 :
2063 0 : sc->age_cdata.age_rr_cons = 0;
2064 0 : AGE_RXCHAIN_RESET(sc);
2065 :
2066 0 : rd = &sc->age_rdata;
2067 0 : bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
2068 0 : bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2069 : sc->age_cdata.age_rr_ring_map->dm_mapsize,
2070 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2071 0 : }
2072 :
2073 : void
2074 0 : age_init_cmb_block(struct age_softc *sc)
2075 : {
2076 : struct age_ring_data *rd;
2077 :
2078 0 : rd = &sc->age_rdata;
2079 0 : bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
2080 0 : bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2081 : sc->age_cdata.age_cmb_block_map->dm_mapsize,
2082 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2083 0 : }
2084 :
2085 : void
2086 0 : age_init_smb_block(struct age_softc *sc)
2087 : {
2088 : struct age_ring_data *rd;
2089 :
2090 0 : rd = &sc->age_rdata;
2091 0 : bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
2092 0 : bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2093 : sc->age_cdata.age_smb_block_map->dm_mapsize,
2094 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2095 0 : }
2096 :
2097 : int
2098 0 : age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
2099 : {
2100 : struct rx_desc *desc;
2101 : struct mbuf *m;
2102 : bus_dmamap_t map;
2103 : int error;
2104 :
2105 0 : MGETHDR(m, M_DONTWAIT, MT_DATA);
2106 0 : if (m == NULL)
2107 0 : return (ENOBUFS);
2108 0 : MCLGET(m, M_DONTWAIT);
2109 0 : if (!(m->m_flags & M_EXT)) {
2110 0 : m_freem(m);
2111 0 : return (ENOBUFS);
2112 : }
2113 :
2114 0 : m->m_len = m->m_pkthdr.len = MCLBYTES;
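     : /*
     :  * Note: ETHER_ALIGN is 2, so trimming 2 bytes here makes the
     :  * 14-byte Ethernet header end on a 4-byte boundary, aligning
     :  * the IP header that follows for the benefit of
     :  * strict-alignment architectures.
     :  */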
2115 0 : m_adj(m, ETHER_ALIGN);
2116 :
2117 0 : error = bus_dmamap_load_mbuf(sc->sc_dmat,
2118 : sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2119 :
2120 0 : if (error != 0) {
2121 0 : m_freem(m);
2122 0 : printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
2123 0 : return (error);
2124 : }
2125 :
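     : /*
     :  * Note: the load above targeted the spare DMA map, so on
     :  * failure the ring slot keeps its previous mbuf and mapping
     :  * intact; only now, with a new buffer in hand, is the old map
     :  * unloaded and the spare swapped in.
     :  */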
2126 0 : if (rxd->rx_m != NULL) {
2127 0 : bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2128 : rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2129 0 : bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2130 0 : }
2131 0 : map = rxd->rx_dmamap;
2132 0 : rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2133 0 : sc->age_cdata.age_rx_sparemap = map;
2134 0 : bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2135 : rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2136 0 : rxd->rx_m = m;
2137 :
2138 0 : desc = rxd->rx_desc;
2139 0 : desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2140 0 : desc->len =
2141 0 : htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2142 : AGE_RD_LEN_SHIFT);
2143 :
2144 0 : return (0);
2145 0 : }
2146 :
2147 : void
2148 0 : age_rxvlan(struct age_softc *sc)
2149 : {
2150 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2151 : uint32_t reg;
2152 :
2153 0 : reg = CSR_READ_4(sc, AGE_MAC_CFG);
2154 0 : reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2155 0 : if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
2156 0 : reg |= MAC_CFG_VLAN_TAG_STRIP;
2157 0 : CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2158 0 : }
2159 :
2160 : void
2161 0 : age_iff(struct age_softc *sc)
2162 : {
2163 0 : struct arpcom *ac = &sc->sc_arpcom;
2164 0 : struct ifnet *ifp = &ac->ac_if;
2165 : struct ether_multi *enm;
2166 : struct ether_multistep step;
2167 : uint32_t crc;
2168 0 : uint32_t mchash[2];
2169 : uint32_t rxcfg;
2170 :
2171 0 : rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2172 0 : rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2173 0 : ifp->if_flags &= ~IFF_ALLMULTI;
2174 :
2175 : /*
2176 : * Always accept broadcast frames.
2177 : */
2178 0 : rxcfg |= MAC_CFG_BCAST;
2179 :
2180 0 : if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2181 0 : ifp->if_flags |= IFF_ALLMULTI;
2182 0 : if (ifp->if_flags & IFF_PROMISC)
2183 0 : rxcfg |= MAC_CFG_PROMISC;
2184 : else
2185 0 : rxcfg |= MAC_CFG_ALLMULTI;
2186 0 : mchash[0] = mchash[1] = 0xFFFFFFFF;
2187 0 : } else {
2188 : /* Program new filter. */
2189 0 : bzero(mchash, sizeof(mchash));
2190 :
2191 0 : ETHER_FIRST_MULTI(step, ac, enm);
2192 0 : while (enm != NULL) {
2193 0 : crc = ether_crc32_be(enm->enm_addrlo,
2194 : ETHER_ADDR_LEN);
2195 :
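     : /*
     :  * Note: bit 31 of the big-endian CRC selects MAR0 or MAR1 and
     :  * bits 30:26 select a bit within it, e.g. a CRC of 0x80000000
     :  * sets bit 0 of mchash[1] (MAR1).
     :  */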
2196 0 : mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2197 :
2198 0 : ETHER_NEXT_MULTI(step, enm);
2199 : }
2200 : }
2201 :
2202 0 : CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2203 0 : CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2204 0 : CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2205 0 : }