Line data Source code
1 : /* $OpenBSD: if_alc.c,v 1.42 2017/09/08 05:36:52 deraadt Exp $ */
2 : /*-
3 : * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
4 : * All rights reserved.
5 : *
6 : * Redistribution and use in source and binary forms, with or without
7 : * modification, are permitted provided that the following conditions
8 : * are met:
9 : * 1. Redistributions of source code must retain the above copyright
10 : * notice unmodified, this list of conditions, and the following
11 : * disclaimer.
12 : * 2. Redistributions in binary form must reproduce the above copyright
13 : * notice, this list of conditions and the following disclaimer in the
14 : * documentation and/or other materials provided with the distribution.
15 : *
16 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 : * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 : * SUCH DAMAGE.
27 : */
28 :
29 : /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */
30 :
31 : #include "bpfilter.h"
32 : #include "vlan.h"
33 :
34 : #include <sys/param.h>
35 : #include <sys/endian.h>
36 : #include <sys/systm.h>
37 : #include <sys/sockio.h>
38 : #include <sys/mbuf.h>
39 : #include <sys/queue.h>
40 : #include <sys/kernel.h>
41 : #include <sys/device.h>
42 : #include <sys/timeout.h>
43 : #include <sys/socket.h>
44 :
45 : #include <machine/bus.h>
46 :
47 : #include <net/if.h>
48 : #include <net/if_dl.h>
49 : #include <net/if_media.h>
50 :
51 : #include <netinet/in.h>
52 : #include <netinet/if_ether.h>
53 :
54 : #if NBPFILTER > 0
55 : #include <net/bpf.h>
56 : #endif
57 :
58 : #include <dev/mii/mii.h>
59 : #include <dev/mii/miivar.h>
60 :
61 : #include <dev/pci/pcireg.h>
62 : #include <dev/pci/pcivar.h>
63 : #include <dev/pci/pcidevs.h>
64 :
65 : #include <dev/pci/if_alcreg.h>
66 :
/* Autoconf entry points. */
int alc_match(struct device *, void *, void *);
void alc_attach(struct device *, struct device *, void *);
int alc_detach(struct device *, int);
int alc_activate(struct device *, int);

/* ifnet / ifmedia entry points. */
int alc_init(struct ifnet *);
void alc_start(struct ifnet *);
int alc_ioctl(struct ifnet *, u_long, caddr_t);
void alc_watchdog(struct ifnet *);
int alc_mediachange(struct ifnet *);
void alc_mediastatus(struct ifnet *, struct ifmediareq *);

/* Internal helpers. */
void alc_aspm(struct alc_softc *, uint64_t);
void alc_disable_l0s_l1(struct alc_softc *);
int alc_dma_alloc(struct alc_softc *);
void alc_dma_free(struct alc_softc *);
int alc_encap(struct alc_softc *, struct mbuf *);
void alc_get_macaddr(struct alc_softc *);
void alc_init_cmb(struct alc_softc *);
void alc_init_rr_ring(struct alc_softc *);
int alc_init_rx_ring(struct alc_softc *);
void alc_init_smb(struct alc_softc *);
void alc_init_tx_ring(struct alc_softc *);
int alc_intr(void *);
void alc_mac_config(struct alc_softc *);
int alc_miibus_readreg(struct device *, int, int);
void alc_miibus_statchg(struct device *);
void alc_miibus_writereg(struct device *, int, int, int);
int alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
void alc_phy_down(struct alc_softc *);
void alc_phy_reset(struct alc_softc *);
void alc_reset(struct alc_softc *);
void alc_rxeof(struct alc_softc *, struct rx_rdesc *);
void alc_rxintr(struct alc_softc *);
void alc_iff(struct alc_softc *);
void alc_rxvlan(struct alc_softc *);
void alc_start_queue(struct alc_softc *);
void alc_stats_clear(struct alc_softc *);
void alc_stats_update(struct alc_softc *);
void alc_stop(struct alc_softc *);
void alc_stop_mac(struct alc_softc *);
void alc_stop_queue(struct alc_softc *);
void alc_tick(void *);
void alc_txeof(struct alc_softc *);

/*
 * DMA burst sizes in bytes, indexed by the 3-bit PCIe Device Control
 * encoding extracted in alc_attach() (0 terminates the table).
 */
uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };

/* PCI vendor/product IDs of the supported chips. */
const struct pci_matchid alc_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1C },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D_1 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_1 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_2 }
};

/* Autoconf glue. */
struct cfattach alc_ca = {
	sizeof (struct alc_softc), alc_match, alc_attach, NULL,
	alc_activate
};

struct cfdriver alc_cd = {
	NULL, "alc", DV_IFNET
};

/* Debug output is gated on this (ddb/patchable) flag. */
int alcdebug = 0;
#define DPRINTF(x) do { if (alcdebug) printf x; } while (0)

/* Checksum-offload flags this driver can handle on Tx. */
#define ALC_CSUM_FEATURES (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)
136 :
137 : int
138 0 : alc_miibus_readreg(struct device *dev, int phy, int reg)
139 : {
140 0 : struct alc_softc *sc = (struct alc_softc *)dev;
141 : uint32_t v;
142 : int i;
143 :
144 0 : if (phy != sc->alc_phyaddr)
145 0 : return (0);
146 :
147 : /*
148 : * For AR8132 fast ethernet controller, do not report 1000baseT
149 : * capability to mii(4). Even though AR8132 uses the same
150 : * model/revision number of F1 gigabit PHY, the PHY has no
151 : * ability to establish 1000baseT link.
152 : */
153 0 : if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
154 0 : reg == MII_EXTSR)
155 0 : return (0);
156 :
157 0 : CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
158 : MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
159 0 : for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
160 0 : DELAY(5);
161 0 : v = CSR_READ_4(sc, ALC_MDIO);
162 0 : if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
163 : break;
164 : }
165 :
166 0 : if (i == 0) {
167 0 : printf("%s: phy read timeout: phy %d, reg %d\n",
168 0 : sc->sc_dev.dv_xname, phy, reg);
169 0 : return (0);
170 : }
171 :
172 0 : return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
173 0 : }
174 :
175 : void
176 0 : alc_miibus_writereg(struct device *dev, int phy, int reg, int val)
177 : {
178 0 : struct alc_softc *sc = (struct alc_softc *)dev;
179 : uint32_t v;
180 : int i;
181 :
182 0 : if (phy != sc->alc_phyaddr)
183 0 : return;
184 :
185 0 : CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
186 : (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
187 : MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
188 0 : for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
189 0 : DELAY(5);
190 0 : v = CSR_READ_4(sc, ALC_MDIO);
191 0 : if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
192 : break;
193 : }
194 :
195 0 : if (i == 0)
196 0 : printf("%s: phy write timeout: phy %d, reg %d\n",
197 0 : sc->sc_dev.dv_xname, phy, reg);
198 0 : }
199 :
200 : void
201 0 : alc_miibus_statchg(struct device *dev)
202 : {
203 0 : struct alc_softc *sc = (struct alc_softc *)dev;
204 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
205 0 : struct mii_data *mii = &sc->sc_miibus;
206 : uint32_t reg;
207 :
208 0 : if ((ifp->if_flags & IFF_RUNNING) == 0)
209 0 : return;
210 :
211 0 : sc->alc_flags &= ~ALC_FLAG_LINK;
212 0 : if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
213 : (IFM_ACTIVE | IFM_AVALID)) {
214 0 : switch (IFM_SUBTYPE(mii->mii_media_active)) {
215 : case IFM_10_T:
216 : case IFM_100_TX:
217 0 : sc->alc_flags |= ALC_FLAG_LINK;
218 0 : break;
219 : case IFM_1000_T:
220 0 : if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
221 0 : sc->alc_flags |= ALC_FLAG_LINK;
222 : break;
223 : default:
224 : break;
225 : }
226 : }
227 0 : alc_stop_queue(sc);
228 : /* Stop Rx/Tx MACs. */
229 0 : alc_stop_mac(sc);
230 :
231 : /* Program MACs with resolved speed/duplex/flow-control. */
232 0 : if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
233 0 : alc_start_queue(sc);
234 0 : alc_mac_config(sc);
235 : /* Re-enable Tx/Rx MACs. */
236 0 : reg = CSR_READ_4(sc, ALC_MAC_CFG);
237 0 : reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
238 0 : CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
239 0 : alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
240 0 : }
241 0 : }
242 :
243 : void
244 0 : alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
245 : {
246 0 : struct alc_softc *sc = ifp->if_softc;
247 0 : struct mii_data *mii = &sc->sc_miibus;
248 :
249 0 : if ((ifp->if_flags & IFF_UP) == 0)
250 0 : return;
251 :
252 0 : mii_pollstat(mii);
253 0 : ifmr->ifm_status = mii->mii_media_status;
254 0 : ifmr->ifm_active = mii->mii_media_active;
255 0 : }
256 :
257 : int
258 0 : alc_mediachange(struct ifnet *ifp)
259 : {
260 0 : struct alc_softc *sc = ifp->if_softc;
261 0 : struct mii_data *mii = &sc->sc_miibus;
262 : int error;
263 :
264 0 : if (mii->mii_instance != 0) {
265 : struct mii_softc *miisc;
266 :
267 0 : LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
268 0 : mii_phy_reset(miisc);
269 0 : }
270 0 : error = mii_mediachg(mii);
271 :
272 0 : return (error);
273 : }
274 :
275 : int
276 0 : alc_match(struct device *dev, void *match, void *aux)
277 : {
278 0 : return pci_matchbyid((struct pci_attach_args *)aux, alc_devices,
279 : nitems(alc_devices));
280 : }
281 :
/*
 * Read the station address out of the chip into sc->alc_eaddr.
 *
 * If a TWSI EEPROM is present, first trigger a TWSI reload so the
 * controller re-latches its configuration (including the ethernet
 * address) from the EEPROM, then read the address back from the
 * PAR0/PAR1 registers.  The per-chip PHY debug-register pokes that
 * bracket the reload are undocumented vendor magic.
 */
void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		eeprom++;
		switch (sc->sc_product) {
		case PCI_PRODUCT_ATTANSIC_L1C:
		case PCI_PRODUCT_ATTANSIC_L2C:
			/* Make sure the OPT_CFG clock is running first. */
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				/* Read back to flush the write. */
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_L1D:
		case PCI_PRODUCT_ATTANSIC_L1D_1:
		case PCI_PRODUCT_ATTANSIC_L2C_1:
		case PCI_PRODUCT_ATTANSIC_L2C_2:
			/*
			 * Vendor magic: clear bit 7 of PHY debug reg
			 * 0x00 and set bit 3 of debug reg 0x3B before
			 * the reload (both are undone further below
			 * once the reload has finished).
			 */
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		/* Allow LTSSM writes and clear any WOL configuration. */
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		/* Start the reload and poll up to ~100ms for completion. */
		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n", sc->sc_dev.dv_xname);
	}
	if (eeprom != 0) {
		/* Undo the pre-reload vendor magic applied above. */
		switch (sc->sc_product) {
		case PCI_PRODUCT_ATTANSIC_L1C:
		case PCI_PRODUCT_ATTANSIC_L2C:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_L1D:
		case PCI_PRODUCT_ATTANSIC_L1D_1:
		case PCI_PRODUCT_ATTANSIC_L2C_1:
		case PCI_PRODUCT_ATTANSIC_L2C_2:
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	/* PAR0 holds the low four address bytes, PAR1 the high two. */
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
389 :
390 : void
391 0 : alc_disable_l0s_l1(struct alc_softc *sc)
392 : {
393 : uint32_t pmcfg;
394 :
395 : /* Another magic from vendor. */
396 0 : pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
397 0 : pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
398 : PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
399 : PM_CFG_SERDES_PD_EX_L1);
400 0 : pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
401 : PM_CFG_SERDES_L1_ENB;
402 0 : CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
403 0 : }
404 :
405 : void
406 0 : alc_phy_reset(struct alc_softc *sc)
407 : {
408 : uint16_t data;
409 :
410 : /* Reset magic from Linux. */
411 0 : CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
412 0 : CSR_READ_2(sc, ALC_GPHY_CFG);
413 0 : DELAY(10 * 1000);
414 :
415 0 : CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
416 : GPHY_CFG_SEL_ANA_RESET);
417 0 : CSR_READ_2(sc, ALC_GPHY_CFG);
418 0 : DELAY(10 * 1000);
419 :
420 : /* DSP fixup, Vendor magic. */
421 0 : if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
422 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
423 : ALC_MII_DBG_ADDR, 0x000A);
424 0 : data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
425 : ALC_MII_DBG_DATA);
426 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
427 0 : ALC_MII_DBG_DATA, data & 0xDFFF);
428 0 : }
429 0 : if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
430 0 : sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
431 0 : sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
432 0 : sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
433 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
434 : ALC_MII_DBG_ADDR, 0x003B);
435 0 : data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
436 : ALC_MII_DBG_DATA);
437 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
438 0 : ALC_MII_DBG_DATA, data & 0xFFF7);
439 0 : DELAY(20 * 1000);
440 0 : }
441 0 : if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D) {
442 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
443 : ALC_MII_DBG_ADDR, 0x0029);
444 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
445 : ALC_MII_DBG_DATA, 0x929D);
446 0 : }
447 0 : if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
448 0 : sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C ||
449 0 : sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
450 0 : sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
451 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
452 : ALC_MII_DBG_ADDR, 0x0029);
453 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
454 : ALC_MII_DBG_DATA, 0xB6DD);
455 0 : }
456 :
457 : /* Load DSP codes, vendor magic. */
458 : data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
459 : ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
460 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
461 : ALC_MII_DBG_ADDR, MII_ANA_CFG18);
462 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
463 : ALC_MII_DBG_DATA, data);
464 :
465 : data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
466 : ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
467 : ANA_SERDES_EN_LCKDT;
468 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
469 : ALC_MII_DBG_ADDR, MII_ANA_CFG5);
470 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
471 : ALC_MII_DBG_DATA, data);
472 :
473 : data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
474 : ANA_LONG_CABLE_TH_100_MASK) |
475 : ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
476 : ANA_SHORT_CABLE_TH_100_SHIFT) |
477 : ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
478 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
479 : ALC_MII_DBG_ADDR, MII_ANA_CFG54);
480 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
481 : ALC_MII_DBG_DATA, data);
482 :
483 : data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
484 : ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
485 : ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
486 : ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
487 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
488 : ALC_MII_DBG_ADDR, MII_ANA_CFG4);
489 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
490 : ALC_MII_DBG_DATA, data);
491 :
492 : data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
493 : ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
494 : ANA_OEN_125M;
495 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
496 : ALC_MII_DBG_ADDR, MII_ANA_CFG0);
497 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
498 : ALC_MII_DBG_DATA, data);
499 0 : DELAY(1000);
500 :
501 : /* Disable hibernation. */
502 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
503 : 0x0029);
504 0 : data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
505 : ALC_MII_DBG_DATA);
506 0 : data &= ~0x8000;
507 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
508 0 : data);
509 :
510 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
511 : 0x000B);
512 0 : data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
513 : ALC_MII_DBG_DATA);
514 0 : data &= ~0x8000;
515 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
516 0 : data);
517 0 : }
518 :
519 : void
520 0 : alc_phy_down(struct alc_softc *sc)
521 : {
522 0 : switch (sc->sc_product) {
523 : case PCI_PRODUCT_ATTANSIC_L1D:
524 : case PCI_PRODUCT_ATTANSIC_L1D_1:
525 : /*
526 : * GPHY power down caused more problems on AR8151 v2.0.
527 : * When driver is reloaded after GPHY power down,
528 : * accesses to PHY/MAC registers hung the system. Only
529 : * cold boot recovered from it. I'm not sure whether
530 : * AR8151 v1.0 also requires this one though. I don't
531 : * have AR8151 v1.0 controller in hand.
532 : * The only option left is to isolate the PHY and
533 : * initiates power down the PHY which in turn saves
534 : * more power when driver is unloaded.
535 : */
536 0 : alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
537 : MII_BMCR, BMCR_ISO | BMCR_PDOWN);
538 0 : break;
539 : default:
540 : /* Force PHY down. */
541 0 : CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
542 : GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
543 : GPHY_CFG_PWDOWN_HW);
544 0 : DELAY(1000);
545 0 : break;
546 : }
547 0 : }
548 :
/*
 * Program ASPM (PCIe Active State Power Management) in ALC_PM_CFG
 * according to the chip flags and the current link state.  `media' is
 * the resolved IFM_SUBTYPE() of the active media; it only matters for
 * picking the per-chip L1 entry timer at 100/1000 Mbps.
 */
void
alc_aspm(struct alc_softc *sc, uint64_t media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	/* Only APS-capable PCIe chips need the PCIe link control reg. */
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
		    PCI_PCIE_LCSR);
	else
		linkcfg = 0;
	/* Common baseline: default lock-detect timer, ASPM off for now. */
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;	/* 0x80 = extended-sync bit in LCSR */
		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR,
		    linkcfg);
		/* APS chips also get the default L1-entry/PM-req timers. */
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up: enable whichever ASPM states were advertised. */
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			/* AR8152 B keeps L0s off (vendor-prescribed). */
			if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			/* Per-chip L1 entry timer at 100/1000 Mbps. */
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->sc_product) {
				case PCI_PRODUCT_ATTANSIC_L2C_1:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case PCI_PRODUCT_ATTANSIC_L1D_1:
				case PCI_PRODUCT_ATTANSIC_L2C_2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			/* Non-APS chips: SERDES L1 enables, no ASPM. */
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		/* No link: drop the SERDES enables, allow L1 if supported. */
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
632 :
/*
 * Attach: map the register BAR, hook the (MSI or INTx) interrupt,
 * size the PCIe DMA bursts and disable ASPM, reset PHY and MAC, set
 * per-chip feature flags, allocate DMA rings, read the station
 * address, and finally register the ifnet/MII/media with the stack.
 * On failure after the BAR mapping, resources are released at `fail:'.
 */
void
alc_attach(struct device *parent, struct device *self, void *aux)
{

	struct alc_softc *sc = (struct alc_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint16_t burst;
	int base, state, error = 0;
	uint32_t cap, ctl, val;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
	if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	/* Prefer MSI, fall back to a legacy INTx interrupt. */
	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/* Get PCI and chip id/revision. */
	sc->sc_product = PCI_PRODUCT(pa->pa_id);
	sc->alc_rev = PCI_REVISION(pa->pa_class);

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &base, NULL)) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
		/* Extract max read request / payload size from DCSR. */
		burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    base + PCI_PCIE_DCSR) >> 16;
		sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
		sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
		if (alcdebug) {
			printf("%s: Read request size : %u bytes.\n",
			    sc->sc_dev.dv_xname,
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			printf("%s: TLP payload size : %u bytes.\n",
			    sc->sc_dev.dv_xname,
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		/* Cap both burst sizes at 1024 bytes (index 3). */
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;
		/* Clear data link and flow-control protocol error. */
		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		/* AR8152 B v1.0: SERDES CDR/threshold tuning. */
		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}
		/* Disable ASPM L0S and L1. */
		cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    base + PCI_PCIE_LCAP) >> 16;
		/* Bits 10-11 of LCAP: ASPM support advertised? */
		if ((cap & 0x00000c00) != 0) {
			ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
			    base + PCI_PCIE_LCSR) >> 16;
			/* LCSR bit 3: read completion boundary = 128. */
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (alcdebug)
				printf("%s: RCB %u bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			/* LCSR bits 0-1: currently enabled ASPM states. */
			state = ctl & 0x03;
			if (state & 0x01)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & 0x02)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (alcdebug)
				printf("%s: ASPM %s %s\n",
				    sc->sc_dev.dv_xname,
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		}
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_reset(sc);

	/*
	 * One odd thing is AR8132 uses the same PHY hardware(F1
	 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports
	 * the PHY supports 1000Mbps but that's not true. The PHY
	 * used in AR8132 can't establish gigabit link even if it
	 * shows the same PHY model/revision number of AR8131.
	 */
	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_L2C:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO;

	/* Per-chip maximum frame length. */
	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_L1C:
	case PCI_PRODUCT_ATTANSIC_L2C:
		sc->alc_max_framelen = 9 * 1024;
		break;
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		sc->alc_max_framelen = 6 * 1024;
		break;
	}

	/*
	 * It seems that AR813x/AR815x has silicon bug for SMB. In
	 * addition, Atheros said that enabling SMB wouldn't improve
	 * performance. However I think it's bad to access lots of
	 * registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use Tx CMB. It is known to have silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;

	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (alcdebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->alc_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->alc_chip_rev);
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname,
		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
	}

	/* Allocate Tx/Rx/return/status ring DMA memory. */
	error = alc_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	/* Fill in the ifnet and hand it to the network stack. */
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_watchdog = alc_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	bcopy(sc->alc_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef ALC_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = alc_miibus_readreg;
	sc->sc_miibus.mii_writereg = alc_miibus_writereg;
	sc->sc_miibus.mii_statchg = alc_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
	    alc_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	/* With no PHY, fall back to a manual-only media entry. */
	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Periodic housekeeping timer (armed in alc_init()). */
	timeout_set(&sc->alc_tick_ch, alc_tick, sc);

	return;
fail:
	/* Release whatever was set up before the failure. */
	alc_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}
887 :
/*
 * Detach: stop the chip (under splnet), tear down MII/media state,
 * detach from the network stack, free DMA resources, power down the
 * PHY, and finally remove the interrupt handler.  The order matters:
 * the hardware is quiesced before any software state is released.
 */
int
alc_detach(struct device *self, int flags)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	alc_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	alc_dma_free(sc);

	alc_phy_down(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}
916 :
917 : int
918 0 : alc_activate(struct device *self, int act)
919 : {
920 0 : struct alc_softc *sc = (struct alc_softc *)self;
921 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
922 : int rv = 0;
923 :
924 0 : switch (act) {
925 : case DVACT_SUSPEND:
926 0 : if (ifp->if_flags & IFF_RUNNING)
927 0 : alc_stop(sc);
928 0 : rv = config_activate_children(self, act);
929 0 : break;
930 : case DVACT_RESUME:
931 0 : if (ifp->if_flags & IFF_UP)
932 0 : alc_init(ifp);
933 : break;
934 : default:
935 0 : rv = config_activate_children(self, act);
936 0 : break;
937 : }
938 0 : return (rv);
939 : }
940 :
941 : int
942 0 : alc_dma_alloc(struct alc_softc *sc)
943 : {
944 : struct alc_txdesc *txd;
945 : struct alc_rxdesc *rxd;
946 0 : int nsegs, error, i;
947 :
948 : /*
949 : * Create DMA stuffs for TX ring
950 : */
951 0 : error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
952 : ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
953 0 : if (error)
954 0 : return (ENOBUFS);
955 :
956 : /* Allocate DMA'able memory for TX ring */
957 0 : error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
958 : ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
959 : &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
960 0 : if (error) {
961 0 : printf("%s: could not allocate DMA'able memory for Tx ring.\n",
962 0 : sc->sc_dev.dv_xname);
963 0 : return error;
964 : }
965 :
966 0 : error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
967 : nsegs, ALC_TX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_tx_ring,
968 : BUS_DMA_NOWAIT);
969 0 : if (error)
970 0 : return (ENOBUFS);
971 :
972 : /* Load the DMA map for Tx ring. */
973 0 : error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
974 : sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
975 0 : if (error) {
976 0 : printf("%s: could not load DMA'able memory for Tx ring.\n",
977 0 : sc->sc_dev.dv_xname);
978 0 : bus_dmamem_free(sc->sc_dmat,
979 : (bus_dma_segment_t *)&sc->alc_rdata.alc_tx_ring, 1);
980 0 : return error;
981 : }
982 :
983 0 : sc->alc_rdata.alc_tx_ring_paddr =
984 0 : sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
985 :
986 : /*
987 : * Create DMA stuffs for RX ring
988 : */
989 0 : error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
990 : ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
991 0 : if (error)
992 0 : return (ENOBUFS);
993 :
994 : /* Allocate DMA'able memory for RX ring */
995 0 : error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
996 : ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
997 : &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
998 0 : if (error) {
999 0 : printf("%s: could not allocate DMA'able memory for Rx ring.\n",
1000 0 : sc->sc_dev.dv_xname);
1001 0 : return error;
1002 : }
1003 :
1004 0 : error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
1005 : nsegs, ALC_RX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rx_ring,
1006 : BUS_DMA_NOWAIT);
1007 0 : if (error)
1008 0 : return (ENOBUFS);
1009 :
1010 : /* Load the DMA map for Rx ring. */
1011 0 : error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
1012 : sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
1013 0 : if (error) {
1014 0 : printf("%s: could not load DMA'able memory for Rx ring.\n",
1015 0 : sc->sc_dev.dv_xname);
1016 0 : bus_dmamem_free(sc->sc_dmat,
1017 : (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
1018 0 : return error;
1019 : }
1020 :
1021 0 : sc->alc_rdata.alc_rx_ring_paddr =
1022 0 : sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
1023 :
1024 : /*
1025 : * Create DMA stuffs for RX return ring
1026 : */
1027 0 : error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
1028 : ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
1029 0 : if (error)
1030 0 : return (ENOBUFS);
1031 :
1032 : /* Allocate DMA'able memory for RX return ring */
1033 0 : error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
1034 : ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
1035 : &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1036 0 : if (error) {
1037 0 : printf("%s: could not allocate DMA'able memory for Rx "
1038 0 : "return ring.\n", sc->sc_dev.dv_xname);
1039 0 : return error;
1040 : }
1041 :
1042 0 : error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
1043 : nsegs, ALC_RR_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rr_ring,
1044 : BUS_DMA_NOWAIT);
1045 0 : if (error)
1046 0 : return (ENOBUFS);
1047 :
1048 : /* Load the DMA map for Rx return ring. */
1049 0 : error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
1050 : sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
1051 0 : if (error) {
1052 0 : printf("%s: could not load DMA'able memory for Rx return ring."
1053 0 : "\n", sc->sc_dev.dv_xname);
1054 0 : bus_dmamem_free(sc->sc_dmat,
1055 : (bus_dma_segment_t *)&sc->alc_rdata.alc_rr_ring, 1);
1056 0 : return error;
1057 : }
1058 :
1059 0 : sc->alc_rdata.alc_rr_ring_paddr =
1060 0 : sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
1061 :
1062 : /*
1063 : * Create DMA stuffs for CMB block
1064 : */
1065 0 : error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
1066 : ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
1067 : &sc->alc_cdata.alc_cmb_map);
1068 0 : if (error)
1069 0 : return (ENOBUFS);
1070 :
1071 : /* Allocate DMA'able memory for CMB block */
1072 0 : error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
1073 : ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
1074 : &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1075 0 : if (error) {
1076 0 : printf("%s: could not allocate DMA'able memory for "
1077 0 : "CMB block\n", sc->sc_dev.dv_xname);
1078 0 : return error;
1079 : }
1080 :
1081 0 : error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
1082 : nsegs, ALC_CMB_SZ, (caddr_t *)&sc->alc_rdata.alc_cmb,
1083 : BUS_DMA_NOWAIT);
1084 0 : if (error)
1085 0 : return (ENOBUFS);
1086 :
1087 : /* Load the DMA map for CMB block. */
1088 0 : error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
1089 : sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
1090 : BUS_DMA_WAITOK);
1091 0 : if (error) {
1092 0 : printf("%s: could not load DMA'able memory for CMB block\n",
1093 0 : sc->sc_dev.dv_xname);
1094 0 : bus_dmamem_free(sc->sc_dmat,
1095 : (bus_dma_segment_t *)&sc->alc_rdata.alc_cmb, 1);
1096 0 : return error;
1097 : }
1098 :
1099 0 : sc->alc_rdata.alc_cmb_paddr =
1100 0 : sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
1101 :
1102 : /*
1103 : * Create DMA stuffs for SMB block
1104 : */
1105 0 : error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
1106 : ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
1107 : &sc->alc_cdata.alc_smb_map);
1108 0 : if (error)
1109 0 : return (ENOBUFS);
1110 :
1111 : /* Allocate DMA'able memory for SMB block */
1112 0 : error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
1113 : ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
1114 : &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1115 0 : if (error) {
1116 0 : printf("%s: could not allocate DMA'able memory for "
1117 0 : "SMB block\n", sc->sc_dev.dv_xname);
1118 0 : return error;
1119 : }
1120 :
1121 0 : error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
1122 : nsegs, ALC_SMB_SZ, (caddr_t *)&sc->alc_rdata.alc_smb,
1123 : BUS_DMA_NOWAIT);
1124 0 : if (error)
1125 0 : return (ENOBUFS);
1126 :
1127 : /* Load the DMA map for SMB block */
1128 0 : error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
1129 : sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
1130 : BUS_DMA_WAITOK);
1131 0 : if (error) {
1132 0 : printf("%s: could not load DMA'able memory for SMB block\n",
1133 0 : sc->sc_dev.dv_xname);
1134 0 : bus_dmamem_free(sc->sc_dmat,
1135 : (bus_dma_segment_t *)&sc->alc_rdata.alc_smb, 1);
1136 0 : return error;
1137 : }
1138 :
1139 0 : sc->alc_rdata.alc_smb_paddr =
1140 0 : sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
1141 :
1142 :
1143 : /* Create DMA maps for Tx buffers. */
1144 0 : for (i = 0; i < ALC_TX_RING_CNT; i++) {
1145 0 : txd = &sc->alc_cdata.alc_txdesc[i];
1146 0 : txd->tx_m = NULL;
1147 0 : txd->tx_dmamap = NULL;
1148 0 : error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
1149 : ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1150 : &txd->tx_dmamap);
1151 0 : if (error) {
1152 0 : printf("%s: could not create Tx dmamap.\n",
1153 0 : sc->sc_dev.dv_xname);
1154 0 : return error;
1155 : }
1156 : }
1157 :
1158 : /* Create DMA maps for Rx buffers. */
1159 0 : error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1160 : BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
1161 0 : if (error) {
1162 0 : printf("%s: could not create spare Rx dmamap.\n",
1163 0 : sc->sc_dev.dv_xname);
1164 0 : return error;
1165 : }
1166 :
1167 0 : for (i = 0; i < ALC_RX_RING_CNT; i++) {
1168 0 : rxd = &sc->alc_cdata.alc_rxdesc[i];
1169 0 : rxd->rx_m = NULL;
1170 0 : rxd->rx_dmamap = NULL;
1171 0 : error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1172 : MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
1173 0 : if (error) {
1174 0 : printf("%s: could not create Rx dmamap.\n",
1175 0 : sc->sc_dev.dv_xname);
1176 0 : return error;
1177 : }
1178 : }
1179 :
1180 0 : return (0);
1181 0 : }
1182 :
1183 :
1184 : void
1185 0 : alc_dma_free(struct alc_softc *sc)
1186 : {
1187 : struct alc_txdesc *txd;
1188 : struct alc_rxdesc *rxd;
1189 : int i;
1190 :
1191 : /* Tx buffers */
1192 0 : for (i = 0; i < ALC_TX_RING_CNT; i++) {
1193 0 : txd = &sc->alc_cdata.alc_txdesc[i];
1194 0 : if (txd->tx_dmamap != NULL) {
1195 0 : bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
1196 0 : txd->tx_dmamap = NULL;
1197 0 : }
1198 : }
1199 : /* Rx buffers */
1200 0 : for (i = 0; i < ALC_RX_RING_CNT; i++) {
1201 0 : rxd = &sc->alc_cdata.alc_rxdesc[i];
1202 0 : if (rxd->rx_dmamap != NULL) {
1203 0 : bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
1204 0 : rxd->rx_dmamap = NULL;
1205 0 : }
1206 : }
1207 0 : if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1208 0 : bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
1209 0 : sc->alc_cdata.alc_rx_sparemap = NULL;
1210 0 : }
1211 :
1212 : /* Tx ring. */
1213 0 : if (sc->alc_cdata.alc_tx_ring_map != NULL)
1214 0 : bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
1215 0 : if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1216 0 : sc->alc_rdata.alc_tx_ring != NULL)
1217 0 : bus_dmamem_free(sc->sc_dmat,
1218 : (bus_dma_segment_t *)sc->alc_rdata.alc_tx_ring, 1);
1219 0 : sc->alc_rdata.alc_tx_ring = NULL;
1220 0 : sc->alc_cdata.alc_tx_ring_map = NULL;
1221 :
1222 : /* Rx ring. */
1223 0 : if (sc->alc_cdata.alc_rx_ring_map != NULL)
1224 0 : bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
1225 0 : if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1226 0 : sc->alc_rdata.alc_rx_ring != NULL)
1227 0 : bus_dmamem_free(sc->sc_dmat,
1228 : (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
1229 0 : sc->alc_rdata.alc_rx_ring = NULL;
1230 0 : sc->alc_cdata.alc_rx_ring_map = NULL;
1231 :
1232 : /* Rx return ring. */
1233 0 : if (sc->alc_cdata.alc_rr_ring_map != NULL)
1234 0 : bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
1235 0 : if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1236 0 : sc->alc_rdata.alc_rr_ring != NULL)
1237 0 : bus_dmamem_free(sc->sc_dmat,
1238 : (bus_dma_segment_t *)sc->alc_rdata.alc_rr_ring, 1);
1239 0 : sc->alc_rdata.alc_rr_ring = NULL;
1240 0 : sc->alc_cdata.alc_rr_ring_map = NULL;
1241 :
1242 : /* CMB block */
1243 0 : if (sc->alc_cdata.alc_cmb_map != NULL)
1244 0 : bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
1245 0 : if (sc->alc_cdata.alc_cmb_map != NULL &&
1246 0 : sc->alc_rdata.alc_cmb != NULL)
1247 0 : bus_dmamem_free(sc->sc_dmat,
1248 : (bus_dma_segment_t *)sc->alc_rdata.alc_cmb, 1);
1249 0 : sc->alc_rdata.alc_cmb = NULL;
1250 0 : sc->alc_cdata.alc_cmb_map = NULL;
1251 :
1252 : /* SMB block */
1253 0 : if (sc->alc_cdata.alc_smb_map != NULL)
1254 0 : bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
1255 0 : if (sc->alc_cdata.alc_smb_map != NULL &&
1256 0 : sc->alc_rdata.alc_smb != NULL)
1257 0 : bus_dmamem_free(sc->sc_dmat,
1258 : (bus_dma_segment_t *)sc->alc_rdata.alc_smb, 1);
1259 0 : sc->alc_rdata.alc_smb = NULL;
1260 0 : sc->alc_cdata.alc_smb_map = NULL;
1261 0 : }
1262 :
/*
 * Encapsulate an mbuf chain into Tx descriptors and hand it to the
 * hardware ring.  On success the mbuf is owned by the driver until
 * alc_txeof() reclaims it; on any failure the mbuf is freed and an
 * errno is returned.
 */
int
alc_encap(struct alc_softc *sc, struct mbuf *m)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, idx, prod;

	/*
	 * poff (payload/checksum start offset) is never changed below,
	 * so a checksum-offloaded frame always gets offset 0.
	 * NOTE(review): looks like header parsing to compute poff was
	 * dropped in this port -- confirm against the hardware needs.
	 */
	cflags = vtag = 0;
	poff = 0;

	prod = sc->alc_cdata.alc_tx_prod;
	txd = &sc->alc_cdata.alc_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	/* EFBIG means too many segments; try once more after defrag. */
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	desc = NULL;
	idx = 0;
#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (m->m_flags & M_VLANTAG) {
		vtag = htons(m->m_pkthdr.ether_vtag);
		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
		cflags |= TD_INS_VLAN_TAG;
	}
#endif
	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
		cflags |= TD_CUSTOM_CSUM;
		/* Set checksum start offset. */
		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
		    TD_PLOAD_OFFSET_MASK;
	}

	/* One descriptor per DMA segment; same cflags/vtag on each. */
	for (; idx < map->dm_nsegs; idx++) {
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len =
		    htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
		desc->flags = htole32(cflags);
		desc->addr = htole64(map->dm_segs[idx].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->alc_cdata.alc_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
	desc = &sc->alc_rdata.alc_tx_ring[prod];
	desc->flags |= htole32(TD_EOP);

	/*
	 * Swap dmamap of the first and the last.  The loaded map stays
	 * with the descriptor that alc_txeof() will see last, so the
	 * unload/free happens only once the whole frame completed.
	 */
	txd = &sc->alc_cdata.alc_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);

 drop:
	m_freem(m);
	return (error);
}
1346 :
/*
 * ifnet start routine: drain the interface send queue into the Tx
 * ring, then kick the hardware producer index once for the whole
 * batch.  Runs only when the interface is RUNNING, not oactive, and
 * link is up.
 */
void
alc_start(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq = 0;

	/* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	for (;;) {
		/*
		 * Stop while a worst-case frame (ALC_MAXTXSEGS
		 * descriptors, plus slack) still fits in the ring.
		 */
		if (sc->alc_cdata.alc_tx_cnt + ALC_MAXTXSEGS >=
		    ALC_TX_RING_CNT - 3) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* alc_encap() frees the mbuf on failure. */
		if (alc_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}
		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
		    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Kick. Assume we're using normal Tx priority queue. */
		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
		    (sc->alc_cdata.alc_tx_prod <<
		    MBOX_TD_PROD_LO_IDX_SHIFT) &
		    MBOX_TD_PROD_LO_IDX_MASK);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALC_TX_TIMEOUT;
	}
}
1406 :
1407 : void
1408 0 : alc_watchdog(struct ifnet *ifp)
1409 : {
1410 0 : struct alc_softc *sc = ifp->if_softc;
1411 :
1412 0 : if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
1413 0 : printf("%s: watchdog timeout (missed link)\n",
1414 : sc->sc_dev.dv_xname);
1415 0 : ifp->if_oerrors++;
1416 0 : alc_init(ifp);
1417 0 : return;
1418 : }
1419 :
1420 0 : printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1421 0 : ifp->if_oerrors++;
1422 0 : alc_init(ifp);
1423 0 : alc_start(ifp);
1424 0 : }
1425 :
1426 : int
1427 0 : alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1428 : {
1429 0 : struct alc_softc *sc = ifp->if_softc;
1430 0 : struct mii_data *mii = &sc->sc_miibus;
1431 0 : struct ifreq *ifr = (struct ifreq *)data;
1432 : int s, error = 0;
1433 :
1434 0 : s = splnet();
1435 :
1436 0 : switch (cmd) {
1437 : case SIOCSIFADDR:
1438 0 : ifp->if_flags |= IFF_UP;
1439 0 : if (!(ifp->if_flags & IFF_RUNNING))
1440 0 : alc_init(ifp);
1441 : break;
1442 :
1443 : case SIOCSIFFLAGS:
1444 0 : if (ifp->if_flags & IFF_UP) {
1445 0 : if (ifp->if_flags & IFF_RUNNING)
1446 0 : error = ENETRESET;
1447 : else
1448 0 : alc_init(ifp);
1449 : } else {
1450 0 : if (ifp->if_flags & IFF_RUNNING)
1451 0 : alc_stop(sc);
1452 : }
1453 : break;
1454 :
1455 : case SIOCSIFMEDIA:
1456 : case SIOCGIFMEDIA:
1457 0 : error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1458 0 : break;
1459 :
1460 : default:
1461 0 : error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1462 0 : break;
1463 : }
1464 :
1465 0 : if (error == ENETRESET) {
1466 0 : if (ifp->if_flags & IFF_RUNNING)
1467 0 : alc_iff(sc);
1468 : error = 0;
1469 0 : }
1470 :
1471 0 : splx(s);
1472 0 : return (error);
1473 : }
1474 :
1475 : void
1476 0 : alc_mac_config(struct alc_softc *sc)
1477 : {
1478 : struct mii_data *mii;
1479 : uint32_t reg;
1480 :
1481 0 : mii = &sc->sc_miibus;
1482 0 : reg = CSR_READ_4(sc, ALC_MAC_CFG);
1483 0 : reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
1484 : MAC_CFG_SPEED_MASK);
1485 0 : if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
1486 0 : sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
1487 0 : sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
1488 0 : reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
1489 : /* Reprogram MAC with resolved speed/duplex. */
1490 0 : switch (IFM_SUBTYPE(mii->mii_media_active)) {
1491 : case IFM_10_T:
1492 : case IFM_100_TX:
1493 0 : reg |= MAC_CFG_SPEED_10_100;
1494 0 : break;
1495 : case IFM_1000_T:
1496 0 : reg |= MAC_CFG_SPEED_1000;
1497 0 : break;
1498 : }
1499 0 : if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1500 0 : reg |= MAC_CFG_FULL_DUPLEX;
1501 0 : if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1502 0 : reg |= MAC_CFG_TX_FC;
1503 0 : if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1504 0 : reg |= MAC_CFG_RX_FC;
1505 : }
1506 0 : CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1507 0 : }
1508 :
/*
 * Clear the hardware statistics.  Chips without the SMB bug clear the
 * DMA'd statistics block; buggy chips have their MIB counters read
 * directly (the registers clear on read), with the values discarded.
 */
void
alc_stats_clear(struct alc_softc *sc)
{
	/*
	 * sb is never filled in here; its field addresses only serve
	 * as loop bounds to walk the counter register layout.
	 */
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		/* Read Rx counters to clear them; values are discarded. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}
1540 :
/*
 * Harvest hardware statistics into the softc accumulators and update
 * the interface error/collision counters.  Chips without the SMB bug
 * read the DMA'd statistics block (and bail out if the hardware has
 * not updated it); buggy chips read the MIB counter registers into a
 * stack copy instead.
 */
void
alc_stats_update(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Nothing new from the hardware yet. */
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Fold the chip counters into the ifnet statistics. */
	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
	    smb->tx_underrun + smb->tx_pkts_truncated;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1647 :
/*
 * Interrupt handler.  Returns 1 when the interrupt was ours, 0
 * otherwise.  Interrupts are masked while the handler runs and
 * re-enabled on the way out -- except on the fatal DMA/TxQ error path,
 * which re-initializes the chip and returns without re-enabling here
 * (alc_init() reprograms the interrupt state).
 */
int
alc_intr(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	/* Not our interrupt. */
	if ((status & ALC_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);

	/* Re-read; the cause may have been cleared in the meantime. */
	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	if ((status & ALC_INTRS) == 0)
		goto back;

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_RX_PKT)
			alc_rxintr(sc);

		/* Fatal errors: reset the chip and bail out. */
		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_TXQ_TO_RST)
				printf("%s: TxQ reset! -- resetting\n",
				    sc->sc_dev.dv_xname);
			alc_init(ifp);
			return (0);
		}

		if (status & INTR_TX_PKT)
			alc_txeof(sc);

		alc_start(ifp);
	}

	claimed = 1;
back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	return (claimed);
}
1701 :
/*
 * Reclaim Tx descriptors that the hardware has finished with.  The
 * hardware consumer index comes from the CMB when usable, otherwise
 * from the mailbox register; mbufs of completed frames are unloaded
 * and freed, and the watchdog is disarmed once the ring drains.
 */
void
alc_txeof(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->alc_cdata.alc_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		prod = sc->alc_rdata.alc_cmb->cons;
	} else
		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
	/* Assume we're using normal Tx priority queue. */
	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
	    MBOX_TD_CONS_LO_IDX_SHIFT;
	cons = sc->alc_cdata.alc_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 *
	 * NOTE(review): prog is incremented both in the for clause and
	 * in the body; harmless since prog is never read, but one of
	 * the two is redundant.
	 */
	for (prog = 0; cons != prod; prog++,
	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
		if (sc->alc_cdata.alc_tx_cnt <= 0)
			break;
		prog++;
		ifq_clr_oactive(&ifp->if_snd);
		sc->alc_cdata.alc_tx_cnt--;
		txd = &sc->alc_cdata.alc_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	sc->alc_cdata.alc_tx_cons = cons;
	/*
	 * Unarm watchdog timer only when there is no pending
	 * frames in Tx queue.
	 */
	if (sc->alc_cdata.alc_tx_cnt == 0)
		ifp->if_timer = 0;
}
1759 :
/*
 * Attach a fresh mbuf cluster to an Rx descriptor slot.  The new
 * cluster is loaded into the spare dmamap first; only on success is
 * the old buffer unloaded and the maps swapped, so on failure the slot
 * keeps its previous buffer intact.  Returns 0 or an errno.
 */
int
alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;

	/* Load into the spare map so failure leaves rxd untouched. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Release the old buffer's DMA mapping, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	/* Swap the loaded spare map into the slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
	sc->alc_cdata.alc_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	/* Point the hardware descriptor at the new cluster. */
	rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	return (0);
}
1801 :
/*
 * Rx interrupt service: walk the Rx return ring, dispatching each
 * valid return descriptor to alc_rxeof(), then advance the consumer
 * indices and let the hardware know new Rx buffers are available.
 */
void
alc_rxintr(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = letoh32(rrd->status);
		/* Stop at the first descriptor the hardware still owns. */
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(letoh32(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			if (alcdebug)
				printf("%s: unexpected segment count -- "
				    "resetting\n", sc->sc_dev.dv_xname);
			break;
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that controller see
		 * modified buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
		    sc->alc_cdata.alc_rx_cons);
	}
}
1867 :
/*
 * Receive a frame.
 *
 * Assemble the packet described by the Rx return descriptor 'rrd':
 * pull 'nsegs' buffers off the Rx ring starting at RRD_RD_IDX, chain
 * them into one mbuf chain, trim the trailing Ethernet CRC (the
 * L1C/L2C cannot strip it in hardware) and queue the result for
 * if_input().  Each consumed buffer is immediately replaced via
 * alc_newbuf(); on replacement failure the partial chain is dropped.
 */
void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_rxdesc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t rdinfo, status;
	int count, nsegs, rx_cons;

	status = letoh32(rrd->status);
	rdinfo = letoh32(rrd->rdinfo);
	/* First Rx descriptor index and number of segments in this frame. */
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	/* Total frame length in bytes, CRC included. */
	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
		/*
		 * We want to pass the following frames to upper
		 * layer regardless of error status of Rx return
		 * ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    does not match.
		 *
		 * Force network stack compute checksum for
		 * errored frames.
		 */
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

	/* Walk the Rx descriptors belonging to this frame. */
	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs. */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			/* First segment of the frame. */
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			/* Append; only the head mbuf keeps M_PKTHDR. */
			mp->m_flags &= ~M_PKTHDR;
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			m->m_flags |= M_PKTHDR;
			/*
			 * It seems that L1C/L2C controller has no way
			 * to tell hardware to strip CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					/*
					 * The CRC spills into the previous
					 * mbuf: drop the last mbuf entirely
					 * and shorten the new tail.
					 */
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
#if NVLAN > 0
			if (status & RRD_VLAN_TAG) {
				u_int32_t vtag = RRD_VLAN(letoh32(rrd->vtag));
				m->m_pkthdr.ether_vtag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}
#endif


			ml_enqueue(&ml, m);
		}
	}
	if_input(ifp, &ml);

	/* Reset mbuf chains. */
	ALC_RXCHAIN_RESET(sc);
}
1982 :
1983 : void
1984 0 : alc_tick(void *xsc)
1985 : {
1986 0 : struct alc_softc *sc = xsc;
1987 0 : struct mii_data *mii = &sc->sc_miibus;
1988 : int s;
1989 :
1990 0 : s = splnet();
1991 0 : mii_tick(mii);
1992 0 : alc_stats_update(sc);
1993 :
1994 0 : timeout_add_sec(&sc->alc_tick_ch, 1);
1995 0 : splx(s);
1996 0 : }
1997 :
1998 : void
1999 0 : alc_reset(struct alc_softc *sc)
2000 : {
2001 : uint32_t reg;
2002 : int i;
2003 :
2004 0 : reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
2005 0 : reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2006 0 : CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2007 0 : for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2008 0 : DELAY(10);
2009 0 : if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2010 : break;
2011 : }
2012 0 : if (i == 0)
2013 0 : printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname);
2014 :
2015 0 : for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2016 0 : if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2017 : break;
2018 0 : DELAY(10);
2019 : }
2020 :
2021 0 : if (i == 0)
2022 0 : printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname,
2023 : reg);
2024 0 : }
2025 :
/*
 * Bring the interface up (ifp->if_init hook).
 *
 * Stops and resets the chip, rebuilds all descriptor rings, programs
 * the station address, ring base addresses/sizes, interrupt
 * moderation, queue/DMA/MAC configuration and the receive filter,
 * then kicks off media selection and the periodic tick.  Returns 0 on
 * success or an errno if Rx buffer allocation fails.
 *
 * NOTE(review): the register programming order below follows the
 * vendor sequence — do not reorder writes.
 */
int
alc_init(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);

	/* Initialize Rx descriptors. */
	error = alc_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		alc_stop(sc);
		return (error);
	}
	alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);

	/* Enable all clocks. */
	CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALC_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear WOL status and disable all WOL feature as WOL
	 * would interfere Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALC_WOL_CFG);
	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
	/* Set Tx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_tx_ring_paddr;
	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We don't use high priority ring. */
	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
	/* Set Rx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_rx_ring_paddr;
	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We use one Rx ring. */
	CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);

	/*
	 * Let hardware split jumbo frames into alc_max_buf_sized chunks.
	 * if it do not fit the buffer size. Rx return descriptor holds
	 * a counter that indicates how many fragments were made by the
	 * hardware. The buffer size should be multiple of 8 bytes.
	 * Since hardware has limit on the size of buffer size, always
	 * use the maximum value.
	 * For strict-alignment architectures make sure to reduce buffer
	 * size by 8 bytes to make room for alignment fixup.
	 */
	sc->alc_buf_size = RX_BUF_SIZE_MAX;
	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);

	paddr = sc->alc_rdata.alc_rr_ring_paddr;
	/* Set Rx return descriptor base addresses. */
	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We use one Rx return ring. */
	CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
	/* Set Rx return descriptor counter. */
	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
	/* Coalescing message block (CMB) and statistics block (SMB). */
	paddr = sc->alc_rdata.alc_cmb_paddr;
	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
	paddr = sc->alc_rdata.alc_smb_paddr;
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));

	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
		/* Reconfigure SRAM - Vendor magic. */
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
	}

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Configure interrupt moderation timer. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
	/*
	 * We don't want to automatic interrupt clear as task queue
	 * for the interrupt should know interrupt status.
	 */
	reg = MASTER_SA_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	/*
	 * Disable interrupt re-trigger timer. We don't want automatic
	 * re-triggering of un-ACKed interrupts.
	 */
	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
	/* Configure CMB. */
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
	} else
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	/*
	 * Hardware can be configured to issue SMB interrupt based
	 * on programmed interval. Since there is a callout that is
	 * invoked for every hz in driver we use that instead of
	 * relying on periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use maximum frame size that controller can support.
	 * Otherwise received frames that has larger frame length
	 * than alc(4) MTU would be silently dropped in hardware. This
	 * would make path-MTU discovery hard as sender wouldn't get
	 * any responses from receiver. alc(4) supports
	 * multi-fragmented frames on Rx path so it has no issue on
	 * assembling fragmented frames. Using maximum frame size also
	 * removes the need to reinitialize hardware when interface
	 * MTU configuration was changed.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_max_framelen);

	/* Disable header split(?) */
	CSR_WRITE_4(sc, ALC_HDS_CFG, 0);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALC_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));
	/*
	 * Set TSO/checksum offload threshold. For frames that is
	 * larger than this threshold, hardware wouldn't do
	 * TSO/checksum offloading.
	 */
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
	    (sc->alc_max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
	    TSO_OFFLOAD_THRESH_MASK);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	/* L2C variants use half the FIFO burst. */
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
		reg >>= 1;
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);

	/* Configure Rx free descriptor pre-fetching. */
	CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
	    ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
	    RX_RD_FREE_THRESH_HI_MASK) |
	    ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
	    RX_RD_FREE_THRESH_LO_MASK));

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 */
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 8) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}
	/* Vendor workaround: slow down SERDES clocks on these parts. */
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1)
		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
		    CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
		    SERDES_PHY_CLK_SLOWDOWN);

	/* Disable RSS until I understand L1C/L2C's RSS logic. */
	CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALC_RSS_CPU, 0);

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
		reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of MAC for resolved speed/duplex
	 *  is followed after detection of link establishment.
	 *  AR813x/AR815x always does checksum computation regardless
	 *  of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to
	 *  have bug in protocol field in Rx return structure so
	 *  these controllers can't handle fragmented frames. Disable
	 *  Rx checksum offloading until there is a newer controller
	 *  that has sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_iff(sc);

	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	/* Start the one-second stats/link tick. */
	timeout_add_sec(&sc->alc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}
2334 :
/*
 * Bring the interface down.
 *
 * Teardown order matters: mark the interface stopped, kill the tick,
 * mask interrupts, quiesce the Rx/Tx queues and DMA engines, stop the
 * MACs, then free every mbuf still owned by the rings so a subsequent
 * alc_init() starts from a clean slate.
 */
void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	timeout_del(&sc->alc_tick_ch);
	sc->alc_flags &= ~ALC_FLAG_LINK;

	/* Harvest final statistics before the counters are lost. */
	alc_stats_update(sc);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	alc_stop_queue(sc);

	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);

	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Reclaim Rx buffers that have been processed. */
	m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
			    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
2401 :
2402 : void
2403 0 : alc_stop_mac(struct alc_softc *sc)
2404 : {
2405 : uint32_t reg;
2406 : int i;
2407 :
2408 : /* Disable Rx/Tx MAC. */
2409 0 : reg = CSR_READ_4(sc, ALC_MAC_CFG);
2410 0 : if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
2411 0 : reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2412 0 : CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2413 0 : }
2414 0 : for (i = ALC_TIMEOUT; i > 0; i--) {
2415 0 : reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2416 0 : if (reg == 0)
2417 : break;
2418 0 : DELAY(10);
2419 : }
2420 0 : if (i == 0)
2421 0 : printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
2422 0 : sc->sc_dev.dv_xname, reg);
2423 0 : }
2424 :
2425 : void
2426 0 : alc_start_queue(struct alc_softc *sc)
2427 : {
2428 0 : uint32_t qcfg[] = {
2429 : 0,
2430 : RXQ_CFG_QUEUE0_ENB,
2431 : RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
2432 : RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
2433 : RXQ_CFG_ENB
2434 : };
2435 : uint32_t cfg;
2436 :
2437 : /* Enable RxQ. */
2438 0 : cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
2439 0 : cfg &= ~RXQ_CFG_ENB;
2440 0 : cfg |= qcfg[1];
2441 0 : CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
2442 : /* Enable TxQ. */
2443 0 : cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
2444 0 : cfg |= TXQ_CFG_ENB;
2445 0 : CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
2446 0 : }
2447 :
2448 : void
2449 0 : alc_stop_queue(struct alc_softc *sc)
2450 : {
2451 : uint32_t reg;
2452 : int i;
2453 :
2454 : /* Disable RxQ. */
2455 0 : reg = CSR_READ_4(sc, ALC_RXQ_CFG);
2456 0 : if ((reg & RXQ_CFG_ENB) != 0) {
2457 0 : reg &= ~RXQ_CFG_ENB;
2458 0 : CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2459 0 : }
2460 : /* Disable TxQ. */
2461 0 : reg = CSR_READ_4(sc, ALC_TXQ_CFG);
2462 0 : if ((reg & TXQ_CFG_ENB) != 0) {
2463 0 : reg &= ~TXQ_CFG_ENB;
2464 0 : CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
2465 0 : }
2466 0 : for (i = ALC_TIMEOUT; i > 0; i--) {
2467 0 : reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2468 0 : if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
2469 : break;
2470 0 : DELAY(10);
2471 : }
2472 0 : if (i == 0)
2473 0 : printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
2474 0 : sc->sc_dev.dv_xname, reg);
2475 0 : }
2476 :
2477 : void
2478 0 : alc_init_tx_ring(struct alc_softc *sc)
2479 : {
2480 : struct alc_ring_data *rd;
2481 : struct alc_txdesc *txd;
2482 : int i;
2483 :
2484 0 : sc->alc_cdata.alc_tx_prod = 0;
2485 0 : sc->alc_cdata.alc_tx_cons = 0;
2486 0 : sc->alc_cdata.alc_tx_cnt = 0;
2487 :
2488 0 : rd = &sc->alc_rdata;
2489 0 : bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
2490 0 : for (i = 0; i < ALC_TX_RING_CNT; i++) {
2491 0 : txd = &sc->alc_cdata.alc_txdesc[i];
2492 0 : txd->tx_m = NULL;
2493 : }
2494 :
2495 0 : bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2496 : sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2497 0 : }
2498 :
2499 : int
2500 0 : alc_init_rx_ring(struct alc_softc *sc)
2501 : {
2502 : struct alc_ring_data *rd;
2503 : struct alc_rxdesc *rxd;
2504 : int i;
2505 :
2506 0 : sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
2507 0 : rd = &sc->alc_rdata;
2508 0 : bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
2509 0 : for (i = 0; i < ALC_RX_RING_CNT; i++) {
2510 0 : rxd = &sc->alc_cdata.alc_rxdesc[i];
2511 0 : rxd->rx_m = NULL;
2512 0 : rxd->rx_desc = &rd->alc_rx_ring[i];
2513 0 : if (alc_newbuf(sc, rxd) != 0)
2514 0 : return (ENOBUFS);
2515 : }
2516 :
2517 : /*
2518 : * Since controller does not update Rx descriptors, driver
2519 : * does have to read Rx descriptors back so BUS_DMASYNC_PREWRITE
2520 : * is enough to ensure coherence.
2521 : */
2522 0 : bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2523 : sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2524 : /* Let controller know availability of new Rx buffers. */
2525 0 : CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
2526 :
2527 0 : return (0);
2528 0 : }
2529 :
2530 : void
2531 0 : alc_init_rr_ring(struct alc_softc *sc)
2532 : {
2533 : struct alc_ring_data *rd;
2534 :
2535 0 : sc->alc_cdata.alc_rr_cons = 0;
2536 0 : ALC_RXCHAIN_RESET(sc);
2537 :
2538 0 : rd = &sc->alc_rdata;
2539 0 : bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
2540 0 : bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2541 : sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2542 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2543 0 : }
2544 :
2545 : void
2546 0 : alc_init_cmb(struct alc_softc *sc)
2547 : {
2548 : struct alc_ring_data *rd;
2549 :
2550 0 : rd = &sc->alc_rdata;
2551 0 : bzero(rd->alc_cmb, ALC_CMB_SZ);
2552 0 : bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2553 : sc->alc_cdata.alc_cmb_map->dm_mapsize,
2554 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2555 0 : }
2556 :
2557 : void
2558 0 : alc_init_smb(struct alc_softc *sc)
2559 : {
2560 : struct alc_ring_data *rd;
2561 :
2562 0 : rd = &sc->alc_rdata;
2563 0 : bzero(rd->alc_smb, ALC_SMB_SZ);
2564 0 : bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2565 : sc->alc_cdata.alc_smb_map->dm_mapsize,
2566 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2567 0 : }
2568 :
2569 : void
2570 0 : alc_rxvlan(struct alc_softc *sc)
2571 : {
2572 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2573 : uint32_t reg;
2574 :
2575 0 : reg = CSR_READ_4(sc, ALC_MAC_CFG);
2576 0 : if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
2577 0 : reg |= MAC_CFG_VLAN_TAG_STRIP;
2578 : else
2579 0 : reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2580 0 : CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2581 0 : }
2582 :
2583 : void
2584 0 : alc_iff(struct alc_softc *sc)
2585 : {
2586 0 : struct arpcom *ac = &sc->sc_arpcom;
2587 0 : struct ifnet *ifp = &ac->ac_if;
2588 : struct ether_multi *enm;
2589 : struct ether_multistep step;
2590 : uint32_t crc;
2591 0 : uint32_t mchash[2];
2592 : uint32_t rxcfg;
2593 :
2594 0 : rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
2595 0 : rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2596 0 : ifp->if_flags &= ~IFF_ALLMULTI;
2597 :
2598 : /*
2599 : * Always accept broadcast frames.
2600 : */
2601 0 : rxcfg |= MAC_CFG_BCAST;
2602 :
2603 0 : if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2604 0 : ifp->if_flags |= IFF_ALLMULTI;
2605 0 : if (ifp->if_flags & IFF_PROMISC)
2606 0 : rxcfg |= MAC_CFG_PROMISC;
2607 : else
2608 0 : rxcfg |= MAC_CFG_ALLMULTI;
2609 0 : mchash[0] = mchash[1] = 0xFFFFFFFF;
2610 0 : } else {
2611 : /* Program new filter. */
2612 0 : bzero(mchash, sizeof(mchash));
2613 :
2614 0 : ETHER_FIRST_MULTI(step, ac, enm);
2615 0 : while (enm != NULL) {
2616 0 : crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2617 :
2618 0 : mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2619 :
2620 0 : ETHER_NEXT_MULTI(step, enm);
2621 : }
2622 : }
2623 :
2624 0 : CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
2625 0 : CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
2626 0 : CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
2627 0 : }
|