/*	$OpenBSD: if_et.c,v 1.37 2017/09/08 05:36:52 deraadt Exp $	*/
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_etreg.h>

/* XXX temporary porting goop */
#define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__)
#undef KASSERT
#define KASSERT(cond, complaint) if (!(cond)) panic complaint

/* these macros in particular need to die, so gross */
#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
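/*
 * Worked example: with __mask = 0x0ff0, __LOWEST_SET_BIT(0x0ff0) = 0x0010,
 * so __SHIFTIN(0x2a, 0x0ff0) = 0x02a0 places a value in the field and
 * __SHIFTOUT(0x02a0, 0x0ff0) = 0x2a extracts it again.  Multiplying or
 * dividing by the mask's lowest set bit is just a shift by the field offset.
 */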
/* XXX end porting goop */

int	et_match(struct device *, void *, void *);
void	et_attach(struct device *, struct device *, void *);
int	et_detach(struct device *, int);

int	et_miibus_readreg(struct device *, int, int);
void	et_miibus_writereg(struct device *, int, int, int);
void	et_miibus_statchg(struct device *);

int	et_init(struct ifnet *);
int	et_ioctl(struct ifnet *, u_long, caddr_t);
void	et_start(struct ifnet *);
void	et_watchdog(struct ifnet *);
int	et_ifmedia_upd(struct ifnet *);
void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	et_intr(void *);
void	et_enable_intrs(struct et_softc *, uint32_t);
void	et_disable_intrs(struct et_softc *);
void	et_rxeof(struct et_softc *);
void	et_txeof(struct et_softc *);
void	et_txtick(void *);

int	et_dma_alloc(struct et_softc *);
void	et_dma_free(struct et_softc *);
int	et_dma_mem_create(struct et_softc *, bus_size_t,
	    void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
void	et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
int	et_dma_mbuf_create(struct et_softc *);
void	et_dma_mbuf_destroy(struct et_softc *, int, const int[]);

int	et_init_tx_ring(struct et_softc *);
int	et_init_rx_ring(struct et_softc *);
void	et_free_tx_ring(struct et_softc *);
void	et_free_rx_ring(struct et_softc *);
int	et_encap(struct et_softc *, struct mbuf **);
int	et_newbuf(struct et_rxbuf_data *, int, int, int);
int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

void	et_stop(struct et_softc *);
int	et_chip_init(struct et_softc *);
void	et_chip_attach(struct et_softc *);
void	et_init_mac(struct et_softc *);
void	et_init_rxmac(struct et_softc *);
void	et_init_txmac(struct et_softc *);
int	et_init_rxdma(struct et_softc *);
int	et_init_txdma(struct et_softc *);
int	et_start_rxdma(struct et_softc *);
int	et_start_txdma(struct et_softc *);
int	et_stop_rxdma(struct et_softc *);
int	et_stop_txdma(struct et_softc *);
int	et_enable_txrx(struct et_softc *);
void	et_reset(struct et_softc *);
int	et_bus_config(struct et_softc *);
void	et_get_eaddr(struct et_softc *, uint8_t[]);
void	et_setmulti(struct et_softc *);
void	et_tick(void *);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 128;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

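/*
 * Per-ring refill strategy: ring 0 is stocked with small header-sized
 * mbufs (et_newbuf_hdr) and ring 1 with full clusters (et_newbuf_cluster);
 * the chip selects a ring per received frame.  The bufsize values double
 * as the RING*_SIZE codes written to ET_RXDMA_CTRL in et_start_rxdma().
 */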
static const struct et_bsize	et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0,	.newbuf = et_newbuf_hdr },
	{ .bufsize = 0,	.newbuf = et_newbuf_cluster },
};

const struct pci_matchid et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FE },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_GBE }
};

struct cfattach et_ca = {
	sizeof (struct et_softc), et_match, et_attach, et_detach
};

struct cfdriver et_cd = {
	NULL, "et", DV_IFNET
};

int
et_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, et_devices,
	    sizeof (et_devices) / sizeof (et_devices[0]));
}

void
et_attach(struct device *parent, struct device *self, void *aux)
{
	struct et_softc *sc = (struct et_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pcireg_t memtype;
	int error;

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
	if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	error = et_bus_config(sc);
	if (error)
		return;

	et_get_eaddr(sc, sc->sc_arpcom.ac_enaddr);

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(sc);
	if (error)
		return;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	et_chip_attach(sc);

	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = et_miibus_readreg;
	sc->sc_miibus.mii_writereg = et_miibus_writereg;
	sc->sc_miibus.mii_statchg = et_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, et_ifmedia_upd,
	    et_ifmedia_sts);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick, et_tick, sc);
	timeout_set(&sc->sc_txtick, et_txtick, sc);
}

int
et_detach(struct device *self, int flags)
{
	struct et_softc *sc = (struct et_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	et_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);

	return 0;
}

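/*
 * Indirect PHY access through the MAC's MII management registers: program
 * the PHY/register address, kick off the operation and poll the indicator
 * register until the busy/invalid bits clear.  At 50 retries of 50us each,
 * the read below gives up after roughly 2.5ms.
 */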
int
et_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct et_softc *sc = (struct et_softc *)dev;
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		printf("%s: read phy %d, reg %d timed out\n",
		    sc->sc_dev.dv_xname, phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

void
et_miibus_writereg(struct device *dev, int phy, int reg, int val0)
{
	struct et_softc *sc = (struct et_softc *)dev;
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		printf("%s: write phy %d, reg %d timed out\n",
		    sc->sc_dev.dv_xname, phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
}

void
et_miibus_statchg(struct device *dev)
{
	struct et_softc *sc = (struct et_softc *)dev;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

int
et_bus_config(struct et_softc *sc)
{
	uint32_t val; //, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		printf("%s: EEPROM status error 0x%02x\n",
		    sc->sc_dev.dv_xname, val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		printf("%s: ack latency %u, replay timer %u\n",
		    sc->sc_dev.dv_xname, ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}

void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

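/*
 * ET_INTR_MASK has inverse sense: a set bit masks (disables) the
 * corresponding interrupt source.  Disabling everything therefore writes
 * all ones, and enabling writes the complement of the wanted sources.
 */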
void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		printf("%s: can't create TX ring DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		printf("%s: can't create TX status DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		    { ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			printf("%s: can't create DMA stuffs for "
			    "the %d RX ring\n", sc->sc_dev.dv_xname, i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		printf("%s: can't create RX stat ring DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		printf("%s: can't create RX status DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(sc);
	if (error)
		return error;

	return 0;
}

void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
}

int
et_dma_mbuf_create(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		printf("%s: can't create spare mbuf DMA map\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				printf("%s: can't create %d RX mbuf "
				    "for %d RX ring\n", sc->sc_dev.dv_xname,
				    j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(sc, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			printf("%s: can't create %d TX mbuf "
			    "DMA map\n", sc->sc_dev.dv_xname, i);
			et_dma_mbuf_destroy(sc, i, rx_done);
			return error;
		}
	}

	return 0;
}

void
et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
			    ("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}

int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, dmap);
	if (error) {
		printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't allocate DMA mem\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (caddr_t *)addr, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map DMA mem\n", sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: can't load DMA mem\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, seg, 1);
		return error;
	}

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}

void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
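	/*
	 * XXX The cast below hands bus_dmamem_free() the address of the KVA
	 * pointer rather than the bus_dma_segment_t the memory was allocated
	 * with; the per-ring *_seg fields saved by et_dma_mem_create() are
	 * what should really be freed here.
	 */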
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

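/*
 * Interrupt handler.  Reading ET_INTR_STATUS tells us whether the device
 * asserted the (possibly shared) interrupt line: 0 means it was not ours,
 * and 0xffffffff typically means the device is gone.  Interrupts are
 * masked while the handler runs and re-enabled on the way out.
 */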
int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);

	return (1);
}

int
et_init(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int error, i, s;

	s = splnet();

	et_stop(sc);

	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc);
	if (error)
		goto back;

	error = et_start_rxdma(sc);
	if (error)
		goto back;

	error = et_start_txdma(sc);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	timeout_add_sec(&sc->sc_tick, 1);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
back:
	if (error)
		et_stop(sc);

	splx(s);

	return (0);
}

int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			et_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				et_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					et_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}

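/*
 * Transmit start routine.  A single mbuf chain can consume up to
 * ET_NSEG_SPARE TX descriptors, so that much headroom must exist before
 * another packet is dequeued; checking before the dequeue keeps a packet
 * from being pulled off the queue with nowhere to put it.
 */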
void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	trans = 0;
	for (;;) {
		/* Check for room before dequeueing, or the mbuf is lost. */
		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		trans = 1;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (trans) {
		timeout_add_sec(&sc->sc_txtick, 1);
		ifp->if_timer = 5;
	}
}

void
et_watchdog(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;

	printf("%s: watchdog timed out\n", sc->sc_dev.dv_xname);

	et_init(ifp);
	et_start(ifp);
}

int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		printf("%s: can't stop RX DMA engine\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}
	return 0;
}

int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
		    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}

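/*
 * Program the 128-bit multicast hash filter.  The top seven bits of the
 * big-endian CRC-32 of each address select one bit across the four 32-bit
 * ET_MULTI_HASH registers, e.g. a hash value of 70 sets bit 6 of hash[2].
 */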
void
et_setmulti(struct et_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN];
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);

	count = 0;
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		uint32_t *hp, h;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
		}

		h = ether_crc32_be(addr, ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
		ETHER_NEXT_MULTI(step, enm);
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

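/*
 * Chip bring-up.  The ET1310 splits its internal packet memory between
 * the RX and TX queues; the boundary written to ET_RXQ_END/ET_TXQ_START
 * below gives RX the larger share when the MTU is small and shrinks it
 * as jumbo frames demand more TX buffering.
 */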
int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t rxq_end;
	int error;

	/*
	 * Split internal memory between TX and RX according to MTU
	 */
	if (ifp->if_hardmtu < 2048)
		rxq_end = 0x2bc;
	else if (ifp->if_hardmtu < 8192)
		rxq_end = 0x1ff;
	else
		rxq_end = 0x1b3;
	CSR_WRITE_4(sc, ET_RXQ_START, 0);
	CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
	    txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return 0;
}

int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				printf("%s: %d ring %d buf, newbuf failed: "
				    "%d\n", sc->sc_dev.dv_xname, n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

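/*
 * RX DMA setup.  The position registers carry both an index and a wrap
 * bit; hardware and driver agree on ring occupancy by comparing index
 * plus wrap.  Writing the ring POS registers with only the WRAP bit set
 * (index 0) marks the freshly stocked rings as completely full, matching
 * the rr_index = 0 / rr_wrap = 1 software state below.
 */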
int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		printf("%s: can't init RX DMA engine\n", sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		printf("%s: can't init TX DMA engine\n", sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	const uint8_t *eaddr = LLADDR(ifp->if_sadl);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
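	/*
	 * (The address is packed little-endian into two registers, e.g.
	 * 00:11:22:33:44:55 yields ET_MAC_ADDR1 = 0x55443322 and
	 * ET_MAC_ADDR2 = 0x11000000.)
	 */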
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	const uint8_t *eaddr = LLADDR(ifp->if_sadl);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ifp->if_hardmtu > 8192) {
		/*
		 * In order to transmit jumbo packets greater than 8k,
		 * the FIFO between RX MAC and RX DMA needs to be reduced
		 * in size to (16k - MTU).  In order to implement this, we
		 * must use "cut through" mode in the RX MAC, which chops
		 * packets down into segments which are (max_size * 16).
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLP's that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		printf("%s: can't start RX DMA engine\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

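/*
 * Turn the MAC's transmitter and receiver on and wait for the enables to
 * be acknowledged in the SYNC bits; at 100 retries of 10us each the wait
 * is bounded at roughly 1ms.
 */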
int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t val;
	int i;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	et_ifmedia_upd(ifp);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		printf("%s: can't enable RX/TX\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			printf("%s: invalid ring index %d\n",
			    sc->sc_dev.dv_xname, ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			printf("%s: invalid buf index %d\n",
			    sc->sc_dev.dv_xname, buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				ml_enqueue(&ml, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			printf("%s: WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n", sc->sc_dev.dv_xname,
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	if_input(ifp, &ml);
}

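/*
 * Map and enqueue one mbuf chain.  The first descriptor's DMA map is lent
 * to the last descriptor (and vice versa) so that when et_txeof() reaches
 * the final fragment it can unload the map that actually holds the mbuf;
 * a completion interrupt is requested only every sc_tx_intr_nsegs
 * segments (with ET_TIMER as the fallback) to moderate TX interrupts.
 */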
int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		printf("%s: can't load TX mbuf", sc->sc_dev.dv_xname);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		if (m_defrag(m, M_DONTWAIT)) {
			printf("%s: can't defrag TX mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOBUFS;
			goto back;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			printf("%s: can't load defraged TX mbuf\n",
			    sc->sc_dev.dv_xname);
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

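/*
 * Reclaim completed TX descriptors: walk from tbd_start_index to the
 * hardware's ET_TX_DONE_POS, comparing index and wrap bit, and free the
 * mbuf attached to the last fragment of each packet.
 */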
void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		timeout_del(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifq_clr_oactive(&ifp->if_snd);

	et_start(ifp);
}

void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	timeout_add_sec(&sc->sc_tick, 1);
	splx(s);
}

int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

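/*
 * Refill one RX descriptor.  The new mbuf is first loaded into the spare
 * DMA map (sc_mbuf_tmp_dmap); only on success are the buffer's map and
 * the spare swapped, so a failed allocation or load leaves the old mbuf
 * and its mapping untouched and the descriptor is simply rewritten.
 */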
int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		printf("%s: MGETHDR failed, size %d\n", sc->sc_dev.dv_xname,
		    len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error == 0 && sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
		error = EFBIG;
		printf("%s: too many segments?!\n",
		    sc->sc_dev.dv_xname);
	}
	if (error) {
		m_freem(m);

		/* XXX for debug */
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}