Line data Source code
1 : /* $OpenBSD: smc83c170.c,v 1.28 2018/07/03 14:33:43 kevlo Exp $ */
2 : /* $NetBSD: smc83c170.c,v 1.59 2005/02/27 00:27:02 perry Exp $ */
3 :
4 : /*-
5 : * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
6 : * All rights reserved.
7 : *
8 : * This code is derived from software contributed to The NetBSD Foundation
9 : * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 : * NASA Ames Research Center.
11 : *
12 : * Redistribution and use in source and binary forms, with or without
13 : * modification, are permitted provided that the following conditions
14 : * are met:
15 : * 1. Redistributions of source code must retain the above copyright
16 : * notice, this list of conditions and the following disclaimer.
17 : * 2. Redistributions in binary form must reproduce the above copyright
18 : * notice, this list of conditions and the following disclaimer in the
19 : * documentation and/or other materials provided with the distribution.
20 : *
21 : * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 : * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 : * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 : * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 : * POSSIBILITY OF SUCH DAMAGE.
32 : */
33 :
34 : /*
35 : * Device driver for the Standard Microsystems Corp. 83C170
36 : * Ethernet PCI Integrated Controller (EPIC/100).
37 : */
38 :
39 : #include "bpfilter.h"
40 :
41 : #include <sys/param.h>
42 : #include <sys/systm.h>
43 : #include <sys/timeout.h>
44 : #include <sys/mbuf.h>
45 : #include <sys/malloc.h>
46 : #include <sys/kernel.h>
47 : #include <sys/socket.h>
48 : #include <sys/ioctl.h>
49 : #include <sys/errno.h>
50 : #include <sys/device.h>
51 :
52 : #include <net/if.h>
53 :
54 : #include <netinet/in.h>
55 : #include <netinet/if_ether.h>
56 :
57 : #include <net/if_media.h>
58 :
59 : #if NBPFILTER > 0
60 : #include <net/bpf.h>
61 : #endif
62 :
63 : #include <machine/bus.h>
64 : #include <machine/intr.h>
65 :
66 : #include <dev/mii/miivar.h>
67 : #include <dev/mii/lxtphyreg.h>
68 :
69 : #include <dev/ic/smc83c170reg.h>
70 : #include <dev/ic/smc83c170var.h>
71 :
72 : void epic_start(struct ifnet *);
73 : void epic_watchdog(struct ifnet *);
74 : int epic_ioctl(struct ifnet *, u_long, caddr_t);
75 : int epic_init(struct ifnet *);
76 : void epic_stop(struct ifnet *, int);
77 :
78 : void epic_reset(struct epic_softc *);
79 : void epic_rxdrain(struct epic_softc *);
80 : int epic_add_rxbuf(struct epic_softc *, int);
81 : void epic_read_eeprom(struct epic_softc *, int, int, u_int16_t *);
82 : void epic_set_mchash(struct epic_softc *);
83 : void epic_fixup_clock_source(struct epic_softc *);
84 : int epic_mii_read(struct device *, int, int);
85 : void epic_mii_write(struct device *, int, int, int);
86 : int epic_mii_wait(struct epic_softc *, u_int32_t);
87 : void epic_tick(void *);
88 :
89 : void epic_statchg(struct device *);
90 : int epic_mediachange(struct ifnet *);
91 : void epic_mediastatus(struct ifnet *, struct ifmediareq *);
92 :
93 : struct cfdriver epic_cd = {
94 : 0, "epic", DV_IFNET
95 : };
96 :
97 : #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
98 : INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
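/*
 * Summary of the causes enabled above (derived from epic_intr() below):
 * INTSTAT_RCC, INTSTAT_RXE and INTSTAT_RQE drive the receive loop,
 * INTSTAT_TXC and INTSTAT_TXU drive transmit completion and underrun
 * recovery, and INTSTAT_FATAL_INT covers the PCI error conditions that
 * force a reinitialization of the interface.
 */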
99 :
100 : int epic_copy_small = 0;
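/*
 * epic_copy_small is left at 0 by default; when set non-zero, received
 * frames that fit in MHLEN are copied into a fresh header mbuf in
 * epic_intr() so the existing cluster can stay on the receive ring.
 */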
101 :
102 : #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
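/*
 * With the standard values ETHER_MIN_LEN == 64 and ETHER_CRC_LEN == 4,
 * ETHER_PAD_LEN works out to 60 bytes.  The EPIC appends the CRC in
 * hardware, so the driver only has to pad short frames up to 60 bytes;
 * epic_attach() below sets up a shared zero-filled buffer for this and
 * epic_start() chains it in as an extra transmit fragment.
 */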
103 :
104 : /*
105 : * Attach an EPIC interface to the system.
106 : */
107 : void
108 0 : epic_attach(struct epic_softc *sc, const char *intrstr)
109 : {
110 0 : bus_space_tag_t st = sc->sc_st;
111 0 : bus_space_handle_t sh = sc->sc_sh;
112 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
113 0 : int rseg, error, miiflags;
114 : u_int i;
115 0 : bus_dma_segment_t seg;
116 0 : u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
117 0 : u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
118 : char *nullbuf;
119 :
120 0 : timeout_set(&sc->sc_mii_timeout, epic_tick, sc);
121 :
122 : /*
123 : * Allocate the control data structures, and create and load the
124 : * DMA map for it.
125 : */
126 0 : if ((error = bus_dmamem_alloc(sc->sc_dmat,
127 : sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
128 0 : &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
129 0 : printf(": unable to allocate control data, error = %d\n",
130 : error);
131 0 : goto fail_0;
132 : }
133 :
134 0 : if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
135 : sizeof(struct epic_control_data) + ETHER_PAD_LEN,
136 : (caddr_t *)&sc->sc_control_data,
137 0 : BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
138 0 : printf(": unable to map control data, error = %d\n", error);
139 0 : goto fail_1;
140 : }
141 : nullbuf =
142 0 : (char *)sc->sc_control_data + sizeof(struct epic_control_data);
143 0 : memset(nullbuf, 0, ETHER_PAD_LEN);
144 :
145 0 : if ((error = bus_dmamap_create(sc->sc_dmat,
146 : sizeof(struct epic_control_data), 1,
147 : sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
148 0 : &sc->sc_cddmamap)) != 0) {
149 0 : printf(": unable to create control data DMA map, error = %d\n",
150 : error);
151 0 : goto fail_2;
152 : }
153 :
154 0 : if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
155 : sc->sc_control_data, sizeof(struct epic_control_data), NULL,
156 0 : BUS_DMA_NOWAIT)) != 0) {
157 0 : printf(": unable to load control data DMA map, error = %d\n",
158 : error);
159 0 : goto fail_3;
160 : }
161 :
162 : /*
163 : * Create the transmit buffer DMA maps.
164 : */
165 0 : for (i = 0; i < EPIC_NTXDESC; i++) {
166 0 : if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
167 : EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
168 0 : &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
169 0 : printf(": unable to create tx DMA map %d, error = %d\n",
170 : i, error);
171 0 : goto fail_4;
172 : }
173 : }
174 :
175 : /*
176 : * Create the receive buffer DMA maps.
177 : */
178 0 : for (i = 0; i < EPIC_NRXDESC; i++) {
179 0 : if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
180 : MCLBYTES, 0, BUS_DMA_NOWAIT,
181 0 : &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
182 0 : printf(": unable to create rx DMA map %d, error = %d\n",
183 : i, error);
184 0 : goto fail_5;
185 : }
186 0 : EPIC_DSRX(sc, i)->ds_mbuf = NULL;
187 : }
188 :
189 : /*
190 : * Create and map the pad buffer.
191 : */
192 0 : if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
193 0 : ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
194 0 : printf(": unable to create pad buffer DMA map, error = %d\n",
195 : error);
196 0 : goto fail_5;
197 : }
198 :
199 0 : if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
200 0 : nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
201 0 : printf(": unable to load pad buffer DMA map, error = %d\n",
202 : error);
203 : goto fail_6;
204 : }
205 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
206 : BUS_DMASYNC_PREWRITE);
207 :
208 : /*
209 : * Bring the chip out of low-power mode and reset it to a known state.
210 : */
211 0 : bus_space_write_4(st, sh, EPIC_GENCTL, 0);
212 0 : epic_reset(sc);
213 :
214 : /*
215 : * Read the Ethernet address from the EEPROM.
216 : */
217 0 : epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
218 0 : for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
219 0 : enaddr[i * 2] = myea[i] & 0xff;
220 0 : enaddr[i * 2 + 1] = myea[i] >> 8;
221 : }
222 :
223 : /*
224 : * ...and the device name.
225 : */
226 0 : epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
227 0 : mydevname);
228 0 : for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
229 0 : devname[i * 2] = mydevname[i] & 0xff;
230 0 : devname[i * 2 + 1] = mydevname[i] >> 8;
231 : }
232 :
233 0 : devname[sizeof(devname) - 1] = ' ';
234 0 : for (i = sizeof(devname) - 1; devname[i] == ' '; i--) {
235 0 : devname[i] = '\0';
236 0 : if (i == 0)
237 : break;
238 : }
239 :
240 0 : printf(", %s : %s, address %s\n", devname, intrstr,
241 0 : ether_sprintf(enaddr));
242 :
243 : miiflags = 0;
244 0 : if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
245 0 : miiflags |= MIIF_HAVEFIBER;
246 :
247 : /*
248 : * Initialize our media structures and probe the MII.
249 : */
250 0 : sc->sc_mii.mii_ifp = ifp;
251 0 : sc->sc_mii.mii_readreg = epic_mii_read;
252 0 : sc->sc_mii.mii_writereg = epic_mii_write;
253 0 : sc->sc_mii.mii_statchg = epic_statchg;
254 0 : ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
255 : epic_mediastatus);
256 0 : mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
257 : MII_OFFSET_ANY, miiflags);
258 0 : if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
259 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
260 0 : ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
261 0 : } else
262 0 : ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
263 :
264 0 : if (sc->sc_hwflags & EPIC_HAS_BNC) {
265 : /* use the next free media instance */
266 0 : sc->sc_serinst = sc->sc_mii.mii_instance++;
267 0 : ifmedia_add(&sc->sc_mii.mii_media,
268 0 : IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
269 : sc->sc_serinst),
270 : 0, NULL);
271 0 : } else
272 0 : sc->sc_serinst = -1;
273 :
274 0 : bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
275 0 : bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
276 0 : ifp->if_softc = sc;
277 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
278 0 : ifp->if_ioctl = epic_ioctl;
279 0 : ifp->if_start = epic_start;
280 0 : ifp->if_watchdog = epic_watchdog;
281 0 : IFQ_SET_MAXLEN(&ifp->if_snd, EPIC_NTXDESC - 1);
282 :
283 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
284 :
285 : /*
286 : * Attach the interface.
287 : */
288 0 : if_attach(ifp);
289 0 : ether_ifattach(ifp);
290 0 : return;
291 :
292 : /*
293 : * Free any resources we've allocated during the failed attach
294 : * attempt. Do this in reverse order and fall through.
295 : */
296 : fail_6:
297 0 : bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
298 : fail_5:
299 0 : for (i = 0; i < EPIC_NRXDESC; i++) {
300 0 : if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
301 0 : bus_dmamap_destroy(sc->sc_dmat,
302 : EPIC_DSRX(sc, i)->ds_dmamap);
303 : }
304 : fail_4:
305 0 : for (i = 0; i < EPIC_NTXDESC; i++) {
306 0 : if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
307 0 : bus_dmamap_destroy(sc->sc_dmat,
308 : EPIC_DSTX(sc, i)->ds_dmamap);
309 : }
310 0 : bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
311 : fail_3:
312 0 : bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
313 : fail_2:
314 0 : bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
315 : sizeof(struct epic_control_data));
316 : fail_1:
317 0 : bus_dmamem_free(sc->sc_dmat, &seg, rseg);
318 : fail_0:
319 0 : return;
320 0 : }
321 :
322 : /*
323 : * Start packet transmission on the interface.
324 : * [ifnet interface function]
325 : */
326 : void
327 0 : epic_start(struct ifnet *ifp)
328 : {
329 0 : struct epic_softc *sc = ifp->if_softc;
330 : struct mbuf *m0, *m;
331 : struct epic_txdesc *txd;
332 : struct epic_descsoft *ds;
333 : struct epic_fraglist *fr;
334 : bus_dmamap_t dmamap;
335 : int error, firsttx, nexttx, opending, seg;
336 : u_int len;
337 :
338 : /*
339 : * Remember the previous txpending and the first transmit
340 : * descriptor we use.
341 : */
342 0 : opending = sc->sc_txpending;
343 0 : firsttx = EPIC_NEXTTX(sc->sc_txlast);
344 :
345 : /*
346 : * Loop through the send queue, setting up transmit descriptors
347 : * until we drain the queue, or use up all available transmit
348 : * descriptors.
349 : */
350 0 : while (sc->sc_txpending < EPIC_NTXDESC) {
351 : /*
352 : * Grab a packet off the queue.
353 : */
354 0 : m0 = ifq_deq_begin(&ifp->if_snd);
355 0 : if (m0 == NULL)
356 : break;
357 : m = NULL;
358 :
359 : /*
360 : * Get the last and next available transmit descriptor.
361 : */
362 0 : nexttx = EPIC_NEXTTX(sc->sc_txlast);
363 0 : txd = EPIC_CDTX(sc, nexttx);
364 0 : fr = EPIC_CDFL(sc, nexttx);
365 0 : ds = EPIC_DSTX(sc, nexttx);
366 0 : dmamap = ds->ds_dmamap;
367 :
368 : /*
369 : * Load the DMA map. If this fails, the packet either
370 : * didn't fit in the alloted number of frags, or we were
371 : * short on resources. In this case, we'll copy and try
372 : * again.
373 : */
374 0 : if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
375 0 : BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
376 0 : (m0->m_pkthdr.len < ETHER_PAD_LEN &&
377 0 : dmamap->dm_nsegs == EPIC_NFRAGS)) {
378 0 : if (error == 0)
379 0 : bus_dmamap_unload(sc->sc_dmat, dmamap);
380 :
381 0 : MGETHDR(m, M_DONTWAIT, MT_DATA);
382 0 : if (m == NULL) {
383 0 : ifq_deq_rollback(&ifp->if_snd, m0);
384 0 : break;
385 : }
386 0 : if (m0->m_pkthdr.len > MHLEN) {
387 0 : MCLGET(m, M_DONTWAIT);
388 0 : if ((m->m_flags & M_EXT) == 0) {
389 0 : m_freem(m);
390 0 : ifq_deq_rollback(&ifp->if_snd, m0);
391 0 : break;
392 : }
393 : }
394 0 : m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
395 0 : m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
396 0 : error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
397 : m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
398 0 : if (error) {
399 0 : ifq_deq_rollback(&ifp->if_snd, m0);
400 0 : break;
401 : }
402 : }
403 0 : ifq_deq_commit(&ifp->if_snd, m0);
404 0 : if (m != NULL) {
405 0 : m_freem(m0);
406 : m0 = m;
407 0 : }
408 :
409 : /* Initialize the fraglist. */
410 0 : for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
411 0 : fr->ef_frags[seg].ef_addr =
412 0 : dmamap->dm_segs[seg].ds_addr;
413 0 : fr->ef_frags[seg].ef_length =
414 0 : dmamap->dm_segs[seg].ds_len;
415 : }
416 0 : len = m0->m_pkthdr.len;
417 0 : if (len < ETHER_PAD_LEN) {
418 0 : fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
419 0 : fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
420 : len = ETHER_PAD_LEN;
421 0 : seg++;
422 0 : }
423 0 : fr->ef_nfrags = seg;
424 :
425 0 : EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
426 :
427 : /* Sync the DMA map. */
428 0 : bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
429 : BUS_DMASYNC_PREWRITE);
430 :
431 : /*
432 : * Store a pointer to the packet so we can free it later.
433 : */
434 0 : ds->ds_mbuf = m0;
435 :
436 : /*
437 : * Fill in the transmit descriptor.
438 : */
439 0 : txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
440 :
441 : /*
442 : * If this is the first descriptor we're enqueueing,
443 : * don't give it to the EPIC yet. That could cause
444 : * a race condition. We'll do it below.
445 : */
446 0 : if (nexttx == firsttx)
447 0 : txd->et_txstatus = TXSTAT_TXLENGTH(len);
448 : else
449 0 : txd->et_txstatus =
450 0 : TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;
451 :
452 0 : EPIC_CDTXSYNC(sc, nexttx,
453 : BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
454 :
455 : /* Advance the tx pointer. */
456 0 : sc->sc_txpending++;
457 0 : sc->sc_txlast = nexttx;
458 :
459 : #if NBPFILTER > 0
460 : /*
461 : * Pass the packet to any BPF listeners.
462 : */
463 0 : if (ifp->if_bpf)
464 0 : bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
465 : #endif
466 : }
467 :
468 0 : if (sc->sc_txpending == EPIC_NTXDESC) {
469 : /* No more slots left; notify upper layer. */
470 0 : ifq_set_oactive(&ifp->if_snd);
471 0 : }
472 :
473 0 : if (sc->sc_txpending != opending) {
474 : /*
475 : * We enqueued packets. If the transmitter was idle,
476 : * reset the txdirty pointer.
477 : */
478 0 : if (opending == 0)
479 0 : sc->sc_txdirty = firsttx;
480 :
481 : /*
482 : * Cause a transmit interrupt to happen on the
483 : * last packet we enqueued.
484 : */
485 0 : EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
486 0 : EPIC_CDTXSYNC(sc, sc->sc_txlast,
487 : BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
488 :
489 : /*
490 : * The entire packet chain is set up. Give the
491 : * first descriptor to the EPIC now.
492 : */
493 0 : EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
494 0 : EPIC_CDTXSYNC(sc, firsttx,
495 : BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
496 :
497 : /* Start the transmitter. */
498 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
499 : COMMAND_TXQUEUED);
500 :
501 : /* Set a watchdog timer in case the chip flakes out. */
502 0 : ifp->if_timer = 5;
503 0 : }
504 0 : }
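/*
 * Worked padding example (illustrative only): a 42-byte ARP request
 * loads as a single 42-byte fragment, then the code above appends a
 * second fragment of ETHER_PAD_LEN - 42 = 18 zero bytes taken from the
 * shared null buffer at sc_nulldma, so the chip transmits a 60-byte
 * frame and adds the 4-byte CRC itself, giving the 64-byte minimum
 * frame on the wire.
 */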
505 :
506 : /*
507 : * Watchdog timer handler.
508 : * [ifnet interface function]
509 : */
510 : void
511 0 : epic_watchdog(struct ifnet *ifp)
512 : {
513 0 : struct epic_softc *sc = ifp->if_softc;
514 :
515 0 : printf("%s: device timeout\n", sc->sc_dev.dv_xname);
516 0 : ifp->if_oerrors++;
517 :
518 0 : (void) epic_init(ifp);
519 0 : }
520 :
521 : /*
522 : * Handle control requests from the operator.
523 : * [ifnet interface function]
524 : */
525 : int
526 0 : epic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
527 : {
528 0 : struct epic_softc *sc = ifp->if_softc;
529 0 : struct ifreq *ifr = (struct ifreq *)data;
530 : int s, error = 0;
531 :
532 0 : s = splnet();
533 :
534 0 : switch (cmd) {
535 : case SIOCSIFADDR:
536 0 : ifp->if_flags |= IFF_UP;
537 0 : epic_init(ifp);
538 0 : break;
539 :
540 : case SIOCSIFFLAGS:
541 : /*
542 : * If interface is marked up and not running, then start it.
543 : * If it is marked down and running, stop it.
544 : * XXX If it's up then re-initialize it. This is so flags
545 : * such as IFF_PROMISC are handled.
546 : */
547 0 : if (ifp->if_flags & IFF_UP)
548 0 : epic_init(ifp);
549 0 : else if (ifp->if_flags & IFF_RUNNING)
550 0 : epic_stop(ifp, 1);
551 : break;
552 :
553 : case SIOCSIFMEDIA:
554 : case SIOCGIFMEDIA:
555 0 : error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
556 0 : break;
557 :
558 : default:
559 0 : error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
560 0 : }
561 :
562 0 : if (error == ENETRESET) {
563 0 : if (ifp->if_flags & IFF_RUNNING) {
564 0 : mii_pollstat(&sc->sc_mii);
565 0 : epic_set_mchash(sc);
566 0 : }
567 : error = 0;
568 0 : }
569 :
570 0 : splx(s);
571 0 : return (error);
572 : }
573 :
574 : /*
575 : * Interrupt handler.
576 : */
577 : int
578 0 : epic_intr(void *arg)
579 : {
580 0 : struct epic_softc *sc = arg;
581 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
582 : struct epic_rxdesc *rxd;
583 : struct epic_txdesc *txd;
584 : struct epic_descsoft *ds;
585 0 : struct mbuf_list ml = MBUF_LIST_INITIALIZER();
586 : struct mbuf *m;
587 : u_int32_t intstat, rxstatus, txstatus;
588 : int i, claimed = 0;
589 : u_int len;
590 :
591 : /*
592 : * Get the interrupt status from the EPIC.
593 : */
594 0 : intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
595 0 : if ((intstat & INTSTAT_INT_ACTV) == 0)
596 0 : return (claimed);
597 :
598 : claimed = 1;
599 :
600 : /*
601 : * Acknowledge the interrupt.
602 : */
603 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
604 : intstat & INTMASK);
605 :
606 : /*
607 : * Check for receive interrupts.
608 : */
609 0 : if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
610 0 : for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
611 0 : rxd = EPIC_CDRX(sc, i);
612 0 : ds = EPIC_DSRX(sc, i);
613 :
614 0 : EPIC_CDRXSYNC(sc, i,
615 : BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
616 :
617 0 : rxstatus = rxd->er_rxstatus;
618 0 : if (rxstatus & ER_RXSTAT_OWNER) {
619 : /*
620 : * We have processed all of the
621 : * receive buffers.
622 : */
623 : break;
624 : }
625 :
626 : /*
627 : * Make sure the packet arrived intact. If an error
628 : * occurred, update stats and reset the descriptor.
629 : * The buffer will be reused the next time the
630 : * descriptor comes up in the ring.
631 : */
632 0 : if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
633 0 : if (rxstatus & ER_RXSTAT_CRCERROR)
634 0 : printf("%s: CRC error\n",
635 0 : sc->sc_dev.dv_xname);
636 0 : if (rxstatus & ER_RXSTAT_ALIGNERROR)
637 0 : printf("%s: alignment error\n",
638 0 : sc->sc_dev.dv_xname);
639 0 : ifp->if_ierrors++;
640 0 : EPIC_INIT_RXDESC(sc, i);
641 0 : continue;
642 : }
643 :
644 0 : bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
645 : ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
646 :
647 : /*
648 : * The EPIC includes the CRC with every packet;
649 : * trim it.
650 : */
651 0 : len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;
652 :
653 0 : if (len < sizeof(struct ether_header)) {
654 : /*
655 : * Runt packet; drop it now.
656 : */
657 0 : ifp->if_ierrors++;
658 0 : EPIC_INIT_RXDESC(sc, i);
659 0 : bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
660 : ds->ds_dmamap->dm_mapsize,
661 : BUS_DMASYNC_PREREAD);
662 0 : continue;
663 : }
664 :
665 : /*
666 : * If the packet is small enough to fit in a
667 : * single header mbuf, allocate one and copy
668 : * the data into it. This greatly reduces
669 : * memory consumption when we receive lots
670 : * of small packets.
671 : *
672 : * Otherwise, we add a new buffer to the receive
673 : * chain. If this fails, we drop the packet and
674 : * recycle the old buffer.
675 : */
676 0 : if (epic_copy_small != 0 && len <= MHLEN) {
677 0 : MGETHDR(m, M_DONTWAIT, MT_DATA);
678 0 : if (m == NULL)
679 : goto dropit;
680 0 : memcpy(mtod(m, caddr_t),
681 : mtod(ds->ds_mbuf, caddr_t), len);
682 0 : EPIC_INIT_RXDESC(sc, i);
683 0 : bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
684 : ds->ds_dmamap->dm_mapsize,
685 : BUS_DMASYNC_PREREAD);
686 0 : } else {
687 0 : m = ds->ds_mbuf;
688 0 : if (epic_add_rxbuf(sc, i) != 0) {
689 : dropit:
690 0 : ifp->if_ierrors++;
691 0 : EPIC_INIT_RXDESC(sc, i);
692 0 : bus_dmamap_sync(sc->sc_dmat,
693 : ds->ds_dmamap, 0,
694 : ds->ds_dmamap->dm_mapsize,
695 : BUS_DMASYNC_PREREAD);
696 0 : continue;
697 : }
698 : }
699 :
700 0 : m->m_pkthdr.len = m->m_len = len;
701 :
702 0 : ml_enqueue(&ml, m);
703 0 : }
704 :
705 : /* Update the receive pointer. */
706 0 : sc->sc_rxptr = i;
707 :
708 : /*
709 : * Check for receive queue underflow.
710 : */
711 0 : if (intstat & INTSTAT_RQE) {
712 0 : printf("%s: receiver queue empty\n",
713 0 : sc->sc_dev.dv_xname);
714 : /*
715 : * Ring is already built; just restart the
716 : * receiver.
717 : */
718 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
719 : EPIC_CDRXADDR(sc, sc->sc_rxptr));
720 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
721 : COMMAND_RXQUEUED | COMMAND_START_RX);
722 0 : }
723 : }
724 :
725 0 : if_input(ifp, &ml);
726 :
727 : /*
728 : * Check for transmission complete interrupts.
729 : */
730 0 : if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
731 0 : ifq_clr_oactive(&ifp->if_snd);
732 0 : for (i = sc->sc_txdirty; sc->sc_txpending != 0;
733 0 : i = EPIC_NEXTTX(i), sc->sc_txpending--) {
734 0 : txd = EPIC_CDTX(sc, i);
735 0 : ds = EPIC_DSTX(sc, i);
736 :
737 0 : EPIC_CDTXSYNC(sc, i,
738 : BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
739 :
740 0 : txstatus = txd->et_txstatus;
741 0 : if (txstatus & ET_TXSTAT_OWNER)
742 : break;
743 :
744 0 : EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);
745 :
746 0 : bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
747 : 0, ds->ds_dmamap->dm_mapsize,
748 : BUS_DMASYNC_POSTWRITE);
749 0 : bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
750 0 : m_freem(ds->ds_mbuf);
751 0 : ds->ds_mbuf = NULL;
752 :
753 : /*
754 : * Check for errors and collisions.
755 : */
756 0 : if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
757 0 : ifp->if_oerrors++;
758 0 : ifp->if_collisions +=
759 0 : TXSTAT_COLLISIONS(txstatus);
760 0 : if (txstatus & ET_TXSTAT_CARSENSELOST)
761 0 : printf("%s: lost carrier\n",
762 0 : sc->sc_dev.dv_xname);
763 : }
764 :
765 : /* Update the dirty transmit buffer pointer. */
766 0 : sc->sc_txdirty = i;
767 :
768 : /*
769 : * Cancel the watchdog timer if there are no pending
770 : * transmissions.
771 : */
772 0 : if (sc->sc_txpending == 0)
773 0 : ifp->if_timer = 0;
774 :
775 : /*
776 : * Kick the transmitter after a DMA underrun.
777 : */
778 0 : if (intstat & INTSTAT_TXU) {
779 0 : printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
780 0 : bus_space_write_4(sc->sc_st, sc->sc_sh,
781 : EPIC_COMMAND, COMMAND_TXUGO);
782 0 : if (sc->sc_txpending)
783 0 : bus_space_write_4(sc->sc_st, sc->sc_sh,
784 : EPIC_COMMAND, COMMAND_TXQUEUED);
785 : }
786 :
787 : /*
788 : * Try to get more packets going.
789 : */
790 0 : epic_start(ifp);
791 0 : }
792 :
793 : /*
794 : * Check for fatal interrupts.
795 : */
796 0 : if (intstat & INTSTAT_FATAL_INT) {
797 0 : if (intstat & INTSTAT_PTA)
798 0 : printf("%s: PCI target abort error\n",
799 0 : sc->sc_dev.dv_xname);
800 0 : else if (intstat & INTSTAT_PMA)
801 0 : printf("%s: PCI master abort error\n",
802 0 : sc->sc_dev.dv_xname);
803 0 : else if (intstat & INTSTAT_APE)
804 0 : printf("%s: PCI address parity error\n",
805 0 : sc->sc_dev.dv_xname);
806 0 : else if (intstat & INTSTAT_DPE)
807 0 : printf("%s: PCI data parity error\n",
808 : sc->sc_dev.dv_xname);
809 : else
810 0 : printf("%s: unknown fatal error\n",
811 : sc->sc_dev.dv_xname);
812 0 : (void) epic_init(ifp);
813 0 : }
814 :
815 0 : return (claimed);
816 0 : }
817 :
818 : /*
819 : * One second timer, used to tick the MII.
820 : */
821 : void
822 0 : epic_tick(void *arg)
823 : {
824 0 : struct epic_softc *sc = arg;
825 : int s;
826 :
827 0 : s = splnet();
828 0 : mii_tick(&sc->sc_mii);
829 0 : splx(s);
830 :
831 0 : timeout_add_sec(&sc->sc_mii_timeout, 1);
832 0 : }
833 :
834 : /*
835 : * Fix up the clock source on the EPIC.
836 : */
837 : void
838 0 : epic_fixup_clock_source(struct epic_softc *sc)
839 : {
840 : int i;
841 :
842 : /*
843 : * According to SMC Application Note 7-15, the EPIC's clock
844 : * source is incorrect following a reset. This manifests itself
845 : * as failure to recognize when host software has written to
846 : * a register on the EPIC. The appnote recommends issuing at
847 : * least 16 consecutive writes to the CLOCK TEST bit to correctly
848 : * configure the clock source.
849 : */
850 0 : for (i = 0; i < 16; i++)
851 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
852 : TEST_CLOCKTEST);
853 0 : }
854 :
855 : /*
856 : * Perform a soft reset on the EPIC.
857 : */
858 : void
859 0 : epic_reset(struct epic_softc *sc)
860 : {
861 :
862 0 : epic_fixup_clock_source(sc);
863 :
864 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
865 0 : delay(100);
866 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
867 0 : delay(100);
868 :
869 0 : epic_fixup_clock_source(sc);
870 0 : }
871 :
872 : /*
873 : * Initialize the interface. Must be called at splnet().
874 : */
875 : int
876 0 : epic_init(struct ifnet *ifp)
877 : {
878 0 : struct epic_softc *sc = ifp->if_softc;
879 0 : bus_space_tag_t st = sc->sc_st;
880 0 : bus_space_handle_t sh = sc->sc_sh;
881 : struct epic_txdesc *txd;
882 : struct epic_descsoft *ds;
883 : u_int32_t genctl, reg0;
884 : int i, error = 0;
885 :
886 : /*
887 : * Cancel any pending I/O.
888 : */
889 0 : epic_stop(ifp, 0);
890 :
891 : /*
892 : * Reset the EPIC to a known state.
893 : */
894 0 : epic_reset(sc);
895 :
896 : /*
897 : * Magical mystery initialization.
898 : */
899 0 : bus_space_write_4(st, sh, EPIC_TXTEST, 0);
900 :
901 : /*
902 : * Initialize the EPIC genctl register:
903 : *
904 : * - 64 byte receive FIFO threshold
905 : * - automatic advance to next receive frame
906 : */
907 : genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
908 : #if BYTE_ORDER == BIG_ENDIAN
909 : genctl |= GENCTL_BIG_ENDIAN;
910 : #endif
911 0 : bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
912 :
913 : /*
914 : * Reset the MII bus and PHY.
915 : */
916 0 : reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
917 0 : bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
918 0 : bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
919 0 : bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
920 0 : delay(100);
921 0 : bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
922 0 : delay(1000);
923 0 : bus_space_write_4(st, sh, EPIC_NVCTL, reg0);
924 :
925 : /*
926 : * Initialize Ethernet address.
927 : */
928 0 : reg0 = sc->sc_arpcom.ac_enaddr[1] << 8 | sc->sc_arpcom.ac_enaddr[0];
929 0 : bus_space_write_4(st, sh, EPIC_LAN0, reg0);
930 0 : reg0 = sc->sc_arpcom.ac_enaddr[3] << 8 | sc->sc_arpcom.ac_enaddr[2];
931 0 : bus_space_write_4(st, sh, EPIC_LAN1, reg0);
932 0 : reg0 = sc->sc_arpcom.ac_enaddr[5] << 8 | sc->sc_arpcom.ac_enaddr[4];
933 0 : bus_space_write_4(st, sh, EPIC_LAN2, reg0);
934 :
935 : /*
936 : * Initialize receive control. Remember the external buffer
937 : * size setting.
938 : */
939 0 : reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
940 : (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
941 0 : reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
942 0 : if (ifp->if_flags & IFF_PROMISC)
943 0 : reg0 |= RXCON_PROMISCMODE;
944 0 : bus_space_write_4(st, sh, EPIC_RXCON, reg0);
945 :
946 : /* Set the current media. */
947 0 : epic_mediachange(ifp);
948 :
949 : /* Set up the multicast hash table. */
950 0 : epic_set_mchash(sc);
951 :
952 : /*
953 : * Initialize the transmit descriptor ring. txlast is initialized
954 : * to the end of the list so that it will wrap around to the first
955 : * descriptor when the first packet is transmitted.
956 : */
957 0 : for (i = 0; i < EPIC_NTXDESC; i++) {
958 0 : txd = EPIC_CDTX(sc, i);
959 0 : memset(txd, 0, sizeof(struct epic_txdesc));
960 0 : txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
961 0 : txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
962 0 : EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
963 : }
964 0 : sc->sc_txpending = 0;
965 0 : sc->sc_txdirty = 0;
966 0 : sc->sc_txlast = EPIC_NTXDESC - 1;
967 :
968 : /*
969 : * Initialize the receive descriptor ring.
970 : */
971 0 : for (i = 0; i < EPIC_NRXDESC; i++) {
972 0 : ds = EPIC_DSRX(sc, i);
973 0 : if (ds->ds_mbuf == NULL) {
974 0 : if ((error = epic_add_rxbuf(sc, i)) != 0) {
975 0 : printf("%s: unable to allocate or map rx "
976 : "buffer %d error = %d\n",
977 0 : sc->sc_dev.dv_xname, i, error);
978 : /*
979 : * XXX Should attempt to run with fewer receive
980 : * XXX buffers instead of just failing.
981 : */
982 0 : epic_rxdrain(sc);
983 0 : goto out;
984 : }
985 : } else
986 0 : EPIC_INIT_RXDESC(sc, i);
987 : }
988 0 : sc->sc_rxptr = 0;
989 :
990 : /*
991 : * Initialize the interrupt mask and enable interrupts.
992 : */
993 0 : bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
994 0 : bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);
995 :
996 : /*
997 : * Give the transmit and receive rings to the EPIC.
998 : */
999 0 : bus_space_write_4(st, sh, EPIC_PTCDAR,
1000 : EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
1001 0 : bus_space_write_4(st, sh, EPIC_PRCDAR,
1002 : EPIC_CDRXADDR(sc, sc->sc_rxptr));
1003 :
1004 : /*
1005 : * Set the EPIC in motion.
1006 : */
1007 0 : bus_space_write_4(st, sh, EPIC_COMMAND,
1008 : COMMAND_RXQUEUED | COMMAND_START_RX);
1009 :
1010 : /*
1011 : * ...all done!
1012 : */
1013 0 : ifp->if_flags |= IFF_RUNNING;
1014 0 : ifq_clr_oactive(&ifp->if_snd);
1015 :
1016 : /*
1017 : * Start the one second clock.
1018 : */
1019 0 : timeout_add_sec(&sc->sc_mii_timeout, 1);
1020 :
1021 : /*
1022 : * Attempt to start output on the interface.
1023 : */
1024 0 : epic_start(ifp);
1025 :
1026 : out:
1027 0 : if (error)
1028 0 : printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1029 0 : return (error);
1030 : }
1031 :
1032 : /*
1033 : * Drain the receive queue.
1034 : */
1035 : void
1036 0 : epic_rxdrain(struct epic_softc *sc)
1037 : {
1038 : struct epic_descsoft *ds;
1039 : int i;
1040 :
1041 0 : for (i = 0; i < EPIC_NRXDESC; i++) {
1042 0 : ds = EPIC_DSRX(sc, i);
1043 0 : if (ds->ds_mbuf != NULL) {
1044 0 : bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1045 0 : m_freem(ds->ds_mbuf);
1046 0 : ds->ds_mbuf = NULL;
1047 0 : }
1048 : }
1049 0 : }
1050 :
1051 : /*
1052 : * Stop transmission on the interface.
1053 : */
1054 : void
1055 0 : epic_stop(struct ifnet *ifp, int disable)
1056 : {
1057 0 : struct epic_softc *sc = ifp->if_softc;
1058 0 : bus_space_tag_t st = sc->sc_st;
1059 0 : bus_space_handle_t sh = sc->sc_sh;
1060 : struct epic_descsoft *ds;
1061 : u_int32_t reg;
1062 : int i;
1063 :
1064 : /*
1065 : * Stop the one second clock.
1066 : */
1067 0 : timeout_del(&sc->sc_mii_timeout);
1068 :
1069 : /*
1070 : * Mark the interface down and cancel the watchdog timer.
1071 : */
1072 0 : ifp->if_flags &= ~IFF_RUNNING;
1073 0 : ifq_clr_oactive(&ifp->if_snd);
1074 0 : ifp->if_timer = 0;
1075 :
1076 : /* Down the MII. */
1077 0 : mii_down(&sc->sc_mii);
1078 :
1079 : /* Paranoia... */
1080 0 : epic_fixup_clock_source(sc);
1081 :
1082 : /*
1083 : * Disable interrupts.
1084 : */
1085 0 : reg = bus_space_read_4(st, sh, EPIC_GENCTL);
1086 0 : bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
1087 0 : bus_space_write_4(st, sh, EPIC_INTMASK, 0);
1088 :
1089 : /*
1090 : * Stop the DMA engine and take the receiver off-line.
1091 : */
1092 0 : bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
1093 : COMMAND_STOP_TDMA | COMMAND_STOP_RX);
1094 :
1095 : /*
1096 : * Release any queued transmit buffers.
1097 : */
1098 0 : for (i = 0; i < EPIC_NTXDESC; i++) {
1099 0 : ds = EPIC_DSTX(sc, i);
1100 0 : if (ds->ds_mbuf != NULL) {
1101 0 : bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1102 0 : m_freem(ds->ds_mbuf);
1103 0 : ds->ds_mbuf = NULL;
1104 0 : }
1105 : }
1106 :
1107 0 : if (disable)
1108 0 : epic_rxdrain(sc);
1109 0 : }
1110 :
1111 : /*
1112 : * Read the EPIC Serial EEPROM.
1113 : */
1114 : void
1115 0 : epic_read_eeprom(struct epic_softc *sc, int word, int wordcnt, u_int16_t *data)
1116 : {
1117 0 : bus_space_tag_t st = sc->sc_st;
1118 0 : bus_space_handle_t sh = sc->sc_sh;
1119 : u_int16_t reg;
1120 : int i, x;
1121 :
1122 : #define EEPROM_WAIT_READY(st, sh) \
1123 : while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
1124 : /* nothing */
1125 :
1126 : /*
1127 : * Enable the EEPROM.
1128 : */
1129 0 : bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1130 0 : EEPROM_WAIT_READY(st, sh);
1131 :
1132 0 : for (i = 0; i < wordcnt; i++) {
1133 : /* Send CHIP SELECT for one clock tick. */
1134 0 : bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
1135 0 : EEPROM_WAIT_READY(st, sh);
1136 :
1137 : /* Shift in the READ opcode. */
1138 0 : for (x = 3; x > 0; x--) {
1139 : reg = EECTL_ENABLE|EECTL_EECS;
1140 0 : if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
1141 0 : reg |= EECTL_EEDI;
1142 0 : bus_space_write_4(st, sh, EPIC_EECTL, reg);
1143 0 : EEPROM_WAIT_READY(st, sh);
1144 0 : bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1145 0 : EEPROM_WAIT_READY(st, sh);
1146 0 : bus_space_write_4(st, sh, EPIC_EECTL, reg);
1147 0 : EEPROM_WAIT_READY(st, sh);
1148 : }
1149 :
1150 : /* Shift in address. */
1151 0 : for (x = 6; x > 0; x--) {
1152 : reg = EECTL_ENABLE|EECTL_EECS;
1153 0 : if ((word + i) & (1 << (x - 1)))
1154 0 : reg |= EECTL_EEDI;
1155 0 : bus_space_write_4(st, sh, EPIC_EECTL, reg);
1156 0 : EEPROM_WAIT_READY(st, sh);
1157 0 : bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1158 0 : EEPROM_WAIT_READY(st, sh);
1159 0 : bus_space_write_4(st, sh, EPIC_EECTL, reg);
1160 0 : EEPROM_WAIT_READY(st, sh);
1161 : }
1162 :
1163 : /* Shift out data. */
1164 : reg = EECTL_ENABLE|EECTL_EECS;
1165 0 : data[i] = 0;
1166 0 : for (x = 16; x > 0; x--) {
1167 0 : bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1168 0 : EEPROM_WAIT_READY(st, sh);
1169 0 : if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
1170 0 : data[i] |= (1 << (x - 1));
1171 0 : bus_space_write_4(st, sh, EPIC_EECTL, reg);
1172 0 : EEPROM_WAIT_READY(st, sh);
1173 : }
1174 :
1175 : /* Clear CHIP SELECT. */
1176 0 : bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1177 0 : EEPROM_WAIT_READY(st, sh);
1178 : }
1179 :
1180 : /*
1181 : * Disable the EEPROM.
1182 : */
1183 0 : bus_space_write_4(st, sh, EPIC_EECTL, 0);
1184 :
1185 : #undef EEPROM_WAIT_READY
1186 0 : }
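/*
 * Usage note: epic_attach() above calls epic_read_eeprom(sc, 0, 3, myea)
 * to fetch the station address (three 16-bit words, low byte of each
 * word first) and epic_read_eeprom(sc, 0x2c, 6, mydevname) to fetch the
 * product name string.
 */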
1187 :
1188 : /*
1189 : * Add a receive buffer to the indicated descriptor.
1190 : */
1191 : int
1192 0 : epic_add_rxbuf(struct epic_softc *sc, int idx)
1193 : {
1194 0 : struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1195 : struct mbuf *m;
1196 : int error;
1197 :
1198 0 : MGETHDR(m, M_DONTWAIT, MT_DATA);
1199 0 : if (m == NULL)
1200 0 : return (ENOBUFS);
1201 :
1202 0 : MCLGET(m, M_DONTWAIT);
1203 0 : if ((m->m_flags & M_EXT) == 0) {
1204 0 : m_freem(m);
1205 0 : return (ENOBUFS);
1206 : }
1207 :
1208 0 : if (ds->ds_mbuf != NULL)
1209 0 : bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1210 :
1211 0 : ds->ds_mbuf = m;
1212 :
1213 0 : error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1214 : m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1215 : BUS_DMA_READ|BUS_DMA_NOWAIT);
1216 0 : if (error) {
1217 0 : printf("%s: can't load rx DMA map %d, error = %d\n",
1218 0 : sc->sc_dev.dv_xname, idx, error);
1219 0 : panic("epic_add_rxbuf"); /* XXX */
1220 : }
1221 :
1222 0 : bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1223 : ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1224 :
1225 0 : EPIC_INIT_RXDESC(sc, idx);
1226 :
1227 0 : return (0);
1228 0 : }
1229 :
1230 : /*
1231 : * Set the EPIC multicast hash table.
1232 : *
1233 : * NOTE: We rely on a recently-updated mii_media_active here!
1234 : */
1235 : void
1236 0 : epic_set_mchash(struct epic_softc *sc)
1237 : {
1238 0 : struct arpcom *ac = &sc->sc_arpcom;
1239 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1240 : struct ether_multi *enm;
1241 : struct ether_multistep step;
1242 0 : u_int32_t hash, mchash[4];
1243 :
1244 : /*
1245 : * Set up the multicast address filter by passing all multicast
1246 : * addresses through a CRC generator, and then using the low-order
1247 : * 6 bits as an index into the 64 bit multicast hash table (only
1248 : * the lower 16 bits of each 32 bit multicast hash register are
1249 : * valid). The high order bits select the register, while the
1250 : * rest of the bits select the bit within the register.
1251 : */
1252 :
1253 0 : if (ifp->if_flags & IFF_PROMISC)
1254 : goto allmulti;
1255 :
1256 0 : if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1257 : /* XXX hardware bug in 10Mbps mode. */
1258 : goto allmulti;
1259 : }
1260 :
1261 0 : if (ac->ac_multirangecnt > 0)
1262 : goto allmulti;
1263 :
1264 0 : mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1265 :
1266 0 : ETHER_FIRST_MULTI(step, ac, enm);
1267 0 : while (enm != NULL) {
1268 0 : hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1269 0 : hash >>= 26;
1270 :
1271 : /* Set the corresponding bit in the hash table. */
1272 0 : mchash[hash >> 4] |= 1 << (hash & 0xf);
1273 :
1274 0 : ETHER_NEXT_MULTI(step, enm);
1275 : }
1276 :
1277 0 : ifp->if_flags &= ~IFF_ALLMULTI;
1278 0 : goto sethash;
1279 :
1280 : allmulti:
1281 0 : ifp->if_flags |= IFF_ALLMULTI;
1282 0 : mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1283 :
1284 : sethash:
1285 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1286 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1287 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1288 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1289 0 : }
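/*
 * Worked example for the hash mapping above (illustrative values, not
 * taken from a real address): if ether_crc32_be() returned 0xd2345678
 * for some group address, the top six bits give hash = 0xd2345678 >> 26
 * = 52, so the entry lands in mchash[52 >> 4] == mchash[3] (register
 * EPIC_MC3) with bit mask 1 << (52 & 0xf) == 0x0010.
 */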
1290 :
1291 : /*
1292 : * Wait for the MII to become ready.
1293 : */
1294 : int
1295 0 : epic_mii_wait(struct epic_softc *sc, u_int32_t rw)
1296 : {
1297 : int i;
1298 :
1299 0 : for (i = 0; i < 50; i++) {
1300 0 : if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1301 0 : == 0)
1302 : break;
1303 0 : delay(2);
1304 : }
1305 0 : if (i == 50) {
1306 0 : printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1307 0 : return (1);
1308 : }
1309 :
1310 0 : return (0);
1311 0 : }
1312 :
1313 : /*
1314 : * Read from the MII.
1315 : */
1316 : int
1317 0 : epic_mii_read(struct device *self, int phy, int reg)
1318 : {
1319 0 : struct epic_softc *sc = (struct epic_softc *)self;
1320 :
1321 0 : if (epic_mii_wait(sc, MMCTL_WRITE))
1322 0 : return (0);
1323 :
1324 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1325 : MMCTL_ARG(phy, reg, MMCTL_READ));
1326 :
1327 0 : if (epic_mii_wait(sc, MMCTL_READ))
1328 0 : return (0);
1329 :
1330 0 : return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1331 : MMDATA_MASK);
1332 0 : }
1333 :
1334 : /*
1335 : * Write to the MII.
1336 : */
1337 : void
1338 0 : epic_mii_write(struct device *self, int phy, int reg, int val)
1339 : {
1340 0 : struct epic_softc *sc = (struct epic_softc *)self;
1341 :
1342 0 : if (epic_mii_wait(sc, MMCTL_WRITE))
1343 0 : return;
1344 :
1345 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1346 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1347 : MMCTL_ARG(phy, reg, MMCTL_WRITE));
1348 0 : }
1349 :
1350 : /*
1351 : * Callback from PHY when media changes.
1352 : */
1353 : void
1354 0 : epic_statchg(struct device *self)
1355 : {
1356 0 : struct epic_softc *sc = (struct epic_softc *)self;
1357 : u_int32_t txcon, miicfg;
1358 :
1359 : /*
1360 : * Update loopback bits in TXCON to reflect duplex mode.
1361 : */
1362 0 : txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1363 0 : if (sc->sc_mii.mii_media_active & IFM_FDX)
1364 0 : txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1365 : else
1366 0 : txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1367 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1368 :
1369 : /* On some cards we need to manually set the full-duplex LED. */
1370 0 : if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
1371 0 : miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1372 0 : if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
1373 0 : miicfg |= MIICFG_ENABLE;
1374 : else
1375 0 : miicfg &= ~MIICFG_ENABLE;
1376 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1377 0 : }
1378 :
1379 : /*
1380 : * There is a multicast filter bug in 10Mbps mode. Kick the
1381 : * multicast filter in case the speed changed.
1382 : */
1383 0 : epic_set_mchash(sc);
1384 0 : }
1385 :
1386 : /*
1387 : * Callback from ifmedia to request current media status.
1388 : */
1389 : void
1390 0 : epic_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1391 : {
1392 0 : struct epic_softc *sc = ifp->if_softc;
1393 :
1394 0 : mii_pollstat(&sc->sc_mii);
1395 0 : ifmr->ifm_status = sc->sc_mii.mii_media_status;
1396 0 : ifmr->ifm_active = sc->sc_mii.mii_media_active;
1397 0 : }
1398 :
1399 : /*
1400 : * Callback from ifmedia to request new media setting.
1401 : */
1402 : int
1403 0 : epic_mediachange(struct ifnet *ifp)
1404 : {
1405 0 : struct epic_softc *sc = ifp->if_softc;
1406 0 : struct mii_data *mii = &sc->sc_mii;
1407 0 : struct ifmedia *ifm = &mii->mii_media;
1408 0 : uint64_t media = ifm->ifm_cur->ifm_media;
1409 : u_int32_t miicfg;
1410 : struct mii_softc *miisc;
1411 : int cfg;
1412 :
1413 0 : if (!(ifp->if_flags & IFF_UP))
1414 0 : return (0);
1415 :
1416 0 : if (IFM_INST(media) != sc->sc_serinst) {
1417 : /* If we're not selecting serial interface, select MII mode */
1418 : #ifdef EPICMEDIADEBUG
1419 : printf("%s: parallel mode\n", ifp->if_xname);
1420 : #endif
1421 0 : miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1422 0 : miicfg &= ~MIICFG_SERMODEENA;
1423 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1424 0 : }
1425 :
1426 0 : mii_mediachg(mii);
1427 :
1428 0 : if (IFM_INST(media) == sc->sc_serinst) {
1429 : /* select serial interface */
1430 : #ifdef EPICMEDIADEBUG
1431 : printf("%s: serial mode\n", ifp->if_xname);
1432 : #endif
1433 0 : miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1434 0 : miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
1435 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1436 :
1437 0 : /* There is no PHY driver to fill these in for the serial media. */
1438 0 : mii->mii_media_active = media;
1439 0 : mii->mii_media_status = 0;
1440 :
1441 0 : epic_statchg(&sc->sc_dev);
1442 0 : return (0);
1443 : }
1444 :
1445 : /* Lookup selected PHY */
1446 0 : LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
1447 0 : if (IFM_INST(media) == miisc->mii_inst)
1448 : break;
1449 : }
1450 0 : if (!miisc) {
1451 0 : printf("epic_mediachange: can't happen\n"); /* ??? panic */
1452 0 : return (0);
1453 : }
1454 : #ifdef EPICMEDIADEBUG
1455 : printf("%s: using phy %s\n", ifp->if_xname,
1456 : miisc->mii_dev.dv_xname);
1457 : #endif
1458 :
1459 0 : if (miisc->mii_flags & MIIF_HAVEFIBER) {
1460 : /* XXX XXX assume it's a Level1 - should check */
1461 :
1462 : /* We have to power up fiber transceivers. */
1463 0 : cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
1464 0 : if (IFM_SUBTYPE(media) == IFM_100_FX) {
1465 : #ifdef EPICMEDIADEBUG
1466 : printf("%s: power up fiber\n", ifp->if_xname);
1467 : #endif
1468 0 : cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
1469 0 : } else {
1470 : #ifdef EPICMEDIADEBUG
1471 : printf("%s: power down fiber\n", ifp->if_xname);
1472 : #endif
1473 0 : cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
1474 : }
1475 0 : PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
1476 0 : }
1477 :
1478 0 : return (0);
1479 0 : }