Line data Source code
1 : /* $OpenBSD: aic6915.c,v 1.22 2017/01/22 10:17:37 dlg Exp $ */
2 : /* $NetBSD: aic6915.c,v 1.15 2005/12/24 20:27:29 perry Exp $ */
3 :
4 : /*-
5 : * Copyright (c) 2001 The NetBSD Foundation, Inc.
6 : * All rights reserved.
7 : *
8 : * This code is derived from software contributed to The NetBSD Foundation
9 : * by Jason R. Thorpe.
10 : *
11 : * Redistribution and use in source and binary forms, with or without
12 : * modification, are permitted provided that the following conditions
13 : * are met:
14 : * 1. Redistributions of source code must retain the above copyright
15 : * notice, this list of conditions and the following disclaimer.
16 : * 2. Redistributions in binary form must reproduce the above copyright
17 : * notice, this list of conditions and the following disclaimer in the
18 : * documentation and/or other materials provided with the distribution.
19 : *
20 : * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 : * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 : * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 : * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 : * POSSIBILITY OF SUCH DAMAGE.
31 : */
32 :
33 : /*
34 : * Device driver for the Adaptec AIC-6915 (``Starfire'')
35 : * 10/100 Ethernet controller.
36 : */
37 :
38 : #include "bpfilter.h"
39 :
40 : #include <sys/param.h>
41 : #include <sys/endian.h>
42 : #include <sys/systm.h>
43 : #include <sys/timeout.h>
44 : #include <sys/mbuf.h>
45 : #include <sys/malloc.h>
46 : #include <sys/kernel.h>
47 : #include <sys/socket.h>
48 : #include <sys/ioctl.h>
49 : #include <sys/errno.h>
50 : #include <sys/device.h>
51 :
52 : #include <net/if.h>
53 : #include <net/if_dl.h>
54 :
55 : #include <netinet/in.h>
56 : #include <netinet/if_ether.h>
57 :
58 : #include <net/if_media.h>
59 :
60 : #if NBPFILTER > 0
61 : #include <net/bpf.h>
62 : #endif
63 :
64 : #include <machine/bus.h>
65 : #include <machine/intr.h>
66 :
67 : #include <dev/mii/miivar.h>
68 :
69 : #include <dev/ic/aic6915.h>
70 :
71 : void sf_start(struct ifnet *);
72 : void sf_watchdog(struct ifnet *);
73 : int sf_ioctl(struct ifnet *, u_long, caddr_t);
74 : int sf_init(struct ifnet *);
75 : void sf_stop(struct ifnet *, int);
76 :
77 : void sf_txintr(struct sf_softc *);
78 : void sf_rxintr(struct sf_softc *);
79 : void sf_stats_update(struct sf_softc *);
80 :
81 : void sf_reset(struct sf_softc *);
82 : void sf_macreset(struct sf_softc *);
83 : void sf_rxdrain(struct sf_softc *);
84 : int sf_add_rxbuf(struct sf_softc *, int);
85 : uint8_t sf_read_eeprom(struct sf_softc *, int);
86 : void sf_set_filter(struct sf_softc *);
87 :
88 : int sf_mii_read(struct device *, int, int);
89 : void sf_mii_write(struct device *, int, int, int);
90 : void sf_mii_statchg(struct device *);
91 :
92 : void sf_tick(void *);
93 :
94 : int sf_mediachange(struct ifnet *);
95 : void sf_mediastatus(struct ifnet *, struct ifmediareq *);
96 :
97 : uint32_t sf_reg_read(struct sf_softc *, bus_addr_t);
98 : void sf_reg_write(struct sf_softc *, bus_addr_t , uint32_t);
99 :
100 : void sf_set_filter_perfect(struct sf_softc *, int , uint8_t *);
101 : void sf_set_filter_hash(struct sf_softc *, uint8_t *);
102 :
/* Autoconf glue: the "sf" driver class; instances are network interfaces. */
struct cfdriver sf_cd = {
	NULL, "sf", DV_IFNET
};
106 :
/*
 * Accessors for the functional register window.  These go through the
 * sub-region handle (sc_sh_func) that sf_attach() carves out of the
 * register space (or aliases to the base handle when I/O mapped).
 */
#define sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))
111 :
112 : uint32_t
113 0 : sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
114 : {
115 :
116 0 : if (__predict_false(sc->sc_iomapped)) {
117 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
118 : reg);
119 0 : return (bus_space_read_4(sc->sc_st, sc->sc_sh,
120 : SF_IndirectIoDataPort));
121 : }
122 :
123 0 : return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
124 0 : }
125 :
126 : void
127 0 : sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
128 : {
129 :
130 0 : if (__predict_false(sc->sc_iomapped)) {
131 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
132 : reg);
133 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
134 : val);
135 0 : return;
136 : }
137 :
138 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
139 0 : }
140 :
/*
 * Accessors for the general registers, which sit at SF_GENREG_OFFSET
 * within the register space; these honor the indirect I/O window via
 * sf_reg_read()/sf_reg_write().
 */
#define sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
145 :
/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system: set up register
 *	access, allocate and map the shared control data, create the
 *	Tx/Rx DMA maps, read the station address from the EEPROM,
 *	probe the MII, and attach the ifnet.  On failure, resources
 *	acquired so far are released via the fail_* cleanup chain.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	/* One-second MII/statistics tick; armed in sf_init(). */
	timeout_set(&sc->sc_mii_timeout, sf_tick, sc);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			printf("%s: unable to sub-region functional "
			    "registers, error = %d\n", sc->sc_dev.dv_xname,
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 * The bytes are stored highest-offset-first starting at
	 * EEPROM offset 15, hence the descending index.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf(", address %s\n", ether_sprintf(enaddr));

#ifdef DEBUG
	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);
#endif

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, sf_mediachange,
	    sf_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: fall back to a "none" media entry. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, SF_NTXDESC_MASK);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
328 :
/*
 * sf_start: [ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues mbufs
 *	from the send queue, maps each for DMA (copying into a fresh
 *	mbuf/cluster when the fragment count or resources don't allow
 *	a direct load), fills in the high-priority Tx descriptors, and
 *	finally hands the new producer index to the chip.
 */
void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		m0 = ifq_deq_begin(&ifp->if_snd);
		if (m0 == NULL)
			break;
		m = NULL;	/* set iff we had to copy the packet */

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* Leave m0 on the queue for a retry later. */
				ifq_deq_rollback(&ifp->if_snd, m0);
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					ifq_deq_rollback(&ifp->if_snd, m0);
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			/* Linearize the packet into the new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				ifq_deq_rollback(&ifp->if_snd, m0);
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				m_freem(m);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		ifq_deq_commit(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied; transmit the copy, free the original. */
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;	/* ring wrap marker */
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifq_set_oactive(&ifp->if_snd);
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
484 :
485 : /*
486 : * sf_watchdog: [ifnet interface function]
487 : *
488 : * Watchdog timer handler.
489 : */
490 : void
491 0 : sf_watchdog(struct ifnet *ifp)
492 : {
493 0 : struct sf_softc *sc = ifp->if_softc;
494 :
495 0 : printf("%s: device timeout\n", sc->sc_dev.dv_xname);
496 0 : ifp->if_oerrors++;
497 :
498 0 : (void) sf_init(ifp);
499 :
500 : /* Try to get more packets going. */
501 0 : sf_start(ifp);
502 0 : }
503 :
/*
 * sf_ioctl: [ifnet interface function]
 *
 *	Handle control requests from the operator.  Runs at splnet();
 *	requests not handled here are passed to ether_ioctl(), and an
 *	ENETRESET result is translated into a receive-filter reload.
 */
int
sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sf_softc *sc = (struct sf_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sf_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC changed while we're already
			 * running, just reprogram the receive filter
			 * instead of doing a full reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ((ifp->if_flags ^ sc->sc_flags) &
			     IFF_PROMISC)) {
				sf_set_filter(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					sf_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sf_stop(ifp, 1);
		}
		/* Remember the flags for the next change comparison. */
		sc->sc_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		/* Multicast list changed; reload the hardware filter. */
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}
563 :
/*
 * sf_intr:
 *
 *	Interrupt service routine.  Loops draining the interrupt
 *	status register, dispatching to the Rx/Tx helpers and handling
 *	abnormal conditions (statistics wrap, DMA errors, Tx FIFO
 *	underruns).  Returns nonzero if the interrupt was ours.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		/*
		 * NOTE(review): IS_PCIPadInt is treated here as the
		 * "interrupt pending" summary bit; when clear, nothing
		 * (more) is pending for us -- confirm against the
		 * AIC-6915 register definitions.
		 */
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				/* Unrecoverable per-pass; reinit below. */
				wantinit = 1;
				printf("%s: WARNING: DMA error\n",
				    sc->sc_dev.dv_xname);
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				/*
				 * Bump the transmit threshold (units of
				 * 16 bytes) and reprogram both registers
				 * that carry it.
				 */
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
#ifdef DEBUG
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->sc_txthresh * 16);
#endif
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_arpcom.ac_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_arpcom.ac_if);
	}

	return (handled);
}
637 :
/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.  Walks the Tx
 *	completion queue from the consumer index to the chip's producer
 *	index, releasing each completed packet's mbuf and DMA map, then
 *	writes the new consumer index back and re-checks for completions
 *	that arrived meanwhile.
 */
void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	/* Queue empty: nothing (more) to do. */
	if (consumer == producer)
		return;

	/* Descriptors were freed; transmit can make progress again. */
	ifq_clr_oactive(&ifp->if_snd);

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = letoh32(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			printf("%s: Tx queue mismatch, index %d\n",
			    sc->sc_dev.dv_xname, txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}
705 :
/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.  Drains the Rx completion
 *	queue, passing good frames up the stack via an mbuf list and
 *	recycling buffers for bad ones, then updates both the completion
 *	consumer index and the Rx buffer producer index on the chip.
 */
void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	/* Completion queue empty: nothing to do. */
	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = letoh32(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			/* Errored frame: just recycle the descriptor. */
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifndef __STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		/* Shift payload by 2 so the IP header lands aligned. */
		m->m_data += 2;

		/*
		 * Note that we use cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __STRICT_ALIGNMENT */

		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));
}
828 :
829 : /*
830 : * sf_tick:
831 : *
832 : * One second timer, used to tick the MII and update stats.
833 : */
834 : void
835 0 : sf_tick(void *arg)
836 : {
837 0 : struct sf_softc *sc = arg;
838 : int s;
839 :
840 0 : s = splnet();
841 0 : mii_tick(&sc->sc_mii);
842 0 : sf_stats_update(sc);
843 0 : splx(s);
844 :
845 0 : timeout_add_sec(&sc->sc_mii_timeout, 1);
846 0 : }
847 :
848 : /*
849 : * sf_stats_update:
850 : *
851 : * Read the statitistics counters.
852 : */
853 : void
854 0 : sf_stats_update(struct sf_softc *sc)
855 : {
856 0 : struct sf_stats stats;
857 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
858 : uint32_t *p;
859 : u_int i;
860 :
861 0 : p = &stats.TransmitOKFrames;
862 0 : for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
863 0 : *p++ = sf_genreg_read(sc,
864 : SF_STATS_BASE + (i * sizeof(uint32_t)));
865 0 : sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
866 : }
867 :
868 0 : ifp->if_collisions += stats.SingleCollisionFrames +
869 0 : stats.MultipleCollisionFrames;
870 :
871 0 : ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
872 0 : stats.TransmitAbortDueToExcessingDeferral +
873 0 : stats.FramesLostDueToInternalTransmitErrors;
874 :
875 0 : ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
876 0 : stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
877 0 : stats.ReceiveFramesJabbersError +
878 0 : stats.FramesLostDueToInternalReceiveErrors;
879 0 : }
880 :
881 : /*
882 : * sf_reset:
883 : *
884 : * Perform a soft reset on the Starfire.
885 : */
886 : void
887 0 : sf_reset(struct sf_softc *sc)
888 : {
889 : int i;
890 :
891 0 : sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);
892 :
893 0 : sf_macreset(sc);
894 :
895 0 : sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
896 0 : for (i = 0; i < 1000; i++) {
897 0 : delay(10);
898 0 : if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
899 0 : PDC_SoftReset) == 0)
900 : break;
901 : }
902 :
903 0 : if (i == 1000) {
904 0 : printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
905 0 : sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
906 0 : }
907 :
908 0 : delay(1000);
909 0 : }
910 :
911 : /*
912 : * sf_macreset:
913 : *
914 : * Reset the MAC portion of the Starfire.
915 : */
916 : void
917 0 : sf_macreset(struct sf_softc *sc)
918 : {
919 :
920 0 : sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
921 0 : delay(1000);
922 0 : sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
923 0 : }
924 :
925 : /*
926 : * sf_init: [ifnet interface function]
927 : *
928 : * Initialize the interface. Must be called at splnet().
929 : */
930 : int
931 0 : sf_init(struct ifnet *ifp)
932 : {
933 0 : struct sf_softc *sc = ifp->if_softc;
934 : struct sf_descsoft *ds;
935 : int error = 0;
936 : u_int i;
937 :
938 : /*
939 : * Cancel any pending I/O.
940 : */
941 0 : sf_stop(ifp, 0);
942 :
943 : /*
944 : * Reset the Starfire to a known state.
945 : */
946 0 : sf_reset(sc);
947 :
948 : /* Clear the stat counters. */
949 0 : for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
950 0 : sf_genreg_write(sc, SF_STATS_BASE + i, 0);
951 :
952 : /*
953 : * Initialize the transmit descriptor ring.
954 : */
955 0 : memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
956 0 : sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
957 0 : sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
958 0 : sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);
959 :
960 : /*
961 : * Initialize the transmit completion ring.
962 : */
963 0 : for (i = 0; i < SF_NTCD; i++) {
964 0 : sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
965 0 : SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
966 : }
967 0 : sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
968 0 : sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));
969 :
970 : /*
971 : * Initialize the receive descriptor ring.
972 : */
973 0 : for (i = 0; i < SF_NRXDESC; i++) {
974 0 : ds = &sc->sc_rxsoft[i];
975 0 : if (ds->ds_mbuf == NULL) {
976 0 : if ((error = sf_add_rxbuf(sc, i)) != 0) {
977 0 : printf("%s: unable to allocate or map rx "
978 : "buffer %d, error = %d\n",
979 0 : sc->sc_dev.dv_xname, i, error);
980 : /*
981 : * XXX Should attempt to run with fewer receive
982 : * XXX buffers instead of just failing.
983 : */
984 0 : sf_rxdrain(sc);
985 0 : goto out;
986 : }
987 : } else
988 0 : SF_INIT_RXDESC(sc, i);
989 : }
990 0 : sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
991 0 : sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
992 0 : sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);
993 :
994 : /*
995 : * Initialize the receive completion ring.
996 : */
997 0 : for (i = 0; i < SF_NRCD; i++) {
998 0 : sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
999 0 : sc->sc_rxcomp[i].rcd_word1 = 0;
1000 0 : sc->sc_rxcomp[i].rcd_word2 = 0;
1001 0 : sc->sc_rxcomp[i].rcd_timestamp = 0;
1002 0 : SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1003 : }
1004 0 : sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
1005 : RCQ1C_RxCompletionQ1Type(3));
1006 0 : sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);
1007 :
1008 : /*
1009 : * Initialize the Tx CSR.
1010 : */
1011 0 : sc->sc_TransmitFrameCSR = 0;
1012 0 : sf_funcreg_write(sc, SF_TransmitFrameCSR,
1013 : sc->sc_TransmitFrameCSR |
1014 : TFCSR_TransmitThreshold(sc->sc_txthresh));
1015 :
1016 : /*
1017 : * Initialize the Tx descriptor control register.
1018 : */
1019 0 : sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
1020 : TDQC_TxDmaBurstSize(4) | /* default */
1021 : TDQC_MinFrameSpacing(3) | /* 128 bytes */
1022 : TDQC_TxDescType(0);
1023 0 : sf_funcreg_write(sc, SF_TxDescQueueCtrl,
1024 : sc->sc_TxDescQueueCtrl |
1025 : TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));
1026 :
1027 : /*
1028 : * Initialize the Rx descriptor control registers.
1029 : */
1030 0 : sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
1031 : RDQ1C_RxQ1BufferLength(MCLBYTES) |
1032 : RDQ1C_RxDescSpacing(0));
1033 0 : sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);
1034 :
1035 : /*
1036 : * Initialize the Tx descriptor producer indices.
1037 : */
1038 0 : sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
1039 : TDQPI_HiPrTxProducerIndex(0) |
1040 : TDQPI_LoPrTxProducerIndex(0));
1041 :
1042 : /*
1043 : * Initialize the Rx descriptor producer indices.
1044 : */
1045 0 : sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
1046 : RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
1047 0 : sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
1048 : RXQ2P_RxDescQ2Producer(0));
1049 :
1050 : /*
1051 : * Initialize the Tx and Rx completion queue consumer indices.
1052 : */
1053 0 : sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
1054 : CQCI_TxCompletionConsumerIndex(0) |
1055 : CQCI_RxCompletionQ1ConsumerIndex(0));
1056 0 : sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);
1057 :
1058 : /*
1059 : * Initialize the Rx DMA control register.
1060 : */
1061 0 : sf_funcreg_write(sc, SF_RxDmaCtrl,
1062 : RDC_RxHighPriorityThreshold(6) | /* default */
1063 : RDC_RxBurstSize(4)); /* default */
1064 :
1065 : /*
1066 : * Set the receive filter.
1067 : */
1068 0 : sc->sc_RxAddressFilteringCtl = 0;
1069 0 : sf_set_filter(sc);
1070 :
1071 : /*
1072 : * Set MacConfig1. When we set the media, MacConfig1 will
1073 : * actually be written and the MAC part reset.
1074 : */
1075 0 : sc->sc_MacConfig1 = MC1_PadEn;
1076 :
1077 : /*
1078 : * Set the media.
1079 : */
1080 0 : mii_mediachg(&sc->sc_mii);
1081 :
1082 : /*
1083 : * Initialize the interrupt register.
1084 : */
1085 0 : sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
1086 : IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
1087 : IS_StatisticWrapInt;
1088 0 : sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);
1089 :
1090 0 : sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
1091 : PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));
1092 :
1093 : /*
1094 : * Start the transmit and receive processes.
1095 : */
1096 0 : sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
1097 : GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);
1098 :
1099 : /* Start the on second clock. */
1100 0 : timeout_add_sec(&sc->sc_mii_timeout, 1);
1101 :
1102 : /*
1103 : * Note that the interface is now running.
1104 : */
1105 0 : ifp->if_flags |= IFF_RUNNING;
1106 0 : ifq_clr_oactive(&ifp->if_snd);
1107 :
1108 : out:
1109 0 : if (error) {
1110 0 : ifp->if_flags &= ~IFF_RUNNING;
1111 0 : ifq_clr_oactive(&ifp->if_snd);
1112 0 : ifp->if_timer = 0;
1113 0 : printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1114 0 : }
1115 0 : return (error);
1116 : }
1117 :
1118 : /*
1119 : * sf_rxdrain:
1120 : *
1121 : * Drain the receive queue.
1122 : */
1123 : void
1124 0 : sf_rxdrain(struct sf_softc *sc)
1125 : {
1126 : struct sf_descsoft *ds;
1127 : int i;
1128 :
1129 0 : for (i = 0; i < SF_NRXDESC; i++) {
1130 0 : ds = &sc->sc_rxsoft[i];
1131 0 : if (ds->ds_mbuf != NULL) {
1132 0 : bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1133 0 : m_freem(ds->ds_mbuf);
1134 0 : ds->ds_mbuf = NULL;
1135 0 : }
1136 : }
1137 0 : }
1138 :
1139 : /*
1140 : * sf_stop: [ifnet interface function]
1141 : *
1142 : * Stop transmission on the interface.
1143 : */
void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	timeout_del(&sc->sc_mii_timeout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.  Each loaded DMA map
	 * is unloaded before its mbuf chain is freed.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
	/* No Tx descriptors are outstanding any more. */
	sc->sc_txpending = 0;

	/* When disabling (vs. merely stopping), free the Rx buffers too. */
	if (disable)
		sf_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
}
1186 :
1187 : /*
1188 : * sf_read_eeprom:
1189 : *
1190 : * Read from the Starfire EEPROM.
1191 : */
1192 : uint8_t
1193 0 : sf_read_eeprom(struct sf_softc *sc, int offset)
1194 : {
1195 : uint32_t reg;
1196 :
1197 0 : reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));
1198 :
1199 0 : return ((reg >> (8 * (offset & 3))) & 0xff);
1200 : }
1201 :
1202 : /*
1203 : * sf_add_rxbuf:
1204 : *
1205 : * Add a receive buffer to the indicated descriptor.
1206 : */
int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header; fail softly if none available. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster; without one the buffer is too small for a frame. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* If this slot previously held a buffer, unload its DMA map first. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Map the whole cluster for device reads (incoming packets). */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		/*
		 * NOTE(review): load failure is treated as fatal; the
		 * XXX suggests graceful recovery was intended but never
		 * implemented (ds_mbuf is left pointing at an unmapped
		 * mbuf here, so simply returning would leak state).
		 */
		panic("sf_add_rxbuf"); /* XXX */
	}

	/* Make the buffer visible to the device before handing it over. */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Re-arm the hardware descriptor for this slot. */
	SF_INIT_RXDESC(sc, idx);

	return (0);
}
1245 :
1246 : void
1247 0 : sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
1248 : {
1249 : uint32_t reg0, reg1, reg2;
1250 :
1251 0 : reg0 = enaddr[5] | (enaddr[4] << 8);
1252 0 : reg1 = enaddr[3] | (enaddr[2] << 8);
1253 0 : reg2 = enaddr[1] | (enaddr[0] << 8);
1254 :
1255 0 : sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
1256 0 : sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
1257 0 : sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
1258 0 : }
1259 :
1260 : void
1261 0 : sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
1262 : {
1263 : uint32_t hash, slot, reg;
1264 :
1265 0 : hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
1266 0 : slot = hash >> 4;
1267 :
1268 0 : reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
1269 0 : reg |= 1 << (hash & 0xf);
1270 0 : sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
1271 0 : }
1272 :
1273 : /*
1274 : * sf_set_filter:
1275 : *
1276 : * Set the Starfire receive filter.
1277 : */
void
sf_set_filter(struct sf_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	/* Pass broadcast only when the interface asks for it. */
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	/* Promiscuous mode implies accepting all multicast as well. */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));

	/* Address ranges cannot be expressed in the hash; accept all. */
	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	/* Commit the accumulated filter control bits to the chip. */
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}
1354 :
1355 : /*
1356 : * sf_mii_read: [mii interface function]
1357 : *
1358 : * Read from the MII.
1359 : */
1360 : int
1361 0 : sf_mii_read(struct device *self, int phy, int reg)
1362 : {
1363 0 : struct sf_softc *sc = (void *) self;
1364 : uint32_t v;
1365 : int i;
1366 :
1367 0 : for (i = 0; i < 1000; i++) {
1368 0 : v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
1369 0 : if (v & MiiDataValid)
1370 : break;
1371 0 : delay(1);
1372 : }
1373 :
1374 0 : if ((v & MiiDataValid) == 0)
1375 0 : return (0);
1376 :
1377 0 : if (MiiRegDataPort(v) == 0xffff)
1378 0 : return (0);
1379 :
1380 0 : return (MiiRegDataPort(v));
1381 0 : }
1382 :
1383 : /*
1384 : * sf_mii_write: [mii interface function]
1385 : *
1386 : * Write to the MII.
1387 : */
1388 : void
1389 0 : sf_mii_write(struct device *self, int phy, int reg, int val)
1390 : {
1391 0 : struct sf_softc *sc = (void *) self;
1392 : int i;
1393 :
1394 0 : sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);
1395 :
1396 0 : for (i = 0; i < 1000; i++) {
1397 0 : if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
1398 0 : MiiBusy) == 0)
1399 0 : return;
1400 0 : delay(1);
1401 : }
1402 :
1403 0 : printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
1404 0 : }
1405 :
1406 : /*
1407 : * sf_mii_statchg: [mii interface function]
1408 : *
1409 : * Callback from the PHY when the media changes.
1410 : */
1411 : void
1412 0 : sf_mii_statchg(struct device *self)
1413 : {
1414 0 : struct sf_softc *sc = (void *) self;
1415 : uint32_t ipg;
1416 :
1417 0 : if (sc->sc_mii.mii_media_active & IFM_FDX) {
1418 0 : sc->sc_MacConfig1 |= MC1_FullDuplex;
1419 : ipg = 0x15;
1420 0 : } else {
1421 0 : sc->sc_MacConfig1 &= ~MC1_FullDuplex;
1422 : ipg = 0x11;
1423 : }
1424 :
1425 0 : sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
1426 0 : sf_macreset(sc);
1427 :
1428 0 : sf_genreg_write(sc, SF_BkToBkIPG, ipg);
1429 0 : }
1430 :
1431 : /*
1432 : * sf_mediastatus: [ifmedia interface function]
1433 : *
1434 : * Callback from ifmedia to request current media status.
1435 : */
1436 : void
1437 0 : sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1438 : {
1439 0 : struct sf_softc *sc = ifp->if_softc;
1440 :
1441 0 : mii_pollstat(&sc->sc_mii);
1442 0 : ifmr->ifm_status = sc->sc_mii.mii_media_status;
1443 0 : ifmr->ifm_active = sc->sc_mii.mii_media_active;
1444 0 : }
1445 :
1446 : /*
1447 : * sf_mediachange: [ifmedia interface function]
1448 : *
1449 : * Callback from ifmedia to request new media setting.
1450 : */
1451 : int
1452 0 : sf_mediachange(struct ifnet *ifp)
1453 : {
1454 0 : struct sf_softc *sc = ifp->if_softc;
1455 :
1456 0 : if (ifp->if_flags & IFF_UP)
1457 0 : mii_mediachg(&sc->sc_mii);
1458 0 : return (0);
1459 : }
|