Line data Source code
1 : /* $OpenBSD: hme.c,v 1.81 2017/01/22 10:17:38 dlg Exp $ */
2 : /* $NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $ */
3 :
4 : /*-
5 : * Copyright (c) 1999 The NetBSD Foundation, Inc.
6 : * All rights reserved.
7 : *
8 : * This code is derived from software contributed to The NetBSD Foundation
9 : * by Paul Kranenburg.
10 : *
11 : * Redistribution and use in source and binary forms, with or without
12 : * modification, are permitted provided that the following conditions
13 : * are met:
14 : * 1. Redistributions of source code must retain the above copyright
15 : * notice, this list of conditions and the following disclaimer.
16 : * 2. Redistributions in binary form must reproduce the above copyright
17 : * notice, this list of conditions and the following disclaimer in the
18 : * documentation and/or other materials provided with the distribution.
19 : *
20 : * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 : * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 : * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 : * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 : * POSSIBILITY OF SUCH DAMAGE.
31 : */
32 :
33 : /*
34 : * HME Ethernet module driver.
35 : */
36 :
37 : #include "bpfilter.h"
38 :
39 : #undef HMEDEBUG
40 :
41 : #include <sys/param.h>
42 : #include <sys/systm.h>
43 : #include <sys/kernel.h>
44 : #include <sys/mbuf.h>
45 : #include <sys/syslog.h>
46 : #include <sys/socket.h>
47 : #include <sys/device.h>
48 : #include <sys/malloc.h>
49 : #include <sys/ioctl.h>
50 : #include <sys/errno.h>
51 :
52 : #include <net/if.h>
53 : #include <net/if_media.h>
54 :
55 : #include <netinet/in.h>
56 : #include <netinet/if_ether.h>
57 :
58 : #if NBPFILTER > 0
59 : #include <net/bpf.h>
60 : #endif
61 :
62 : #include <dev/mii/mii.h>
63 : #include <dev/mii/miivar.h>
64 :
65 : #include <machine/bus.h>
66 :
67 : #include <dev/ic/hmereg.h>
68 : #include <dev/ic/hmevar.h>
69 :
/* autoconf(9) glue: driver name "hme", network-interface device class */
struct cfdriver hme_cd = {
	NULL, "hme", DV_IFNET
};
73 :
74 : #define HME_RX_OFFSET 2
75 :
76 : void hme_start(struct ifnet *);
77 : void hme_stop(struct hme_softc *, int);
78 : int hme_ioctl(struct ifnet *, u_long, caddr_t);
79 : void hme_tick(void *);
80 : void hme_watchdog(struct ifnet *);
81 : void hme_init(struct hme_softc *);
82 : void hme_meminit(struct hme_softc *);
83 : void hme_mifinit(struct hme_softc *);
84 : void hme_reset(struct hme_softc *);
85 : void hme_iff(struct hme_softc *);
86 : void hme_fill_rx_ring(struct hme_softc *);
87 : int hme_newbuf(struct hme_softc *, struct hme_sxd *);
88 :
89 : /* MII methods & callbacks */
90 : static int hme_mii_readreg(struct device *, int, int);
91 : static void hme_mii_writereg(struct device *, int, int, int);
92 : static void hme_mii_statchg(struct device *);
93 :
94 : int hme_mediachange(struct ifnet *);
95 : void hme_mediastatus(struct ifnet *, struct ifmediareq *);
96 :
97 : int hme_eint(struct hme_softc *, u_int);
98 : int hme_rint(struct hme_softc *);
99 : int hme_tint(struct hme_softc *);
100 :
101 : void
102 0 : hme_config(struct hme_softc *sc)
103 : {
104 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
105 0 : struct mii_data *mii = &sc->sc_mii;
106 : struct mii_softc *child;
107 0 : bus_dma_tag_t dmatag = sc->sc_dmatag;
108 0 : bus_dma_segment_t seg;
109 : bus_size_t size;
110 0 : int rseg, error, i;
111 :
112 : /*
113 : * HME common initialization.
114 : *
115 : * hme_softc fields that must be initialized by the front-end:
116 : *
117 : * the bus tag:
118 : * sc_bustag
119 : *
120 : * the dma bus tag:
121 : * sc_dmatag
122 : *
123 : * the bus handles:
124 : * sc_seb (Shared Ethernet Block registers)
125 : * sc_erx (Receiver Unit registers)
126 : * sc_etx (Transmitter Unit registers)
127 : * sc_mac (MAC registers)
128 : * sc_mif (Management Interface registers)
129 : *
130 : * the maximum bus burst size:
131 : * sc_burst
132 : *
133 : * the local Ethernet address:
134 : * sc_arpcom.ac_enaddr
135 : *
136 : */
137 :
138 : /* Make sure the chip is stopped. */
139 0 : hme_stop(sc, 0);
140 :
141 0 : for (i = 0; i < HME_TX_RING_SIZE; i++) {
142 0 : if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS,
143 : MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
144 0 : &sc->sc_txd[i].sd_map) != 0) {
145 0 : sc->sc_txd[i].sd_map = NULL;
146 0 : goto fail;
147 : }
148 : }
149 0 : for (i = 0; i < HME_RX_RING_SIZE; i++) {
150 0 : if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
151 : MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
152 0 : &sc->sc_rxd[i].sd_map) != 0) {
153 0 : sc->sc_rxd[i].sd_map = NULL;
154 0 : goto fail;
155 : }
156 : }
157 0 : if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
158 0 : BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
159 0 : sc->sc_rxmap_spare = NULL;
160 0 : goto fail;
161 : }
162 :
163 : /*
164 : * Allocate DMA capable memory
165 : * Buffer descriptors must be aligned on a 2048 byte boundary;
166 : * take this into account when calculating the size. Note that
167 : * the maximum number of descriptors (256) occupies 2048 bytes,
168 : * so we allocate that much regardless of the number of descriptors.
169 : */
170 : size = (HME_XD_SIZE * HME_RX_RING_MAX) + /* RX descriptors */
171 : (HME_XD_SIZE * HME_TX_RING_MAX); /* TX descriptors */
172 :
173 : /* Allocate DMA buffer */
174 0 : if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
175 0 : BUS_DMA_NOWAIT)) != 0) {
176 0 : printf("\n%s: DMA buffer alloc error %d\n",
177 0 : sc->sc_dev.dv_xname, error);
178 0 : return;
179 : }
180 :
181 : /* Map DMA memory in CPU addressable space */
182 0 : if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
183 0 : &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
184 0 : printf("\n%s: DMA buffer map error %d\n",
185 0 : sc->sc_dev.dv_xname, error);
186 0 : bus_dmamap_unload(dmatag, sc->sc_dmamap);
187 0 : bus_dmamem_free(dmatag, &seg, rseg);
188 0 : return;
189 : }
190 :
191 0 : if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
192 0 : BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
193 0 : printf("\n%s: DMA map create error %d\n",
194 0 : sc->sc_dev.dv_xname, error);
195 0 : return;
196 : }
197 :
198 : /* Load the buffer */
199 0 : if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
200 : sc->sc_rb.rb_membase, size, NULL,
201 0 : BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
202 0 : printf("\n%s: DMA buffer map load error %d\n",
203 0 : sc->sc_dev.dv_xname, error);
204 0 : bus_dmamem_free(dmatag, &seg, rseg);
205 0 : return;
206 : }
207 0 : sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
208 :
209 0 : printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
210 :
211 : /* Initialize ifnet structure. */
212 0 : strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
213 0 : ifp->if_softc = sc;
214 0 : ifp->if_start = hme_start;
215 0 : ifp->if_ioctl = hme_ioctl;
216 0 : ifp->if_watchdog = hme_watchdog;
217 0 : ifp->if_flags =
218 : IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
219 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
220 :
221 : /* Initialize ifmedia structures and MII info */
222 0 : mii->mii_ifp = ifp;
223 0 : mii->mii_readreg = hme_mii_readreg;
224 0 : mii->mii_writereg = hme_mii_writereg;
225 0 : mii->mii_statchg = hme_mii_statchg;
226 :
227 0 : ifmedia_init(&mii->mii_media, IFM_IMASK,
228 : hme_mediachange, hme_mediastatus);
229 :
230 0 : hme_mifinit(sc);
231 :
232 0 : if (sc->sc_tcvr == -1)
233 0 : mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
234 : MII_OFFSET_ANY, 0);
235 : else
236 0 : mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
237 : MII_OFFSET_ANY, 0);
238 :
239 0 : child = LIST_FIRST(&mii->mii_phys);
240 0 : if (child == NULL) {
241 : /* No PHY attached */
242 0 : ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
243 0 : ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
244 0 : } else {
245 : /*
246 : * Walk along the list of attached MII devices and
247 : * establish an `MII instance' to `phy number'
248 : * mapping. We'll use this mapping in media change
249 : * requests to determine which phy to use to program
250 : * the MIF configuration register.
251 : */
252 0 : for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
253 : /*
254 : * Note: we support just two PHYs: the built-in
255 : * internal device and an external on the MII
256 : * connector.
257 : */
258 0 : if (child->mii_phy > 1 || child->mii_inst > 1) {
259 0 : printf("%s: cannot accommodate MII device %s"
260 : " at phy %d, instance %lld\n",
261 : sc->sc_dev.dv_xname,
262 0 : child->mii_dev.dv_xname,
263 0 : child->mii_phy, child->mii_inst);
264 0 : continue;
265 : }
266 :
267 0 : sc->sc_phys[child->mii_inst] = child->mii_phy;
268 0 : }
269 :
270 : /*
271 : * XXX - we can really do the following ONLY if the
272 : * phy indeed has the auto negotiation capability!!
273 : */
274 0 : ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
275 : }
276 :
277 : /* Attach the interface. */
278 0 : if_attach(ifp);
279 0 : ether_ifattach(ifp);
280 :
281 0 : timeout_set(&sc->sc_tick_ch, hme_tick, sc);
282 0 : return;
283 :
284 : fail:
285 0 : if (sc->sc_rxmap_spare != NULL)
286 0 : bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
287 0 : for (i = 0; i < HME_TX_RING_SIZE; i++)
288 0 : if (sc->sc_txd[i].sd_map != NULL)
289 0 : bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
290 0 : for (i = 0; i < HME_RX_RING_SIZE; i++)
291 0 : if (sc->sc_rxd[i].sd_map != NULL)
292 0 : bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
293 0 : }
294 :
295 : void
296 0 : hme_unconfig(struct hme_softc *sc)
297 : {
298 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
299 : int i;
300 :
301 0 : hme_stop(sc, 1);
302 :
303 0 : bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
304 0 : for (i = 0; i < HME_TX_RING_SIZE; i++)
305 0 : if (sc->sc_txd[i].sd_map != NULL)
306 0 : bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
307 0 : for (i = 0; i < HME_RX_RING_SIZE; i++)
308 0 : if (sc->sc_rxd[i].sd_map != NULL)
309 0 : bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
310 :
311 : /* Detach all PHYs */
312 0 : mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
313 :
314 : /* Delete all remaining media. */
315 0 : ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
316 :
317 0 : ether_ifdetach(ifp);
318 0 : if_detach(ifp);
319 0 : }
320 :
/*
 * One-second periodic timer: harvest the MAC collision counters into
 * if_collisions, replenish the receive ring if it ran completely dry,
 * and drive the MII state machine.  Re-arms itself every second.
 */
void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	int s;

	s = splnet();
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/*
	 * If buffer allocation fails, the receive ring may become
	 * empty. There is no receive interrupt to recover from that.
	 */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		hme_fill_rx_ring(sc);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}
360 :
/*
 * Reset the chip by re-running the full initialization sequence,
 * serialized against the network interrupt level.
 */
void
hme_reset(struct hme_softc *sc)
{
	int spl;

	spl = splnet();
	hme_init(sc);
	splx(spl);
}
370 :
/*
 * Stop the chip and reclaim all transmit/receive state.
 *
 * If `softonly' is zero the hardware is also quiesced: MII brought
 * down, all interrupts masked, and the ETX/ERX blocks reset.  In both
 * cases any mbufs still held by the tx/rx rings are unloaded and freed
 * and the ring indexes are cleared.
 */
void
hme_stop(struct hme_softc *sc, int softonly)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	if (!softonly) {
		mii_down(&sc->sc_mii);

		/* Mask all interrupts */
		bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

		/* Reset transmitter and receiver */
		bus_space_write_4(t, seb, HME_SEBI_RESET,
		    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

		/* Poll until the chip clears the reset bits (20 x 20us max). */
		for (n = 0; n < 20; n++) {
			u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
			if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
				break;
			DELAY(20);
		}
		if (n >= 20)
			printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
	}

	/* Reclaim any mbufs still queued on the transmit ring. */
	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;

	/* Likewise for buffers posted to the receive ring. */
	for (n = 0; n < HME_RX_RING_SIZE; n++) {
		if (sc->sc_rxd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_rxd[n].sd_map,
			    0, sc->sc_rxd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_rxd[n].sd_map);
			m_freem(sc->sc_rxd[n].sd_mbuf);
			sc->sc_rxd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = 0;
}
432 :
/*
 * Lay out the descriptor rings inside the DMA memory allocated by
 * hme_config(): transmit descriptors first, then receive descriptors,
 * each ring starting on a 2048 byte boundary, and clear every
 * descriptor.  Finally post fresh receive buffers.
 */
void
hme_meminit(struct hme_softc *sc)
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	/* Walk the KVA (p) and bus (dma) addresses in lock-step. */
	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 0);
		sc->sc_rxd[i].sd_mbuf = NULL;
	}

	/* rx ring accounting: low watermark 2, high HME_RX_RING_SIZE */
	if_rxr_init(&sc->sc_rx_ring, 2, HME_RX_RING_SIZE);
	hme_fill_rx_ring(sc);
}
487 :
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc, 0);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	/* max tx frame size: leave room for a VLAN tag (IFCAP_VLAN_MTU) */
	bus_space_write_4(t, mac, HME_MACI_TXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);


	/* Note: Accepting power-on default for other MAC registers here.. */


	/* step 5. RX MAC registers & counters */
	hme_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	/* max rx frame size: likewise leaves room for a VLAN tag */
	bus_space_write_4(t, mac, HME_MACI_RXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	/* Unmask only the interrupts hme_intr() actually handles. */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	    HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	    HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));

	/* Translate the front-end's burst size into the chip encoding. */
	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error	"RX ring size must be 32, 64, 128, or 256"
#endif
	/* Enable DMA; HME_RX_OFFSET shifted into the first-byte-offset field */
	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Kick output in case packets were queued while we were down. */
	hme_start(ifp);
}
630 :
/*
 * Start output: dequeue packets from the interface send queue, DMA-map
 * them and hand them to the transmit descriptor ring until the queue
 * is empty or the ring would overflow.
 */
void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct hme_ring *hr = &sc->sc_rb;
	struct mbuf *m;
	u_int32_t flags;
	bus_dmamap_t map;
	u_int32_t frag, cur, i;
	int error;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	/* Stop as soon as the producer slot still carries a live mbuf. */
	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		/* Keep some slack in the ring; back off if it would run out. */
		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* We are now committed to transmitting the packet. */
		ifq_deq_commit(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * One descriptor per DMA segment.  The first descriptor
		 * gets SOP but is deliberately NOT marked OWN yet, so the
		 * chip cannot start on a half-built chain; the others are
		 * handed over (OWN) immediately.
		 */
		for (i = 0; i < map->dm_nsegs; i++) {
			flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len);
			if (i == 0)
				flags |= HME_XD_SOP;
			else
				flags |= HME_XD_OWN;

			HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
			    map->dm_segs[i].ds_addr);
			HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

			cur = frag;
			if (++frag == HME_TX_RING_SIZE)
				frag = 0;
		}

		/* Set end of packet on last descriptor. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
		flags |= HME_XD_EOP;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);

		/*
		 * Swap maps so that the loaded map and the mbuf both sit
		 * on the packet's last slot (`cur'), where hme_tint()
		 * will find and release them together.
		 */
		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Give first frame over to the hardware. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod);
		flags |= HME_XD_OWN;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags);

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
		sc->sc_tx_prod = frag;

		/* Arm the watchdog while the packet is in flight. */
		ifp->if_timer = 5;
	}

	return;

drop:
	ifq_deq_commit(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}
739 :
/*
 * Transmit interrupt: walk the ring from the consumer index, reclaim
 * descriptors the chip has released (OWN cleared) and free the
 * associated mbufs, then restart output.  Always claims the interrupt.
 */
int
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;
	int cnt = sc->sc_tx_cnt;

	/* Fetch current position in the transmit ring */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

	for (;;) {
		/* No descriptors outstanding: nothing left to reclaim. */
		if (cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		/* Still owned by the hardware: stop here. */
		if (txflags & HME_XD_OWN)
			break;

		ifq_clr_oactive(&ifp->if_snd);

		/*
		 * Release the map and mbuf; hme_start() stores them on
		 * the packet's last descriptor slot.
		 */
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
			    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		/* Advance (and wrap) the consumer position. */
		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--cnt;
	}

	sc->sc_tx_cnt = cnt;
	/* Keep the watchdog armed while descriptors remain outstanding. */
	ifp->if_timer = cnt > 0 ? 5 : 0;

	/* Update ring */
	sc->sc_tx_cons = ri;

	/* Ring space was freed: try to queue more packets. */
	hme_start(ifp);

	return (1);
}
793 :
/*
 * Receive interrupt: harvest completed buffers from the receive ring,
 * batch them onto an mbuf list for the stack, and replenish the ring.
 * Always claims the interrupt.
 */
int
hme_rint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_rx_cons;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		/* Still owned by the chip: no more completed buffers. */
		if (flags & HME_XD_OWN)
			break;

		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
		    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);

		/* Take the mbuf out of the slot; a fresh one is posted later. */
		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;

		/* Advance (and wrap) the consumer position. */
		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;

		if_rxr_put(&sc->sc_rx_ring, 1);

		/* Drop frames that overflowed their receive buffer. */
		if (flags & HME_XD_OFL) {
			ifp->if_ierrors++;
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			m_freem(m);
			continue;
		}

		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	/* Hand the whole batch to the network stack at once. */
	if_input(ifp, &ml);

	sc->sc_rx_cons = ri;
	/* Replace the buffers we just consumed. */
	hme_fill_rx_ring(sc);
	return (1);
}
853 :
/*
 * Error interrupt handler: account the error conditions reported in
 * `status', then strip the bits handled here and by the normal rx/tx
 * paths.  Always claims the interrupt.
 */
int
hme_eint(struct hme_softc *sc, u_int status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* MIF interrupt (link status change notification). */
	if (status & HME_SEB_STAT_MIFIRQ) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		status &= ~HME_SEB_STAT_MIFIRQ;
	}

	/* Transmit defer timer expired: count as an output error. */
	if (status & HME_SEB_STAT_DTIMEXP) {
		ifp->if_oerrors++;
		status &= ~HME_SEB_STAT_DTIMEXP;
	}

	/* Receiver had no descriptors available: input error. */
	if (status & HME_SEB_STAT_NORXD) {
		ifp->if_ierrors++;
		status &= ~HME_SEB_STAT_NORXD;
	}

	/* These bits are serviced by hme_rint()/hme_tint(). */
	status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME |
	    HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_TXALL);

	if (status == 0)
		return (1);

#ifdef HME_DEBUG
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, HME_SEB_STAT_BITS);
#endif
	return (1);
}
886 :
887 : int
888 0 : hme_intr(void *v)
889 : {
890 0 : struct hme_softc *sc = (struct hme_softc *)v;
891 0 : bus_space_tag_t t = sc->sc_bustag;
892 0 : bus_space_handle_t seb = sc->sc_seb;
893 : u_int32_t status;
894 : int r = 0;
895 :
896 0 : status = bus_space_read_4(t, seb, HME_SEBI_STAT);
897 0 : if (status == 0xffffffff)
898 0 : return (0);
899 :
900 0 : if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
901 0 : r |= hme_eint(sc, status);
902 :
903 0 : if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
904 0 : r |= hme_tint(sc);
905 :
906 0 : if ((status & HME_SEB_STAT_RXTOHOST) != 0)
907 0 : r |= hme_rint(sc);
908 :
909 0 : return (r);
910 0 : }
911 :
912 :
913 : void
914 0 : hme_watchdog(struct ifnet *ifp)
915 : {
916 0 : struct hme_softc *sc = ifp->if_softc;
917 :
918 0 : log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
919 0 : ifp->if_oerrors++;
920 :
921 0 : hme_reset(sc);
922 0 : }
923 :
/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(struct hme_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int phy;
	u_int32_t v;

	/*
	 * Use the MDI sense bits to discover which transceiver is
	 * wired up; the external one takes precedence when both are
	 * reported.  sc_tcvr remembers the result (-1 = none sensed,
	 * in which case hme_config() probes all PHY addresses).
	 */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	phy = HME_PHYAD_EXTERNAL;
	if (v & HME_MIF_CFG_MDI1)
		phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL;
	else if (v & HME_MIF_CFG_MDI0)
		phy = sc->sc_tcvr = HME_PHYAD_INTERNAL;
	else
		sc->sc_tcvr = -1;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}
958 :
959 : /*
960 : * MII interface
961 : */
/*
 * Read a PHY register through an MIF frame operation.  Returns the
 * register data, or 0 when the PHY address is not one of the two
 * supported transceivers or the frame operation times out.  The MIF
 * and XIF configuration registers are restored before returning.
 */
static int
hme_mii_readreg(struct device *self, int phy, int reg)
{
	struct hme_softc *sc = (struct hme_softc *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* Only the internal and external transceiver addresses are valid. */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	/* Run the frame and poll for completion (TALSB), 100us maximum. */
	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			/* Completed: keep only the data field. */
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}
1017 :
/*
 * Write a PHY register through an MIF frame operation.  Silently
 * returns when the PHY address is not one of the two supported
 * transceivers.  The MIF and XIF configuration registers are restored
 * before returning.
 */
static void
hme_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	/* Run the frame and poll for completion (TALSB), 100us maximum. */
	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}
1070 :
1071 : static void
1072 0 : hme_mii_statchg(struct device *dev)
1073 : {
1074 0 : struct hme_softc *sc = (void *)dev;
1075 0 : bus_space_tag_t t = sc->sc_bustag;
1076 0 : bus_space_handle_t mac = sc->sc_mac;
1077 : u_int32_t v;
1078 :
1079 : #ifdef HMEDEBUG
1080 : if (sc->sc_debug)
1081 : printf("hme_mii_statchg: status change\n", phy);
1082 : #endif
1083 :
1084 : /* Set the MAC Full Duplex bit appropriately */
1085 : /* Apparently the hme chip is SIMPLEX if working in full duplex mode,
1086 : but not otherwise. */
1087 0 : v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
1088 0 : if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1089 0 : v |= HME_MAC_TXCFG_FULLDPLX;
1090 0 : sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX;
1091 0 : } else {
1092 0 : v &= ~HME_MAC_TXCFG_FULLDPLX;
1093 0 : sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX;
1094 : }
1095 0 : bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
1096 0 : }
1097 :
/*
 * ifmedia "change" callback: point the MIF at the PHY that backs the
 * selected media instance, enable the external MII drivers if that
 * PHY is the external one, then let mii(4) program the PHY itself.
 * Returns EINVAL for non-Ethernet media, otherwise mii_mediachg()'s
 * result.
 */
int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	/* Map the selected media instance to its PHY address. */
	uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif
	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	return (mii_mediachg(&sc->sc_mii));
}
1132 :
1133 : void
1134 0 : hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1135 : {
1136 0 : struct hme_softc *sc = ifp->if_softc;
1137 :
1138 0 : if ((ifp->if_flags & IFF_UP) == 0)
1139 0 : return;
1140 :
1141 0 : mii_pollstat(&sc->sc_mii);
1142 0 : ifmr->ifm_active = sc->sc_mii.mii_media_active;
1143 0 : ifmr->ifm_status = sc->sc_mii.mii_media_status;
1144 0 : }
1145 :
1146 : /*
1147 : * Process an ioctl request.
1148 : */
1149 : int
1150 0 : hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1151 : {
1152 0 : struct hme_softc *sc = ifp->if_softc;
1153 0 : struct ifreq *ifr = (struct ifreq *)data;
1154 : int s, error = 0;
1155 :
1156 0 : s = splnet();
1157 :
1158 0 : switch (cmd) {
1159 : case SIOCSIFADDR:
1160 0 : ifp->if_flags |= IFF_UP;
1161 0 : if (!(ifp->if_flags & IFF_RUNNING))
1162 0 : hme_init(sc);
1163 : break;
1164 :
1165 : case SIOCSIFFLAGS:
1166 0 : if (ifp->if_flags & IFF_UP) {
1167 0 : if (ifp->if_flags & IFF_RUNNING)
1168 0 : error = ENETRESET;
1169 : else
1170 0 : hme_init(sc);
1171 : } else {
1172 0 : if (ifp->if_flags & IFF_RUNNING)
1173 0 : hme_stop(sc, 0);
1174 : }
1175 : #ifdef HMEDEBUG
1176 : sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1177 : #endif
1178 : break;
1179 :
1180 : case SIOCGIFMEDIA:
1181 : case SIOCSIFMEDIA:
1182 0 : error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1183 0 : break;
1184 :
1185 : case SIOCGIFRXR:
1186 0 : error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
1187 0 : NULL, MCLBYTES, &sc->sc_rx_ring);
1188 0 : break;
1189 :
1190 : default:
1191 0 : error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1192 0 : }
1193 :
1194 0 : if (error == ENETRESET) {
1195 0 : if (ifp->if_flags & IFF_RUNNING)
1196 0 : hme_iff(sc);
1197 : error = 0;
1198 0 : }
1199 :
1200 0 : splx(s);
1201 0 : return (error);
1202 : }
1203 :
1204 : void
1205 0 : hme_iff(struct hme_softc *sc)
1206 : {
1207 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1208 : struct arpcom *ac = &sc->sc_arpcom;
1209 : struct ether_multi *enm;
1210 : struct ether_multistep step;
1211 0 : bus_space_tag_t t = sc->sc_bustag;
1212 0 : bus_space_handle_t mac = sc->sc_mac;
1213 0 : u_int32_t hash[4];
1214 : u_int32_t rxcfg, crc;
1215 :
1216 0 : rxcfg = bus_space_read_4(t, mac, HME_MACI_RXCFG);
1217 0 : rxcfg &= ~(HME_MAC_RXCFG_HENABLE | HME_MAC_RXCFG_PMISC);
1218 0 : ifp->if_flags &= ~IFF_ALLMULTI;
1219 : /* Clear hash table */
1220 0 : hash[0] = hash[1] = hash[2] = hash[3] = 0;
1221 :
1222 0 : if (ifp->if_flags & IFF_PROMISC) {
1223 0 : ifp->if_flags |= IFF_ALLMULTI;
1224 0 : rxcfg |= HME_MAC_RXCFG_PMISC;
1225 0 : } else if (ac->ac_multirangecnt > 0) {
1226 0 : ifp->if_flags |= IFF_ALLMULTI;
1227 0 : rxcfg |= HME_MAC_RXCFG_HENABLE;
1228 0 : hash[0] = hash[1] = hash[2] = hash[3] = 0xffff;
1229 0 : } else {
1230 0 : rxcfg |= HME_MAC_RXCFG_HENABLE;
1231 :
1232 0 : ETHER_FIRST_MULTI(step, ac, enm);
1233 0 : while (enm != NULL) {
1234 0 : crc = ether_crc32_le(enm->enm_addrlo,
1235 0 : ETHER_ADDR_LEN) >> 26;
1236 :
1237 : /* Set the corresponding bit in the filter. */
1238 0 : hash[crc >> 4] |= 1 << (crc & 0xf);
1239 :
1240 0 : ETHER_NEXT_MULTI(step, enm);
1241 : }
1242 : }
1243 :
1244 : /* Now load the hash table into the chip */
1245 0 : bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
1246 0 : bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
1247 0 : bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
1248 0 : bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
1249 0 : bus_space_write_4(t, mac, HME_MACI_RXCFG, rxcfg);
1250 0 : }
1251 :
/*
 * Refill the RX ring: for each slot credit obtained from the if_rxr
 * accounting, attach a fresh buffer, publish its DMA address, and
 * hand the descriptor to the chip by setting HME_XD_OWN.  Unused
 * credits are returned at the end.
 */
void
hme_fill_rx_ring(struct hme_softc *sc)
{
	struct hme_sxd *sd;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, HME_RX_RING_SIZE);
	    slots > 0; slots--) {
		/* Stop refilling as soon as buffer allocation fails. */
		if (hme_newbuf(sc, &sc->sc_rxd[sc->sc_rx_prod]))
			break;

		sd = &sc->sc_rxd[sc->sc_rx_prod];
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    sd->sd_map->dm_segs[0].ds_addr);
		/* OWN must be set last: it releases the slot to the chip. */
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));

		/* Advance the producer index, wrapping at ring end. */
		if (++sc->sc_rx_prod == HME_RX_RING_SIZE)
			sc->sc_rx_prod = 0;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);
}
1274 :
/*
 * Allocate and DMA-map a fresh receive buffer for descriptor `d'.
 * The new cluster is loaded into the spare RX map first; only after
 * that succeeds are the descriptor's map/mbuf swapped in, so any
 * failure leaves the old buffer fully intact.  Returns 0 on success,
 * ENOBUFS on allocation or mapping failure.
 */
int
hme_newbuf(struct hme_softc *sc, struct hme_sxd *d)
{
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * All operations should be on local variables and/or rx spare map
	 * until we're sure everything is a success.
	 */

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	/*
	 * Map from the cluster start for MCLBYTES - HME_RX_OFFSET bytes;
	 * m_data is advanced by HME_RX_OFFSET below — presumably the chip
	 * pads the packet start by that amount (NOTE(review): confirm
	 * against the RX completion path).
	 */
	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * At this point we have a new buffer loaded into the spare map.
	 * Just need to clear out the old mbuf/map and put the new one
	 * in place.
	 */

	/* Swap the freshly-loaded spare map with the descriptor's map. */
	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	/* Prepare the buffer for device writes before the chip owns it. */
	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}
|