Line data Source code
1 : /* $OpenBSD: gem.c,v 1.123 2018/02/07 22:35:14 bluhm Exp $ */
2 : /* $NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */
3 :
4 : /*
5 : *
6 : * Copyright (C) 2001 Eduardo Horvath.
7 : * All rights reserved.
8 : *
9 : *
10 : * Redistribution and use in source and binary forms, with or without
11 : * modification, are permitted provided that the following conditions
12 : * are met:
13 : * 1. Redistributions of source code must retain the above copyright
14 : * notice, this list of conditions and the following disclaimer.
15 : * 2. Redistributions in binary form must reproduce the above copyright
16 : * notice, this list of conditions and the following disclaimer in the
17 : * documentation and/or other materials provided with the distribution.
18 : *
19 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
20 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 : * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
23 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 : * SUCH DAMAGE.
30 : *
31 : */
32 :
33 : /*
34 : * Driver for Sun GEM ethernet controllers.
35 : */
36 :
37 : #include "bpfilter.h"
38 :
39 : #include <sys/param.h>
40 : #include <sys/systm.h>
41 : #include <sys/timeout.h>
42 : #include <sys/mbuf.h>
43 : #include <sys/syslog.h>
44 : #include <sys/malloc.h>
45 : #include <sys/kernel.h>
46 : #include <sys/socket.h>
47 : #include <sys/ioctl.h>
48 : #include <sys/errno.h>
49 : #include <sys/device.h>
50 : #include <sys/endian.h>
51 : #include <sys/atomic.h>
52 :
53 : #include <net/if.h>
54 : #include <net/if_media.h>
55 :
56 : #include <netinet/in.h>
57 : #include <netinet/if_ether.h>
58 :
59 : #if NBPFILTER > 0
60 : #include <net/bpf.h>
61 : #endif
62 :
63 : #include <machine/bus.h>
64 : #include <machine/intr.h>
65 :
66 : #include <dev/mii/mii.h>
67 : #include <dev/mii/miivar.h>
68 :
69 : #include <dev/ic/gemreg.h>
70 : #include <dev/ic/gemvar.h>
71 :
/* Poll budget for gem_bitwait(); individual polls are spaced 100us apart. */
#define TRIES	10000

/* Autoconf glue: all gem(4) instances attach under this driver class. */
struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};

/* Interface entry points and internal helpers. */
void		gem_start(struct ifqueue *);
void		gem_stop(struct ifnet *, int);
int		gem_ioctl(struct ifnet *, u_long, caddr_t);
void		gem_tick(void *);
void		gem_watchdog(struct ifnet *);
int		gem_init(struct ifnet *);
void		gem_init_regs(struct gem_softc *);
int		gem_ringsize(int);
int		gem_meminit(struct gem_softc *);
void		gem_mifinit(struct gem_softc *);
int		gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
		    u_int32_t, u_int32_t);
void		gem_reset(struct gem_softc *);
int		gem_reset_rx(struct gem_softc *);
int		gem_reset_tx(struct gem_softc *);
int		gem_disable_rx(struct gem_softc *);
int		gem_disable_tx(struct gem_softc *);
void		gem_rx_watchdog(void *);
void		gem_rxdrain(struct gem_softc *);
void		gem_fill_rx_ring(struct gem_softc *);
int		gem_add_rxbuf(struct gem_softc *, int idx);
int		gem_load_mbuf(struct gem_softc *, struct gem_sxd *,
		    struct mbuf *);
void		gem_iff(struct gem_softc *);

/* MII methods & callbacks */
int		gem_mii_readreg(struct device *, int, int);
void		gem_mii_writereg(struct device *, int, int, int);
void		gem_mii_statchg(struct device *);
int		gem_pcs_readreg(struct device *, int, int);
void		gem_pcs_writereg(struct device *, int, int, int);

/* ifmedia callbacks. */
int		gem_mediachange(struct ifnet *);
void		gem_mediastatus(struct ifnet *, struct ifmediareq *);

/* Interrupt service helpers, one per interrupt cause. */
int		gem_eint(struct gem_softc *, u_int);
int		gem_rint(struct gem_softc *);
int		gem_tint(struct gem_softc *, u_int32_t);
int		gem_pint(struct gem_softc *);

/* Debug printf, only active when the interface has IFF_DEBUG set. */
#ifdef GEM_DEBUG
#define DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define DPRINTF(sc, x)	/* nothing */
#endif
124 :
/*
 * Attach a Gem interface to the system.
 *
 * Allocates and maps the DMA control block and the per-descriptor
 * rx/tx DMA maps, probes for a PHY (external MII first, then internal,
 * then PCS/SERDES), registers media, and attaches the ifnet.  On any
 * allocation failure, everything acquired so far is released through
 * the fail_* goto chain at the bottom and the attach is abandoned.
 */
void
gem_config(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error, mii_flags, phyad;
	struct ifmedia_entry *ifm;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size; the register reports it in 64-byte units. */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_qstart = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_NTXDESC - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	/* Bad things will happen if we touch this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, 0);

	gem_mifinit(sc);

	mii_flags = MIIF_DOPAUSE;

	/*
	 * Look for an external PHY.
	 */
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		/* ERI only answers at its fixed external PHY address. */
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, mii_flags);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
	 * trusted when the firmware has powered down the chip
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    (sc->sc_mif_config & GEM_MIF_CONFIG_MDI0 || GEM_IS_APPLE(sc))) {
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		/* Per-variant fixed PHY addresses; probe all otherwise. */
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, mii_flags);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any MII
	 * devices.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_variant != GEM_SUN_ERI) {
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);

		/* PCS registers are accessed through their own methods. */
		mii->mii_readreg = gem_pcs_readreg;
		mii->mii_writereg = gem_pcs_writereg;

		mii_flags |= MIIF_NOISOLATE;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, mii_flags);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Check if we support GigE media. */
	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
			sc->sc_flags |= GEM_GIGABIT;
			break;
		}
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	timeout_set(&sc->sc_rx_watchdog, gem_rx_watchdog, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}
380 :
/*
 * Detach a Gem interface: stop the hardware, release all DMA resources
 * acquired in gem_config() (in reverse order of allocation), detach the
 * PHYs and media, and finally tear down the ifnet.
 */
void
gem_unconfig(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	/* Hard-stop: full reset, not just a software quiesce. */
	gem_stop(ifp, 1);

	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	/* Control block: unload, destroy map, unmap, free memory. */
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
}
414 :
415 :
/*
 * One-second periodic timer.  Harvests the hardware collision and
 * receive-error counters into the ifnet statistics, refills the rx ring
 * if it has run completely dry, and drives the MII autonegotiation
 * state machine.  Re-arms itself at the end.
 */
void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	int s;
	u_int32_t v;

	s = splnet();
	/* unload collisions counters */
	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
	/* Excess/late collisions count both as collisions and as errors. */
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_RX_CODE_VIOL, 0);

	/*
	 * If buffer allocation fails, the receive ring may become
	 * empty. There is no receive interrupt to recover from that.
	 */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0) {
		gem_fill_rx_ring(sc);
		bus_space_write_4(t, mac, GEM_RX_KICK, sc->sc_rx_prod);
	}

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}
466 :
467 : int
468 0 : gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
469 : u_int32_t clr, u_int32_t set)
470 : {
471 : int i;
472 : u_int32_t reg;
473 :
474 0 : for (i = TRIES; i--; DELAY(100)) {
475 0 : reg = bus_space_read_4(sc->sc_bustag, h, r);
476 0 : if ((reg & clr) == 0 && (reg & set) == set)
477 0 : return (1);
478 : }
479 :
480 0 : return (0);
481 0 : }
482 :
/*
 * Full chip reset: quiesce rx and tx DMA first (resetting mid-DMA can
 * hang the bus), then assert both reset bits at once and wait for the
 * hardware to clear them.
 */
void
gem_reset(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h2;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
	/* Reset bits self-clear when the reset completes. */
	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
	splx(s);
}
501 :
502 :
503 : /*
504 : * Drain the receive queue.
505 : */
506 : void
507 0 : gem_rxdrain(struct gem_softc *sc)
508 : {
509 : struct gem_rxsoft *rxs;
510 : int i;
511 :
512 0 : for (i = 0; i < GEM_NRXDESC; i++) {
513 0 : rxs = &sc->sc_rxsoft[i];
514 0 : if (rxs->rxs_mbuf != NULL) {
515 0 : bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
516 : rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
517 0 : bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
518 0 : m_freem(rxs->rxs_mbuf);
519 0 : rxs->rxs_mbuf = NULL;
520 0 : }
521 : }
522 0 : sc->sc_rx_prod = sc->sc_rx_cons = 0;
523 0 : }
524 :
/*
 * Reset the whole thing.
 *
 * Stops the periodic timer, marks the interface down, and (unless
 * `softonly') quiesces the PHY and resets both DMA engines.  After the
 * interrupt and ifq barriers have drained any concurrent activity, all
 * queued transmit mbufs are released and the receive ring is drained.
 */
void
gem_stop(struct ifnet *ifp, int softonly)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	if (!softonly) {
		mii_down(&sc->sc_mii);

		gem_reset_rx(sc);
		gem_reset_tx(sc);
	}

	/* Wait for in-flight interrupt handlers and queued starts. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	gem_rxdrain(sc);
}
575 :
576 :
/*
 * Reset the receiver.
 *
 * Returns 0 on success, 1 if the ERX never comes out of reset.
 */
int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}
607 :
608 :
/*
 * Reset the transmitter.
 *
 * Returns 0 on success, 1 if the ETX never comes out of reset.
 */
int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
		printf("%s: cannot reset transmitter\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}
640 :
641 : /*
642 : * Disable receiver.
643 : */
644 : int
645 0 : gem_disable_rx(struct gem_softc *sc)
646 : {
647 0 : bus_space_tag_t t = sc->sc_bustag;
648 0 : bus_space_handle_t h = sc->sc_h1;
649 : u_int32_t cfg;
650 :
651 : /* Flip the enable bit */
652 0 : cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
653 0 : cfg &= ~GEM_MAC_RX_ENABLE;
654 0 : bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
655 :
656 : /* Wait for it to finish */
657 0 : return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
658 : }
659 :
660 : /*
661 : * Disable transmitter.
662 : */
663 : int
664 0 : gem_disable_tx(struct gem_softc *sc)
665 : {
666 0 : bus_space_tag_t t = sc->sc_bustag;
667 0 : bus_space_handle_t h = sc->sc_h1;
668 : u_int32_t cfg;
669 :
670 : /* Flip the enable bit */
671 0 : cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
672 0 : cfg &= ~GEM_MAC_TX_ENABLE;
673 0 : bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
674 :
675 : /* Wait for it to finish */
676 0 : return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
677 : }
678 :
/*
 * Initialize the host-memory side of the interface: zero both
 * descriptor rings, push the tx ring to the device, and populate the
 * rx ring with fresh mbufs.  Always returns 0.
 */
int
gem_meminit(struct gem_softc *sc)
{
	int i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	/* Make the cleared tx descriptors visible to the device. */
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		sc->sc_rxdescs[i].gd_flags = 0;
		sc->sc_rxdescs[i].gd_addr = 0;
	}
	/* Hardware reads RX descriptors in multiples of four. */
	if_rxr_init(&sc->sc_rx_ring, 4, GEM_NRXDESC - 4);
	gem_fill_rx_ring(sc);

	return (0);
}
711 :
712 : int
713 0 : gem_ringsize(int sz)
714 : {
715 0 : switch (sz) {
716 : case 32:
717 0 : return GEM_RING_SZ_32;
718 : case 64:
719 0 : return GEM_RING_SZ_64;
720 : case 128:
721 0 : return GEM_RING_SZ_128;
722 : case 256:
723 0 : return GEM_RING_SZ_256;
724 : case 512:
725 0 : return GEM_RING_SZ_512;
726 : case 1024:
727 0 : return GEM_RING_SZ_1024;
728 : case 2048:
729 0 : return GEM_RING_SZ_2048;
730 : case 4096:
731 0 : return GEM_RING_SZ_4096;
732 : case 8192:
733 0 : return GEM_RING_SZ_8192;
734 : default:
735 0 : printf("gem: invalid Receive Descriptor ring size %d\n", sz);
736 0 : return GEM_RING_SZ_32;
737 : }
738 0 : }
739 :
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 *
 * Returns 0 (the only exit path).  The numbered steps in the body
 * follow section 6.3.5.1 of the Ethernet Channel Engine manual.
 */
int
gem_init(struct ifnet *ifp)
{

	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	/* ERI has a smaller TX FIFO; use a lower threshold there. */
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x04ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	bus_space_write_4(t, h, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
	bus_space_write_4(t, h, GEM_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	return (0);
}
861 :
/*
 * Program the MAC registers that survive a chip reset: inter-packet
 * gaps, frame size limits, MAC/filter addresses, collision counters,
 * pause parameters and the DMA arbitration configuration.
 */
void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t v;

	/* These regs are not cleared on reset */
	/*
	 * NOTE(review): clearing sc_inited here makes the following
	 * `if (!sc->sc_inited)' guard unconditionally true, so the
	 * recommended values are reloaded on every init.  This looks
	 * deliberate (the registers must be reprogrammed after the
	 * reset done in gem_init()), but confirm before relying on
	 * sc_inited elsewhere.
	 */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		/* Seed the backoff RNG from the low MAC address bits. */
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1bf0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks. This greatly improves
	 * especially RX performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	v = GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT;
	if (sc->sc_pci)
		v |= GEM_CONFIG_BURST_INF;
	else
		v |= GEM_CONFIG_BURST_64;
	if (sc->sc_variant != GEM_SUN_GEM && sc->sc_variant != GEM_SUN_ERI)
		v |= GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX;
	bus_space_write_4(t, h, GEM_CONFIG, v);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}
951 0 : }
952 :
/*
 * Receive interrupt.
 *
 * Walks the rx ring from sc_rx_cons, harvesting every descriptor the
 * hardware has released (GEM_RD_OWN clear), queues good packets onto a
 * local mbuf list, refills the ring, kicks the hardware, and finally
 * hands the list to the network stack.  Returns 1 if any descriptors
 * were examined, 0 if the ring was empty.
 */
int
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	struct gem_rxsoft *rxs;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		return (0);

	for (i = sc->sc_rx_cons; if_rxr_inuse(&sc->sc_rx_ring) > 0;
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/* We have processed all of the receive buffers. */
			break;
		}

		/* Take the mbuf away from the descriptor. */
		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;

		if_rxr_put(&sc->sc_rx_ring, 1);

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
#ifdef GEM_DEBUG
			printf("%s: receive error: CRC error\n",
			    sc->sc_dev.dv_xname);
#endif
			m_freem(m);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, &sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/* No errors; receive the packet. */
		len = GEM_RD_BUFLEN(rxstat);

		m->m_data += 2; /* We're already off by two */
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	/* Update the receive pointer. */
	sc->sc_rx_cons = i;
	gem_fill_rx_ring(sc);
	bus_space_write_4(t, h, GEM_RX_KICK, sc->sc_rx_prod);

	DPRINTF(sc, ("gem_rint: done sc->sc_rx_cons %d, complete %d\n",
	    sc->sc_rx_cons, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	if_input(ifp, &ml);

	return (1);
}
1035 :
1036 : void
1037 0 : gem_fill_rx_ring(struct gem_softc *sc)
1038 : {
1039 : u_int slots;
1040 :
1041 0 : for (slots = if_rxr_get(&sc->sc_rx_ring, GEM_NRXDESC - 4);
1042 0 : slots > 0; slots--) {
1043 0 : if (gem_add_rxbuf(sc, sc->sc_rx_prod))
1044 : break;
1045 : }
1046 0 : if_rxr_put(&sc->sc_rx_ring, slots);
1047 0 : }
1048 :
1049 : /*
1050 : * Add a receive buffer to the indicated descriptor.
1051 : */
1052 : int
1053 0 : gem_add_rxbuf(struct gem_softc *sc, int idx)
1054 : {
1055 0 : struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1056 : struct mbuf *m;
1057 : int error;
1058 :
1059 0 : m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1060 0 : if (!m)
1061 0 : return (ENOBUFS);
1062 0 : m->m_len = m->m_pkthdr.len = MCLBYTES;
1063 :
1064 : #ifdef GEM_DEBUG
1065 : /* bzero the packet to check dma */
1066 : memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1067 : #endif
1068 :
1069 0 : rxs->rxs_mbuf = m;
1070 :
1071 0 : error = bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,
1072 : BUS_DMA_READ|BUS_DMA_NOWAIT);
1073 0 : if (error) {
1074 0 : printf("%s: can't load rx DMA map %d, error = %d\n",
1075 0 : sc->sc_dev.dv_xname, idx, error);
1076 0 : panic("gem_add_rxbuf"); /* XXX */
1077 : }
1078 :
1079 0 : bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1080 : rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1081 :
1082 0 : GEM_INIT_RXDESC(sc, idx);
1083 :
1084 0 : sc->sc_rx_prod = GEM_NEXTRX(sc->sc_rx_prod);
1085 :
1086 0 : return (0);
1087 0 : }
1088 :
1089 : int
1090 0 : gem_eint(struct gem_softc *sc, u_int status)
1091 : {
1092 0 : if ((status & GEM_INTR_MIF) != 0) {
1093 : #ifdef GEM_DEBUG
1094 : printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1095 : #endif
1096 0 : return (1);
1097 : }
1098 :
1099 0 : printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
1100 0 : return (1);
1101 0 : }
1102 :
1103 : int
1104 0 : gem_pint(struct gem_softc *sc)
1105 : {
1106 0 : bus_space_tag_t t = sc->sc_bustag;
1107 0 : bus_space_handle_t seb = sc->sc_h1;
1108 : u_int32_t status;
1109 :
1110 0 : status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
1111 0 : status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
1112 : #ifdef GEM_DEBUG
1113 : if (status)
1114 : printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1115 : #endif
1116 0 : return (1);
1117 : }
1118 :
/*
 * Main interrupt handler.  Reads the summary status register and
 * dispatches to the PCS, error, TX and RX handlers as indicated.
 * Returns nonzero if any interrupt was serviced.
 */
int
gem_intr(void *v)
{
	struct gem_softc *sc = (struct gem_softc *)v;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %xstatus %b\n",
		sc->sc_dev.dv_xname, (status>>19), status, GEM_INTR_BITS));

	/* All-ones usually means the device is gone (e.g. hot-unplugged). */
	if (status == 0xffffffff)
		return (0);

	if ((status & GEM_INTR_PCS) != 0)
		r |= gem_pint(sc);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	/* gem_tint() extracts the TX completion index from `status'. */
	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
#ifdef GEM_DEBUG
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    sc->sc_dev.dv_xname, txstat);
#endif
		/*
		 * A TX underrun or oversized packet wedges the MAC;
		 * reinitialize the chip.  gem_init() needs the kernel
		 * lock, which this interrupt handler doesn't hold.
		 */
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) {
			KERNEL_LOCK();
			gem_init(ifp);
			KERNEL_UNLOCK();
		}
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
#ifdef GEM_DEBUG
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
		if (rxstat & GEM_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;

			/*
			 * Apparently a silicon bug causes ERI to hang
			 * from time to time.  So if we detect an RX
			 * FIFO overflow, we fire off a timer, and
			 * check whether we're still making progress
			 * by looking at the RX FIFO write and read
			 * pointers.
			 */
			sc->sc_rx_fifo_wr_ptr =
			    bus_space_read_4(t, seb, GEM_RX_FIFO_WR_PTR);
			sc->sc_rx_fifo_rd_ptr =
			    bus_space_read_4(t, seb, GEM_RX_FIFO_RD_PTR);
			timeout_add_msec(&sc->sc_rx_watchdog, 400);
		}
#ifdef GEM_DEBUG
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
	}
	return (r);
}
1194 :
/*
 * RX overflow watchdog timer, armed by gem_intr() when an RX FIFO
 * overflow is seen.  Checks whether the RX FIFO pointers are still
 * moving; if the MAC is stuck in overflow state, the chip is
 * reinitialized to recover (works around an ERI silicon bug — see the
 * comment in gem_intr()).
 */
void
gem_rx_watchdog(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t rx_fifo_wr_ptr;
	u_int32_t rx_fifo_rd_ptr;
	u_int32_t state;

	/* Interface was brought down since the timeout was armed. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
	rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
	state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
	if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW) {
		/* Compare with the pointers snapshotted in gem_intr(). */
		if ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
		    ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
		     (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr))) {
			/*
			 * The RX state machine is still in overflow state and
			 * the RX FIFO write and read pointers seem to be
			 * stuck.  Whack the chip over the head to get things
			 * going again.
			 */
			gem_init(ifp);
		} else {
			/*
			 * We made some progress, but is not certain that the
			 * overflow condition has been resolved.  Check again.
			 */
			sc->sc_rx_fifo_wr_ptr = rx_fifo_wr_ptr;
			sc->sc_rx_fifo_rd_ptr = rx_fifo_rd_ptr;
			timeout_add_msec(&sc->sc_rx_watchdog, 400);
		}
	}
}
1234 :
1235 : void
1236 0 : gem_watchdog(struct ifnet *ifp)
1237 : {
1238 0 : struct gem_softc *sc = ifp->if_softc;
1239 :
1240 : DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1241 : "GEM_MAC_RX_CONFIG %x\n",
1242 : bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
1243 : bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
1244 : bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));
1245 :
1246 0 : log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1247 0 : ++ifp->if_oerrors;
1248 :
1249 : /* Try to get more packets going. */
1250 0 : gem_init(ifp);
1251 0 : }
1252 :
1253 : /*
1254 : * Initialize the MII Management Interface
1255 : */
1256 : void
1257 0 : gem_mifinit(struct gem_softc *sc)
1258 : {
1259 0 : bus_space_tag_t t = sc->sc_bustag;
1260 0 : bus_space_handle_t mif = sc->sc_h1;
1261 :
1262 : /* Configure the MIF in frame mode */
1263 0 : sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1264 0 : sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
1265 0 : bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
1266 0 : }
1267 :
1268 : /*
1269 : * MII interface
1270 : *
1271 : * The GEM MII interface supports at least three different operating modes:
1272 : *
1273 : * Bitbang mode is implemented using data, clock and output enable registers.
1274 : *
1275 : * Frame mode is implemented by loading a complete frame into the frame
1276 : * register and polling the valid bit for completion.
1277 : *
1278 : * Polling mode uses the frame register but completion is indicated by
1279 : * an interrupt.
1280 : *
1281 : */
1282 : int
1283 0 : gem_mii_readreg(struct device *self, int phy, int reg)
1284 : {
1285 0 : struct gem_softc *sc = (void *)self;
1286 0 : bus_space_tag_t t = sc->sc_bustag;
1287 0 : bus_space_handle_t mif = sc->sc_h1;
1288 : int n;
1289 : u_int32_t v;
1290 :
1291 : #ifdef GEM_DEBUG
1292 : if (sc->sc_debug)
1293 : printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
1294 : #endif
1295 :
1296 : /* Construct the frame command */
1297 0 : v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
1298 : GEM_MIF_FRAME_READ;
1299 :
1300 0 : bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1301 0 : for (n = 0; n < 100; n++) {
1302 0 : DELAY(1);
1303 0 : v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1304 0 : if (v & GEM_MIF_FRAME_TA0)
1305 0 : return (v & GEM_MIF_FRAME_DATA);
1306 : }
1307 :
1308 0 : printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1309 0 : return (0);
1310 0 : }
1311 :
1312 : void
1313 0 : gem_mii_writereg(struct device *self, int phy, int reg, int val)
1314 : {
1315 0 : struct gem_softc *sc = (void *)self;
1316 0 : bus_space_tag_t t = sc->sc_bustag;
1317 0 : bus_space_handle_t mif = sc->sc_h1;
1318 : int n;
1319 : u_int32_t v;
1320 :
1321 : #ifdef GEM_DEBUG
1322 : if (sc->sc_debug)
1323 : printf("gem_mii_writereg: phy %d reg %d val %x\n",
1324 : phy, reg, val);
1325 : #endif
1326 :
1327 : /* Construct the frame command */
1328 0 : v = GEM_MIF_FRAME_WRITE |
1329 0 : (phy << GEM_MIF_PHY_SHIFT) |
1330 0 : (reg << GEM_MIF_REG_SHIFT) |
1331 0 : (val & GEM_MIF_FRAME_DATA);
1332 :
1333 0 : bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1334 0 : for (n = 0; n < 100; n++) {
1335 0 : DELAY(1);
1336 0 : v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1337 0 : if (v & GEM_MIF_FRAME_TA0)
1338 0 : return;
1339 : }
1340 :
1341 0 : printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
1342 0 : }
1343 :
/*
 * MII status change callback: reprogram the MAC TX, XIF and flow
 * control configuration to match the newly negotiated media
 * (duplex, GMII mode, 802.3x pause).
 */
void
gem_mii_statchg(struct device *dev)
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %lld\n", instance);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000);	/* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
		GEM_MAC_TX_ENABLE;
	/* Full duplex: no need to watch carrier or collisions. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_TX_MII_ENA;
	v |= GEM_MAC_XIF_LINK_LED;

	/* External MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= GEM_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= GEM_MAC_XIF_ECHO_DISABL;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= GEM_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);

	/*
	 * 802.3x flow control
	 */
	v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG);
	v &= ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
	bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v);
}
1403 :
1404 : int
1405 0 : gem_pcs_readreg(struct device *self, int phy, int reg)
1406 : {
1407 0 : struct gem_softc *sc = (void *)self;
1408 0 : bus_space_tag_t t = sc->sc_bustag;
1409 0 : bus_space_handle_t pcs = sc->sc_h1;
1410 :
1411 : #ifdef GEM_DEBUG
1412 : if (sc->sc_debug)
1413 : printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
1414 : #endif
1415 :
1416 0 : if (phy != GEM_PHYAD_EXTERNAL)
1417 0 : return (0);
1418 :
1419 0 : switch (reg) {
1420 : case MII_BMCR:
1421 : reg = GEM_MII_CONTROL;
1422 0 : break;
1423 : case MII_BMSR:
1424 : reg = GEM_MII_STATUS;
1425 0 : break;
1426 : case MII_ANAR:
1427 : reg = GEM_MII_ANAR;
1428 0 : break;
1429 : case MII_ANLPAR:
1430 : reg = GEM_MII_ANLPAR;
1431 0 : break;
1432 : case MII_EXTSR:
1433 0 : return (EXTSR_1000XFDX|EXTSR_1000XHDX);
1434 : default:
1435 0 : return (0);
1436 : }
1437 :
1438 0 : return bus_space_read_4(t, pcs, reg);
1439 0 : }
1440 :
/*
 * Write a "PHY register" through the on-chip PCS (SERDES): translate
 * the standard MII register number to the corresponding PCS register
 * and write it, re-enabling the PCS after an ANAR write or reset.
 */
void
gem_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;
	int reset = 0;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	/* The PCS answers only at the external PHY address. */
	if (phy != GEM_PHYAD_EXTERNAL)
		return;

	/* Disable the PCS before changing advertised abilities. */
	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, GEM_MII_CONFIG, 0);

	/* Map MII register numbers onto PCS registers. */
	switch (reg) {
	case MII_BMCR:
		/* Remember a requested reset so we can wait for it below. */
		reset = (val & GEM_MII_CONTROL_RESET);
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	/* Wait for a requested reset to self-clear. */
	if (reset)
		gem_bitwait(sc, pcs, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0);

	/* After an ANAR write or a reset, reconfigure and re-enable. */
	if (reg == GEM_MII_ANAR || reset) {
		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
	}
}
1491 :
1492 : int
1493 0 : gem_mediachange(struct ifnet *ifp)
1494 : {
1495 0 : struct gem_softc *sc = ifp->if_softc;
1496 0 : struct mii_data *mii = &sc->sc_mii;
1497 :
1498 0 : if (mii->mii_instance) {
1499 : struct mii_softc *miisc;
1500 0 : LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1501 0 : mii_phy_reset(miisc);
1502 0 : }
1503 :
1504 0 : return (mii_mediachg(&sc->sc_mii));
1505 : }
1506 :
1507 : void
1508 0 : gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1509 : {
1510 0 : struct gem_softc *sc = ifp->if_softc;
1511 :
1512 0 : mii_pollstat(&sc->sc_mii);
1513 0 : ifmr->ifm_active = sc->sc_mii.mii_media_active;
1514 0 : ifmr->ifm_status = sc->sc_mii.mii_media_status;
1515 0 : }
1516 :
1517 : /*
1518 : * Process an ioctl request.
1519 : */
1520 : int
1521 0 : gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1522 : {
1523 0 : struct gem_softc *sc = ifp->if_softc;
1524 0 : struct ifreq *ifr = (struct ifreq *)data;
1525 : int s, error = 0;
1526 :
1527 0 : s = splnet();
1528 :
1529 0 : switch (cmd) {
1530 : case SIOCSIFADDR:
1531 0 : ifp->if_flags |= IFF_UP;
1532 0 : if ((ifp->if_flags & IFF_RUNNING) == 0)
1533 0 : gem_init(ifp);
1534 : break;
1535 :
1536 : case SIOCSIFFLAGS:
1537 0 : if (ifp->if_flags & IFF_UP) {
1538 0 : if (ifp->if_flags & IFF_RUNNING)
1539 0 : error = ENETRESET;
1540 : else
1541 0 : gem_init(ifp);
1542 : } else {
1543 0 : if (ifp->if_flags & IFF_RUNNING)
1544 0 : gem_stop(ifp, 0);
1545 : }
1546 : #ifdef GEM_DEBUG
1547 : sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1548 : #endif
1549 : break;
1550 :
1551 : case SIOCGIFMEDIA:
1552 : case SIOCSIFMEDIA:
1553 0 : error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1554 0 : break;
1555 :
1556 : case SIOCGIFRXR:
1557 0 : error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
1558 0 : NULL, MCLBYTES, &sc->sc_rx_ring);
1559 0 : break;
1560 :
1561 : default:
1562 0 : error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1563 0 : }
1564 :
1565 0 : if (error == ENETRESET) {
1566 0 : if (ifp->if_flags & IFF_RUNNING)
1567 0 : gem_iff(sc);
1568 : error = 0;
1569 0 : }
1570 :
1571 0 : splx(s);
1572 0 : return (error);
1573 : }
1574 :
/*
 * Program the MAC RX filter: promiscuous, all-multicast, or a 256-bit
 * multicast hash filter built from the interface's multicast list.
 */
void
gem_iff(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t crc, hash[16], rxcfg;
	int i;

	/* Start from the current config with all filter modes cleared. */
	rxcfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_HASH_FILTER | GEM_MAC_RX_PROMISCUOUS |
	    GEM_MAC_RX_PROMISC_GRP);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Multicast ranges can't be hashed; fall back to all-multi. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= GEM_MAC_RX_PROMISCUOUS;
		else
			rxcfg |= GEM_MAC_RX_PROMISC_GRP;
	} else {
		/*
		 * Set up multicast address filter by passing all multicast
		 * addresses through a crc generator, and then using the
		 * high order 8 bits as an index into the 256 bit logical
		 * address filter.  The high order 4 bits selects the word,
		 * while the other 4 bits select the bit within the word
		 * (where bit 0 is the MSB).
		 */

		rxcfg |= GEM_MAC_RX_HASH_FILTER;

		/* Clear hash table */
		for (i = 0; i < 16; i++)
			hash[i] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Just want the 8 most significant bits. */
			crc >>= 24;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (15 - (crc & 15));

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Now load the hash table into the chip (if we are using it) */
		for (i = 0; i < 16; i++) {
			bus_space_write_4(t, h,
			    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
			    hash[i]);
		}
	}

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, rxcfg);
}
1638 :
1639 : /*
1640 : * Transmit interrupt.
1641 : */
/*
 * Transmit interrupt: reclaim completed TX descriptors up to the
 * hardware completion index carried in the upper bits of `status'
 * (see the "cplt" field in gem_intr's debug output).
 */
int
gem_tint(struct gem_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct gem_sxd *sd;
	u_int32_t cons, prod;
	int free = 0;

	/* Hardware TX completion index lives in status bits 19 and up. */
	prod = status >> 19;
	cons = sc->sc_tx_cons;
	while (cons != prod) {
		sd = &sc->sc_txd[cons];
		/* Only the last descriptor of a packet carries the mbuf. */
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		free = 1;

		/* Advance with wraparound (GEM_NTXDESC is a power of two). */
		cons++;
		cons &= GEM_NTXDESC - 1;
	}

	/* Nothing reclaimed: leave state untouched. */
	if (free == 0)
		return (0);

	sc->sc_tx_cons = cons;

	/* Ring fully drained: cancel the TX watchdog. */
	if (sc->sc_tx_prod == cons)
		ifp->if_timer = 0;

	/* Descriptors freed up; restart a stalled transmit queue. */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);

	return (1);
}
1681 :
1682 : int
1683 0 : gem_load_mbuf(struct gem_softc *sc, struct gem_sxd *sd, struct mbuf *m)
1684 : {
1685 : int error;
1686 :
1687 0 : error = bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m,
1688 : BUS_DMA_NOWAIT);
1689 0 : switch (error) {
1690 : case 0:
1691 : break;
1692 :
1693 : case EFBIG: /* mbuf chain is too fragmented */
1694 0 : if (m_defrag(m, M_DONTWAIT) == 0 &&
1695 0 : bus_dmamap_load_mbuf(sc->sc_dmatag, sd->sd_map, m,
1696 0 : BUS_DMA_NOWAIT) == 0)
1697 : break;
1698 : /* FALLTHROUGH */
1699 : default:
1700 0 : return (1);
1701 : }
1702 :
1703 0 : return (0);
1704 0 : }
1705 :
/*
 * Transmit start routine: drain the interface send queue into the TX
 * descriptor ring, then kick the chip.  Each packet occupies one
 * descriptor per DMA segment; the mbuf and its loaded map end up on
 * the packet's LAST descriptor (maps are swapped so the first slot
 * keeps an unloaded map for reuse).
 */
void
gem_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct gem_softc *sc = ifp->if_softc;
	struct gem_sxd *sd;
	struct mbuf *m;
	uint64_t flags, nflags;
	bus_dmamap_t map;
	uint32_t prod;
	uint32_t free, used = 0;
	uint32_t first, last;
	int i;

	prod = sc->sc_tx_prod;

	/* figure out space */
	free = sc->sc_tx_cons;
	if (free <= prod)
		free += GEM_NTXDESC;
	free -= prod;

	/* Prepare the whole descriptor area for CPU writes. */
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
	    0, sizeof(struct gem_desc) * GEM_NTXDESC,
	    BUS_DMASYNC_PREWRITE);

	for (;;) {
		/*
		 * Worst case a packet needs GEM_NTXSEGS descriptors;
		 * keep one slot slack so prod never catches cons.
		 */
		if (used + GEM_NTXSEGS + 1 > free) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		first = prod;
		sd = &sc->sc_txd[first];
		map = sd->sd_map;

		/* Drop the packet if it can't be mapped for DMA. */
		if (gem_load_mbuf(sc, sd, m)) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Only the first descriptor carries START_OF_PACKET. */
		nflags = GEM_TD_START_OF_PACKET;
		for (i = 0; i < map->dm_nsegs; i++) {
			flags = nflags |
			    (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE);

			GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_addr,
			    map->dm_segs[i].ds_addr);
			GEM_DMA_WRITE(sc, &sc->sc_txdescs[prod].gd_flags,
			    flags);

			last = prod;
			prod++;
			prod &= GEM_NTXDESC - 1;

			nflags = 0;
		}
		/* Rewrite the last descriptor with END_OF_PACKET set. */
		GEM_DMA_WRITE(sc, &sc->sc_txdescs[last].gd_flags,
		    GEM_TD_END_OF_PACKET | flags);

		used += map->dm_nsegs;
		/* Park the mbuf and its loaded map on the last slot. */
		sc->sc_txd[last].sd_mbuf = m;
		sc->sc_txd[first].sd_map = sc->sc_txd[last].sd_map;
		sc->sc_txd[last].sd_map = map;
	}

	/* Flush descriptor writes before handing them to the chip. */
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
	    0, sizeof(struct gem_desc) * GEM_NTXDESC,
	    BUS_DMASYNC_POSTWRITE);

	if (used == 0)
		return;

	/* Commit. */
	sc->sc_tx_prod = prod;

	/* Transmit. */
	bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, prod);

	/* Set timeout in case hardware has problems transmitting. */
	ifp->if_timer = 5;
}
|