/*	$OpenBSD: if_nfe.c,v 1.120 2017/09/08 05:36:52 deraadt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet*, int);
#endif

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

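/*
 * Handle suspend/resume: take the interface down before the machine
 * sleeps and bring it back up on wakeup if it was configured up.
 */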
int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

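/*
 * Map the device registers, establish the interrupt, pick per-chip
 * feature flags (jumbo frames, 40-bit DMA, hardware checksums, VLAN
 * tagging, power management), read the MAC address, allocate the
 * Tx/Rx rings and attach the network interface and PHY.
 */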
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

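/*
 * Link state changed: propagate the speed and duplex negotiated by the
 * PHY into the MAC's PHY interface, misc and link speed registers so
 * both ends of the MII agree on the link parameters.
 */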
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

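/*
 * MII register access goes through NFE_PHY_CTL: clear any stale busy
 * bit, post the (phy, reg) request, then poll NFE_PHY_BUSY in 100us
 * steps, giving up after 1000 tries (100ms).
 */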
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

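/*
 * Interrupt handler: acknowledge the interrupt sources we care about,
 * note link state changes, then drain the Rx and Tx completion rings.
 */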
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

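/*
 * Descriptor sync helpers.  Each descriptor's offset inside the ring's
 * DMA map is computed from its address, so only that descriptor is
 * synced.  The *_rsync variants sync the range [start, end) and, when
 * the range wraps past the end of the ring, split the operation into
 * two bus_dmamap_sync() calls.
 */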
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

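/*
 * Receive completion: walk the Rx ring from rxq.cur until we hit a
 * descriptor the chip still owns (NFE_RX_READY set).  For each frame,
 * a fresh cluster is loaded into the ring slot before the old mbuf is
 * handed up the stack, so the ring never loses a buffer.
 */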
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf (still in data->m) */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
			    data->m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail.. */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
	if_input(ifp, &ml);
}

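/*
 * Transmit completion: reclaim descriptors from txq.next up to the
 * first one still owned by the chip (NFE_TX_VALID set), unloading the
 * DMA map and freeing the mbuf once its last fragment has gone out.
 */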
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifq_clr_oactive(&ifp->if_snd);
		nfe_start(ifp);
	}
}

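/*
 * Map an mbuf chain into the Tx ring, one descriptor per DMA segment.
 * Checksum offload flags and the VLAN tag go into the first descriptor
 * only; the valid bit of the first descriptor is set last, after the
 * whole chain is built, so the chip never sees a half-constructed
 * chain.
 */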
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

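/*
 * Dequeue packets from the interface send queue and hand them to
 * nfe_encap() until the Tx ring fills up, then kick the chip once for
 * the whole batch.
 */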
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_deq_begin(&ifp->if_snd);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifq_deq_rollback(&ifp->if_snd, m0);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		ifq_deq_commit(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

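/*
 * Bring the interface up: reset the chip, program the ring addresses
 * and sizes, restore the MAC address and receive filters, set up
 * interrupt mitigation and (re)start Rx, Tx and the MII tick.
 */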
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

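/*
 * Take the interface down.  When Wake-on-LAN is armed, Rx and
 * interrupts are deliberately left enabled so the chip can still see
 * the wakeup frame.
 */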
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

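/*
 * Ring setup follows the usual bus_dma dance: create a map for the
 * descriptor array, allocate and map its memory, load the map, then
 * pre-allocate one cluster and DMA map per Rx slot and mark every
 * descriptor ready for the chip.
 */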
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		data->m = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		data->m->m_pkthdr.len = data->m->m_len = MCLBYTES;

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		m_freem(data->m);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

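/*
 * Program the receive filter.  The hardware matches multicast frames
 * against a single address/mask pair: addr accumulates the AND of all
 * subscribed group addresses and mask the AND of their complements,
 * so after mask |= addr a mask bit is set exactly where every address
 * agrees (all ones or all zeroes).  For example, 01:00:5e:00:00:01 and
 * 01:00:5e:00:00:03 yield addr 01:00:5e:00:00:01 with only the bit in
 * which they differ cleared from the mask.  With a multicast range, or
 * in promiscuous mode, fall back to accepting all multicast.
 */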
void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &= enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

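/*
 * Read the station address from the MAC address registers.  Older
 * chips apparently store the address in reversed byte order; the
 * NFE_CORRECT_MACADDR flag marks the newer chips that store it
 * straight.
 */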
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

#ifndef SMALL_KERNEL
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif