/*	$OpenBSD: if_vr.c,v 1.153 2017/01/22 10:17:38 dlg Exp $	*/

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at ftp://ftp.vtbridge.org/Docs/LAN/.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Early Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * OpenBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */
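/*
 * In practice a chip flagged VR_Q_NEEDALIGN therefore never DMAs
 * straight out of an arbitrary mbuf chain: vr_encap() forces the
 * EFBIG path so that m_defrag() first copies the chain into a fresh,
 * longword-aligned cluster.
 */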

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <sys/device.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>

#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VR_USEIOSPACE

#include <dev/pci/if_vrreg.h>

int vr_probe(struct device *, void *, void *);
int vr_quirks(struct pci_attach_args *);
void vr_attach(struct device *, struct device *, void *);
int vr_activate(struct device *, int);

struct cfattach vr_ca = {
	sizeof(struct vr_softc), vr_probe, vr_attach, NULL,
	vr_activate
};
struct cfdriver vr_cd = {
	NULL, "vr", DV_IFNET
};

int vr_encap(struct vr_softc *, struct vr_chain **, struct mbuf *);
void vr_rxeof(struct vr_softc *);
void vr_rxeoc(struct vr_softc *);
void vr_txeof(struct vr_softc *);
void vr_tick(void *);
void vr_rxtick(void *);
int vr_intr(void *);
int vr_dmamem_alloc(struct vr_softc *, struct vr_dmamem *,
    bus_size_t, u_int);
void vr_dmamem_free(struct vr_softc *, struct vr_dmamem *);
void vr_start(struct ifnet *);
int vr_ioctl(struct ifnet *, u_long, caddr_t);
void vr_chipinit(struct vr_softc *);
void vr_init(void *);
void vr_stop(struct vr_softc *);
void vr_watchdog(struct ifnet *);
int vr_ifmedia_upd(struct ifnet *);
void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
int vr_miibus_readreg(struct device *, int, int);
void vr_miibus_writereg(struct device *, int, int, int);
void vr_miibus_statchg(struct device *);

void vr_setcfg(struct vr_softc *, uint64_t);
void vr_iff(struct vr_softc *);
void vr_reset(struct vr_softc *);
int vr_list_rx_init(struct vr_softc *);
void vr_fill_rx_ring(struct vr_softc *);
int vr_list_tx_init(struct vr_softc *);
#ifndef SMALL_KERNEL
int vr_wol(struct ifnet *, int);
#endif

int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *);

/*
 * Supported devices & quirks
 */
#define VR_Q_NEEDALIGN		(1<<0)
#define VR_Q_CSUM		(1<<1)
#define VR_Q_CAM		(1<<2)
#define VR_Q_HWTAG		(1<<3)
#define VR_Q_INTDISABLE		(1<<4)
#define VR_Q_BABYJUMBO		(1<<5)	/* others may work too */
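/*
 * How these quirks are consumed below: VR_Q_NEEDALIGN forces the copy
 * path in vr_encap(), VR_Q_CSUM and VR_Q_HWTAG turn on the matching
 * if_capabilities in vr_attach(), VR_Q_INTDISABLE enables the TX
 * interrupt moderation in vr_encap(), and VR_Q_BABYJUMBO raises
 * if_hardmtu and the RX buffer length programmed in vr_alloc_mbuf().
 * VR_Q_CAM is set for the VT6105M but is not otherwise used in this
 * file.
 */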

struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_pid;
	int			vr_quirks;
} vr_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2,
	    VR_Q_BABYJUMBO },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
	    VR_Q_BABYJUMBO },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M,
	    VR_Q_CSUM | VR_Q_CAM | VR_Q_HWTAG | VR_Q_INTDISABLE |
	    VR_Q_BABYJUMBO },
	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII,
	    VR_Q_NEEDALIGN }
};

#define VR_SETBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)				\
	CSR_WRITE_1(sc, VR_MIICMD,		\
	    CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)				\
	CSR_WRITE_1(sc, VR_MIICMD,		\
	    CSR_READ_1(sc, VR_MIICMD) & ~(x))
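/*
 * Note that these helpers are read-modify-write sequences on the CSRs
 * and are not atomic; the driver relies on everything that uses them
 * running at splnet() (or at attach time) for serialization.
 */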

/*
 * Read a PHY register through the MII.
 */
int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int s, i;

	s = splnet();

	/* Set the PHY-address */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register-address */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}
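	/*
	 * The poll above gives the chip at most ~10ms and then falls
	 * through without flagging an error, so on a wedged MII the
	 * data register read below may return stale contents.
	 */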

	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	splx(s);

	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int s, i;

	s = splnet();

	/* Set the PHY-address */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register-address and data to write */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	splx(s);

	return(0);
}

int
vr_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return 0;
	default:
		break;
	}

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

void
vr_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return;
	default:
		break;
	}

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);
}

void
vr_miibus_statchg(struct device *dev)
{
	struct vr_softc *sc = (struct vr_softc *)dev;

	vr_setcfg(sc, sc->sc_mii.mii_media_active);
}

void
vr_iff(struct vr_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			h = 0;
	u_int32_t		hashes[2];
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int8_t		rxfilt;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI |
	    VR_RXCFG_RX_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxfilt |= VR_RXCFG_RX_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		rxfilt |= VR_RXCFG_RX_MULTI;
		bzero(hashes, sizeof(hashes));

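		/*
		 * The chip hashes each address with the big-endian
		 * Ethernet CRC-32 and uses the top six bits (the >> 26
		 * below) to pick one of the 64 filter bits split across
		 * VR_MAR0 and VR_MAR1.
		 */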
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
void
vr_setcfg(struct vr_softc *sc, uint64_t media)
{
	int i;

	if (sc->sc_mii.mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) {
		sc->vr_link = 1;

		if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))
			VR_CLRBIT16(sc, VR_COMMAND,
			    (VR_CMD_TX_ON|VR_CMD_RX_ON));

		if ((media & IFM_GMASK) == IFM_FDX)
			VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
		else
			VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
	} else {
		sc->vr_link = 0;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(10);
			if (!(CSR_READ_2(sc, VR_COMMAND) &
			    (VR_CMD_TX_ON|VR_CMD_RX_ON)))
				break;
		}
		if (i == 0) {
#ifdef VR_DEBUG
			printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
#endif
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}

void
vr_reset(struct vr_softc *sc)
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A)
			printf("%s: reset never completed!\n",
			    sc->sc_dev.dv_xname);
		else {
#ifdef VR_DEBUG
			/* Use newer force reset command */
			printf("%s: Using force reset command.\n",
			    sc->sc_dev.dv_xname);
#endif
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for a VIA Rhine chip.
 */
int
vr_probe(struct device *parent, void *match, void *aux)
{
	const struct vr_type *vr;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	int i, nent = nitems(vr_devices);

	for (i = 0, vr = vr_devices; i < nent; i++, vr++)
		if (PCI_VENDOR(pa->pa_id) == vr->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vr->vr_pid)
			return(1);

	return(0);
}

int
vr_quirks(struct pci_attach_args *pa)
{
	const struct vr_type *vr;
	int i, nent = nitems(vr_devices);

	for (i = 0, vr = vr_devices; i < nent; i++, vr++)
		if (PCI_VENDOR(pa->pa_id) == vr->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vr->vr_pid)
			return(vr->vr_quirks);

	return(0);
}

int
vr_dmamem_alloc(struct vr_softc *sc, struct vr_dmamem *vrm,
    bus_size_t size, u_int align)
{
	vrm->vrm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, vrm->vrm_size, 1,
	    vrm->vrm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &vrm->vrm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, vrm->vrm_size,
	    align, 0, &vrm->vrm_seg, 1, &vrm->vrm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &vrm->vrm_seg, vrm->vrm_nsegs,
	    vrm->vrm_size, &vrm->vrm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, vrm->vrm_map, vrm->vrm_kva,
	    vrm->vrm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
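	/* The error path unwinds the successful steps in reverse order. */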
unmap:
	bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map);
	return (1);
}

void
vr_dmamem_free(struct vr_softc *sc, struct vr_dmamem *vrm)
{
	bus_dmamap_unload(sc->sc_dmat, vrm->vrm_map);
	bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size);
	bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
vr_attach(struct device *parent, struct device *self, void *aux)
{
	int i;
	struct vr_softc *sc = (struct vr_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t size;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */

#ifdef VR_USEIOSPACE
	if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		goto fail;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->vr_revid = PCI_REVISION(pa->pa_class);
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	vr_chipinit(sc);

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(1000);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected. Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	sc->sc_dmat = pa->pa_dmat;
	if (vr_dmamem_alloc(sc, &sc->sc_zeromap, 64, PAGE_SIZE) != 0) {
		printf(": failed to allocate zero pad memory\n");
		return;
	}
	bzero(sc->sc_zeromap.vrm_kva, 64);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0,
	    sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	if (vr_dmamem_alloc(sc, &sc->sc_listmap, sizeof(struct vr_list_data),
	    PAGE_SIZE) != 0) {
		printf(": failed to allocate dma map\n");
		goto free_zero;
	}

	sc->vr_ldata = (struct vr_list_data *)sc->sc_listmap.vrm_kva;
	sc->vr_quirks = vr_quirks(pa);

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	if (sc->vr_quirks & VR_Q_BABYJUMBO)
		ifp->if_hardmtu = VR_RXLEN_BABYJUMBO -
		    ETHER_HDR_LEN - ETHER_CRC_LEN;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->vr_quirks & VR_Q_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	/* if the hardware can do VLAN tagging, say so. */
	if (sc->vr_quirks & VR_Q_HWTAG)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	if (sc->vr_revid >= REV_ID_VT3065_A) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_wol = vr_wol;
		vr_wol(ifp, 0);
	}
#endif

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vr_miibus_readreg;
	sc->sc_mii.mii_writereg = vr_miibus_writereg;
	sc->sc_mii.mii_statchg = vr_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	timeout_set(&sc->sc_to, vr_tick, sc);
	timeout_set(&sc->sc_rxto, vr_rxtick, sc);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

free_zero:
	bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0,
	    sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	vr_dmamem_free(sc, &sc->sc_zeromap);
fail:
	bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size);
}

int
vr_activate(struct device *self, int act)
{
	struct vr_softc *sc = (struct vr_softc *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			vr_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			vr_init(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

/*
 * Initialize the transmit descriptors.
 */
int
vr_list_tx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	cd->vr_tx_cnt = cd->vr_tx_pkts = 0;

	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
		cd->vr_tx_chain[i].vr_paddr =
		    sc->sc_listmap.vrm_map->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_tx_list[i]);

		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, VR_MAXFRAGS,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map))
			return (ENOBUFS);

		if (i == (VR_TX_LIST_CNT - 1))
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[0];
		else
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[i + 1];
	}

	cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];

	return (0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	struct vr_desc *d;
	int i, nexti;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
		    &cd->vr_rx_chain[i].vr_map))
			return (ENOBUFS);

		d = (struct vr_desc *)&ld->vr_rx_list[i];
		cd->vr_rx_chain[i].vr_ptr = d;
		cd->vr_rx_chain[i].vr_paddr =
		    sc->sc_listmap.vrm_map->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[i]);

		if (i == (VR_RX_LIST_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;

		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
		ld->vr_rx_list[i].vr_next =
		    htole32(sc->sc_listmap.vrm_map->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[nexti]));
	}

	cd->vr_rx_prod = cd->vr_rx_cons = &cd->vr_rx_chain[0];
	if_rxr_init(&sc->sc_rxring, 2, VR_RX_LIST_CNT - 1);
	vr_fill_rx_ring(sc);

	return (0);
}

void
vr_fill_rx_ring(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	u_int slots;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (slots = if_rxr_get(&sc->sc_rxring, VR_RX_LIST_CNT);
	    slots > 0; slots--) {
		if (vr_alloc_mbuf(sc, cd->vr_rx_prod))
			break;

		cd->vr_rx_prod = cd->vr_rx_prod->vr_nextdesc;
	}

	if_rxr_put(&sc->sc_rxring, slots);
	if (if_rxr_inuse(&sc->sc_rxring) == 0)
		timeout_add(&sc->sc_rxto, 0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat, rxctl;

	ifp = &sc->arpcom.ac_if;

	while (if_rxr_inuse(&sc->sc_rxring) > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map,
		    0, sc->sc_listmap.vrm_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_status);
		if (rxstat & VR_RXSTAT_OWN)
			break;

		rxctl = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_ctl);

		cur_rx = sc->vr_cdata.vr_rx_cons;
		m = cur_rx->vr_mbuf;
		cur_rx->vr_mbuf = NULL;
		sc->vr_cdata.vr_rx_cons = cur_rx->vr_nextdesc;
		if_rxr_put(&sc->sc_rxring, 1);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((rxstat & VR_RXSTAT_RX_OK) == 0) {
			ifp->if_ierrors++;
#ifdef VR_DEBUG
			printf("%s: rx error (%02x):",
			    sc->sc_dev.dv_xname, rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
#endif

			m_freem(m);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));

		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map);

		/*
		 * The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off so trim the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __STRICT_ALIGNMENT
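		/*
		 * On strict-alignment architectures, copy the frame into
		 * a fresh chain offset by ETHER_ALIGN so the IP header
		 * behind the 14-byte Ethernet header lands on a 32-bit
		 * boundary.
		 */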
		{
			struct mbuf *m0;
			m0 = m_devget(mtod(m, caddr_t), total_len, ETHER_ALIGN);
			m_freem(m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}
#else
		m->m_pkthdr.len = m->m_len = total_len;
#endif

		if (sc->vr_quirks & VR_Q_CSUM &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP) &&
			    ((rxctl & VR_RXCTL_TCPUDPOK) != 0))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}

#if NVLAN > 0
		/*
		 * If there's a tagged packet, the 802.1q header will be at the
		 * 4-byte boundary following the CRC.  There will be 2 bytes
		 * TPID (0x8100) and 2 bytes TCI (including VLAN ID).
		 * This isn't in the data sheet.
		 */
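		/*
		 * For example, a 60-byte frame (total_len, with the CRC
		 * already trimmed above) carries its TCI at byte offset
		 * ((60 + 3) & ~3) + ETHER_CRC_LEN + 2 = 66.
		 */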
		if (rxctl & VR_RXCTL_TAG) {
			int offset = ((total_len + 3) & ~3) + ETHER_CRC_LEN + 2;
			m->m_pkthdr.ether_vtag = htons(*(u_int16_t *)
			    ((u_int8_t *)m->m_data + offset));
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	vr_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map,
	    0, sc->sc_listmap.vrm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_input(ifp, &ml);
}

void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	for (i = 0x400;
	    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	    i--)
		;	/* Wait for receiver to stop */

	if (!i) {
		printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	vr_rxeof(sc);

	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while (cur_tx != sc->vr_cdata.vr_tx_prod) {
		u_int32_t txstat, txctl;
		int i;

		txstat = letoh32(cur_tx->vr_ptr->vr_status);
		txctl = letoh32(cur_tx->vr_ptr->vr_ctl);

		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				printf("%s: tx shutdown timeout\n",
				    sc->sc_dev.dv_xname);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			cur_tx->vr_ptr->vr_status = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
			break;
		}

		if (txstat & VR_TXSTAT_OWN)
			break;

		sc->vr_cdata.vr_tx_cnt--;
		/* Only the first descriptor in the chain is valid. */
		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
			goto next;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;

		if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);

		m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		ifq_clr_oactive(&ifp->if_snd);

next:
		cur_tx = cur_tx->vr_nextdesc;
	}

	sc->vr_cdata.vr_tx_cons = cur_tx;
	if (sc->vr_cdata.vr_tx_cnt == 0)
		ifp->if_timer = 0;
}

void
vr_tick(void *xsc)
{
	struct vr_softc *sc = xsc;
	int s;

	s = splnet();
	if (sc->vr_flags & VR_F_RESTART) {
		printf("%s: restarting\n", sc->sc_dev.dv_xname);
		vr_init(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii_tick(&sc->sc_mii);
	timeout_add_sec(&sc->sc_to, 1);
	splx(s);
}

void
vr_rxtick(void *xsc)
{
	struct vr_softc *sc = xsc;
	int s;

	s = splnet();
	if (if_rxr_inuse(&sc->sc_rxring) == 0) {
		vr_fill_rx_ring(sc);
		if (if_rxr_inuse(&sc->sc_rxring) == 0)
			timeout_add(&sc->sc_rxto, 1);
	}
	splx(s);
}

int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		return 0;
	}

	status = CSR_READ_2(sc, VR_ISR);
	if (status)
		CSR_WRITE_2(sc, VR_ISR, status);

	if (status & VR_INTRS) {
		claimed = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
#ifdef VR_DEBUG
			printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
#endif
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
#ifdef VR_DEBUG
			printf("%s: receive error (%04x)",
			    sc->sc_dev.dv_xname, status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			printf("\n");
#endif
			vr_rxeoc(sc);
		}

		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    sc->sc_dev.dv_xname);
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->sc_dev.dv_xname);
			vr_init(sc);
			status = 0;
		}

		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
#ifdef VR_DEBUG
				if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
					printf("%s: transmit aborted\n",
					    sc->sc_dev.dv_xname);
				if (status & VR_ISR_UDFI)
					printf("%s: transmit underflow\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_oerrors++;
				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
vr_encap(struct vr_softc *sc, struct vr_chain **cp, struct mbuf *m)
{
	struct vr_chain *c = *cp;
	struct vr_desc *f = NULL;
	u_int32_t vr_ctl = 0, vr_status = 0, intdisable = 0;
	bus_dmamap_t txmap;
	int i, runt = 0;
	int error;

	if (sc->vr_quirks & VR_Q_CSUM) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			vr_ctl |= VR_TXCTL_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			vr_ctl |= VR_TXCTL_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			vr_ctl |= VR_TXCTL_UDPCSUM;
	}

	if (sc->vr_quirks & VR_Q_NEEDALIGN) {
		/* Deep copy for chips that need alignment */
		error = EFBIG;
	} else {
		error = bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	}

	switch (error) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	if (c->vr_map->dm_mapsize < VR_MIN_FRAMELEN)
		runt = 1;

#if NVLAN > 0
	/*
	 * Tell the chip to insert a VLAN tag if needed.
	 * The chip expects the VLAN ID (0x0FFF) and the PCP (0xE000)
	 * packed into 15 bits, without the gap at 0x1000 (reserved for
	 * DEI), so we have to deconstruct the 802.1Q TCI and rebuild it
	 * in the chip's layout.
	 */
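	/*
	 * Concretely: the 12-bit VID from EVL_VLANOFTAG() stays in bits
	 * 0-11 and the 3-bit priority from EVL_PRIOFTAG() moves to bits
	 * 12-14, one position below its place in a standard 802.1Q TCI.
	 */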
	if (m->m_flags & M_VLANTAG) {
		u_int32_t vtag = m->m_pkthdr.ether_vtag;
		vtag = EVL_VLANOFTAG(vtag) | EVL_PRIOFTAG(vtag) << 12;
		vr_status |= vtag << VR_TXSTAT_PQSHIFT;
		vr_ctl |= htole32(VR_TXCTL_INSERTTAG);
	}
#endif

	/*
	 * We only want TX completion interrupts on every Nth packet.
	 * We need to set VR_TXNEXT_INTDISABLE on every descriptor except
	 * for the last descriptor of every Nth packet, where we set
	 * VR_TXCTL_FINT.  The former is in the specs for only some chips.
	 * present: VT6102 VT6105M VT8235M
	 * not present: VT86C100 6105LOM
	 */
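	/*
	 * For example, on a chip with VR_Q_INTDISABLE and a threshold of
	 * N, the descriptors of packets 1 through N-1 are chained with
	 * INTDISABLE set and only packet N's last descriptor gets FINT,
	 * so one TX completion interrupt is taken per N packets.
	 */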
	if (++sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH != 0 &&
	    sc->vr_quirks & VR_Q_INTDISABLE)
		intdisable = VR_TXNEXT_INTDISABLE;

	c->vr_mbuf = m;
	txmap = c->vr_map;
	for (i = 0; i < txmap->dm_nsegs; i++) {
		if (i != 0)
			*cp = c = c->vr_nextdesc;
		f = c->vr_ptr;
		f->vr_ctl = htole32(txmap->dm_segs[i].ds_len | VR_TXCTL_TLINK |
		    vr_ctl);
		if (i == 0)
			f->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG);
		f->vr_status = htole32(vr_status);
		f->vr_data = htole32(txmap->dm_segs[i].ds_addr);
		f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable);
		sc->vr_cdata.vr_tx_cnt++;
	}

	/* Pad runt frames */
	if (runt) {
		*cp = c = c->vr_nextdesc;
		f = c->vr_ptr;
		f->vr_ctl = htole32((VR_MIN_FRAMELEN - txmap->dm_mapsize) |
		    VR_TXCTL_TLINK | vr_ctl);
		f->vr_status = htole32(vr_status);
		f->vr_data = htole32(sc->sc_zeromap.vrm_map->dm_segs[0].ds_addr);
		f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable);
		sc->vr_cdata.vr_tx_cnt++;
	}

	/* Set EOP on the last descriptor */
	f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);

	if (sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH == 0)
		f->vr_ctl |= htole32(VR_TXCTL_FINT);

	return (0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mbuf *m;
	struct vr_chain *cur_tx, *head_tx;
	unsigned int queued = 0;

	sc = ifp->if_softc;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	if (sc->vr_link == 0)
		return;

	cur_tx = sc->vr_cdata.vr_tx_prod;
	for (;;) {
		if (sc->vr_cdata.vr_tx_cnt + VR_MAXFRAGS >=
		    VR_TX_LIST_CNT - 1) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Pack the data into the descriptor. */
		head_tx = cur_tx;
		if (vr_encap(sc, &cur_tx, m)) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		queued++;

		/* Only set ownership bit on first descriptor */
		head_tx->vr_ptr->vr_status |= htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		cur_tx = cur_tx->vr_nextdesc;
	}
	if (queued > 0) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0,
		    sc->sc_listmap.vrm_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

void
vr_chipinit(struct vr_softc *sc)
{
	/*
	 * Make sure it isn't suspended.
	 */
	if (pci_get_capability(sc->sc_pc, sc->sc_tag,
	    PCI_CAP_PWRMGMT, NULL, NULL))
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_conf_write(sc->sc_pc, sc->sc_tag, VR_PCI_MODE,
	    pci_conf_read(sc->sc_pc, sc->sc_tag, VR_PCI_MODE) |
	    (VR_MODE3_MIION << 24));
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
}

void
vr_init(void *xsc)
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_chipinit(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		VR_SETBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	if (vr_list_tx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for tx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Program promiscuous mode and multicast filters.
	 */
	vr_iff(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap.vrm_map->dm_segs[0].ds_addr +
	    offsetof(struct vr_list_data, vr_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	sc->vr_link = 1;
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (!timeout_pending(&sc->sc_to))
		timeout_add_sec(&sc->sc_to, 1);

	splx(s);
}

/*
 * Set media options.
 */
int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		vr_init(sc);

	return (0);
}

/*
 * Report current media status.
 */
void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vr_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rxring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vr_iff(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

void
vr_watchdog(struct ifnet *ifp)
{
	struct vr_softc *sc;

	sc = ifp->if_softc;

	/*
	 * Since we only ask for completion interrupts every few
	 * packets, the watchdog will occasionally fire while we still
	 * have TX descriptors to reclaim, so check for that first.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	vr_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
vr_stop(struct vr_softc *sc)
{
	int i;
	struct ifnet *ifp;
	bus_dmamap_t map;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->sc_to);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));

	/* wait for xfers to shutdown */
	for (i = VR_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)))
			break;
	}
#ifdef VR_DEBUG
	if (i == 0)
		printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
#endif
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
		map = sc->vr_cdata.vr_rx_chain[i].vr_map;
		if (map != NULL) {
			if (map->dm_nsegs > 0)
				bus_dmamap_unload(sc->sc_dmat, map);
			bus_dmamap_destroy(sc->sc_dmat, map);
			sc->vr_cdata.vr_rx_chain[i].vr_map = NULL;
		}
	}
	bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
			ifp->if_oerrors++;
		}
		map = sc->vr_cdata.vr_tx_chain[i].vr_map;
		if (map != NULL) {
			if (map->dm_nsegs > 0)
				bus_dmamap_unload(sc->sc_dmat, map);
			bus_dmamap_destroy(sc->sc_dmat, map);
			sc->vr_cdata.vr_tx_chain[i].vr_map = NULL;
		}
	}
	bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list));
}

#ifndef SMALL_KERNEL
int
vr_wol(struct ifnet *ifp, int enable)
{
	struct vr_softc *sc = ifp->if_softc;

	/* Clear WOL configuration */
	CSR_WRITE_1(sc, VR_WOLCRCLR, 0xFF);

	/* Clear event status bits. */
	CSR_WRITE_1(sc, VR_PWRCSRCLR, 0xFF);

	/* Disable PME# assertion upon wake event. */
	VR_CLRBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB);
	VR_SETBIT(sc, VR_WOLCFGCLR, VR_WOLCFG_PMEOVR);

	if (enable) {
		VR_SETBIT(sc, VR_WOLCRSET, VR_WOLCR_MAGIC);

		/* Enable PME# assertion upon wake event. */
		VR_SETBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB);
		VR_SETBIT(sc, VR_WOLCFGSET, VR_WOLCFG_PMEOVR);
	}

	return (0);
}
#endif

int
vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r)
{
	struct vr_desc *d;
	struct mbuf *m;

	if (r == NULL)
		return (EINVAL);

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(u_int64_t));
1708 0 : if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, m, BUS_DMA_NOWAIT)) {
1709 0 : m_free(m);
1710 0 : return (ENOBUFS);
1711 : }
1712 :
1713 0 : bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize,
1714 : BUS_DMASYNC_PREREAD);
1715 :
1716 : /* Reinitialize the RX descriptor */
1717 0 : r->vr_mbuf = m;
1718 0 : d = r->vr_ptr;
1719 0 : d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr);
1720 0 : if (sc->vr_quirks & VR_Q_BABYJUMBO)
1721 0 : d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN_BABYJUMBO);
1722 : else
1723 0 : d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
1724 :
1725 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0,
1726 : sc->sc_listmap.vrm_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1727 :
1728 0 : d->vr_status = htole32(VR_RXSTAT);
1729 :
1730 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0,
1731 : sc->sc_listmap.vrm_map->dm_mapsize,
1732 : BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1733 :
1734 0 : return (0);
1735 0 : }