Line data Source code
1 : /* $OpenBSD: if_vic.c,v 1.98 2017/07/12 14:25:36 mikeb Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
5 : * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
6 : *
7 : * Permission to use, copy, modify, and distribute this software for any
8 : * purpose with or without fee is hereby granted, provided that the above
9 : * copyright notice and this permission notice appear in all copies.
10 : *
11 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 : */
19 :
20 : /*
21 : * Driver for the VMware Virtual NIC ("vmxnet")
22 : */
23 :
24 : #include "bpfilter.h"
25 :
26 : #include <sys/param.h>
27 : #include <sys/systm.h>
28 : #include <sys/sockio.h>
29 : #include <sys/mbuf.h>
30 : #include <sys/kernel.h>
31 : #include <sys/socket.h>
32 : #include <sys/malloc.h>
33 : #include <sys/timeout.h>
34 : #include <sys/device.h>
35 :
36 : #include <machine/bus.h>
37 : #include <machine/intr.h>
38 :
39 : #include <net/if.h>
40 : #include <net/if_media.h>
41 :
42 : #if NBPFILTER > 0
43 : #include <net/bpf.h>
44 : #endif
45 :
46 : #include <netinet/in.h>
47 : #include <netinet/if_ether.h>
48 :
49 : #include <dev/pci/pcireg.h>
50 : #include <dev/pci/pcivar.h>
51 : #include <dev/pci/pcidevs.h>
52 :
53 : #define VIC_PCI_BAR PCI_MAPREG_START /* Base Address Register */
54 :
55 : #define VIC_LANCE_SIZE 0x20
56 : #define VIC_MORPH_SIZE 0x04
57 : #define VIC_MORPH_MASK 0xffff
58 : #define VIC_MORPH_LANCE 0x2934
59 : #define VIC_MORPH_VMXNET 0x4392
60 : #define VIC_VMXNET_SIZE 0x40
61 : #define VIC_LANCE_MINLEN (VIC_LANCE_SIZE + VIC_MORPH_SIZE + \
62 : VIC_VMXNET_SIZE)
63 :
64 : #define VIC_MAGIC 0xbabe864f
65 :
66 : /* Register address offsets */
67 : #define VIC_DATA_ADDR 0x0000 /* Shared data address */
68 : #define VIC_DATA_LENGTH 0x0004 /* Shared data length */
69 : #define VIC_Tx_ADDR 0x0008 /* Tx pointer address */
70 :
71 : /* Command register */
72 : #define VIC_CMD 0x000c /* Command register */
73 : #define VIC_CMD_INTR_ACK 0x0001 /* Acknowledge interrupt */
74 : #define VIC_CMD_MCASTFIL 0x0002 /* Multicast address filter */
75 : #define VIC_CMD_MCASTFIL_LENGTH 2
76 : #define VIC_CMD_IFF 0x0004 /* Interface flags */
77 : #define VIC_CMD_IFF_PROMISC 0x0001 /* Promiscous enabled */
78 : #define VIC_CMD_IFF_BROADCAST 0x0002 /* Broadcast enabled */
79 : #define VIC_CMD_IFF_MULTICAST 0x0004 /* Multicast enabled */
80 : #define VIC_CMD_INTR_DISABLE 0x0020 /* Disable interrupts */
81 : #define VIC_CMD_INTR_ENABLE 0x0040 /* Enable interrupts */
82 : #define VIC_CMD_Tx_DONE 0x0100 /* Tx done register */
83 : #define VIC_CMD_NUM_Rx_BUF 0x0200 /* Number of Rx buffers */
84 : #define VIC_CMD_NUM_Tx_BUF 0x0400 /* Number of Tx buffers */
85 : #define VIC_CMD_NUM_PINNED_BUF 0x0800 /* Number of pinned buffers */
86 : #define VIC_CMD_HWCAP 0x1000 /* Capability register */
87 : #define VIC_CMD_HWCAP_SG (1<<0) /* Scatter-gather transmits */
88 : #define VIC_CMD_HWCAP_CSUM_IPv4 (1<<1) /* TCP/UDP cksum */
89 : #define VIC_CMD_HWCAP_CSUM_ALL (1<<3) /* Hardware cksum */
90 : #define VIC_CMD_HWCAP_CSUM \
91 : (VIC_CMD_HWCAP_CSUM_IPv4 | VIC_CMD_HWCAP_CSUM_ALL)
92 : #define VIC_CMD_HWCAP_DMA_HIGH (1<<4) /* High DMA mapping */
93 : #define VIC_CMD_HWCAP_TOE (1<<5) /* TCP offload engine */
94 : #define VIC_CMD_HWCAP_TSO (1<<6) /* TCP segmentation offload */
95 : #define VIC_CMD_HWCAP_TSO_SW (1<<7) /* Software TCP segmentation */
96 : #define VIC_CMD_HWCAP_VPROM (1<<8) /* Virtual PROM available */
97 : #define VIC_CMD_HWCAP_VLAN_Tx (1<<9) /* Hardware VLAN MTU Rx */
98 : #define VIC_CMD_HWCAP_VLAN_Rx (1<<10) /* Hardware VLAN MTU Tx */
99 : #define VIC_CMD_HWCAP_VLAN_SW (1<<11) /* Software VLAN MTU */
100 : #define VIC_CMD_HWCAP_VLAN \
101 : (VIC_CMD_HWCAP_VLAN_Tx | VIC_CMD_HWCAP_VLAN_Rx | \
102 : VIC_CMD_HWCAP_VLAN_SW)
103 : #define VIC_CMD_HWCAP_BITS \
104 : "\20\01SG\02CSUM4\03CSUM\04HDMA\05TOE\06TSO" \
105 : "\07TSOSW\10VPROM\13VLANTx\14VLANRx\15VLANSW"
106 : #define VIC_CMD_FEATURE 0x2000 /* Additional feature register */
107 : #define VIC_CMD_FEATURE_0_Tx (1<<0)
108 : #define VIC_CMD_FEATURE_TSO (1<<1)
109 :
110 : #define VIC_LLADDR 0x0010 /* MAC address register */
111 : #define VIC_VERSION_MINOR 0x0018 /* Minor version register */
112 : #define VIC_VERSION_MAJOR 0x001c /* Major version register */
113 : #define VIC_VERSION_MAJOR_M 0xffff0000
114 :
115 : /* Status register */
116 : #define VIC_STATUS 0x0020
117 : #define VIC_STATUS_CONNECTED (1<<0)
118 : #define VIC_STATUS_ENABLED (1<<1)
119 :
120 : #define VIC_TOE_ADDR 0x0024 /* TCP offload address */
121 :
122 : /* Virtual PROM address */
123 : #define VIC_VPROM 0x0028
124 : #define VIC_VPROM_LENGTH 6
125 :
126 : /* Shared DMA data structures */
127 :
128 : struct vic_sg {
129 : u_int32_t sg_addr_low;
130 : u_int16_t sg_addr_high;
131 : u_int16_t sg_length;
132 : } __packed;
133 :
134 : #define VIC_SG_MAX 6
135 : #define VIC_SG_ADDR_MACH 0
136 : #define VIC_SG_ADDR_PHYS 1
137 : #define VIC_SG_ADDR_VIRT 3
138 :
139 : struct vic_sgarray {
140 : u_int16_t sa_addr_type;
141 : u_int16_t sa_length;
142 : struct vic_sg sa_sg[VIC_SG_MAX];
143 : } __packed;
144 :
145 : struct vic_rxdesc {
146 : u_int64_t rx_physaddr;
147 : u_int32_t rx_buflength;
148 : u_int32_t rx_length;
149 : u_int16_t rx_owner;
150 : u_int16_t rx_flags;
151 : u_int32_t rx_priv;
152 : } __packed;
153 :
154 : #define VIC_RX_FLAGS_CSUMHW_OK 0x0001
155 :
156 : struct vic_txdesc {
157 : u_int16_t tx_flags;
158 : u_int16_t tx_owner;
159 : u_int32_t tx_priv;
160 : u_int32_t tx_tsomss;
161 : struct vic_sgarray tx_sa;
162 : } __packed;
163 :
164 : #define VIC_TX_FLAGS_KEEP 0x0001
165 : #define VIC_TX_FLAGS_TXURN 0x0002
166 : #define VIC_TX_FLAGS_CSUMHW 0x0004
167 : #define VIC_TX_FLAGS_TSO 0x0008
168 : #define VIC_TX_FLAGS_PINNED 0x0010
169 : #define VIC_TX_FLAGS_QRETRY 0x1000
170 :
171 : struct vic_stats {
172 : u_int32_t vs_tx_count;
173 : u_int32_t vs_tx_packets;
174 : u_int32_t vs_tx_0copy;
175 : u_int32_t vs_tx_copy;
176 : u_int32_t vs_tx_maxpending;
177 : u_int32_t vs_tx_stopped;
178 : u_int32_t vs_tx_overrun;
179 : u_int32_t vs_intr;
180 : u_int32_t vs_rx_packets;
181 : u_int32_t vs_rx_underrun;
182 : } __packed;
183 :
184 : #define VIC_NRXRINGS 2
185 :
186 : struct vic_data {
187 : u_int32_t vd_magic;
188 :
189 : struct {
190 : u_int32_t length;
191 : u_int32_t nextidx;
192 : } vd_rx[VIC_NRXRINGS];
193 :
194 : u_int32_t vd_irq;
195 : u_int32_t vd_iff;
196 :
197 : u_int32_t vd_mcastfil[VIC_CMD_MCASTFIL_LENGTH];
198 :
199 : u_int32_t vd_reserved1[1];
200 :
201 : u_int32_t vd_tx_length;
202 : u_int32_t vd_tx_curidx;
203 : u_int32_t vd_tx_nextidx;
204 : u_int32_t vd_tx_stopped;
205 : u_int32_t vd_tx_triggerlvl;
206 : u_int32_t vd_tx_queued;
207 : u_int32_t vd_tx_minlength;
208 :
209 : u_int32_t vd_reserved2[6];
210 :
211 : u_int32_t vd_rx_saved_nextidx[VIC_NRXRINGS];
212 : u_int32_t vd_tx_saved_nextidx;
213 :
214 : u_int32_t vd_length;
215 : u_int32_t vd_rx_offset[VIC_NRXRINGS];
216 : u_int32_t vd_tx_offset;
217 : u_int32_t vd_debug;
218 : u_int32_t vd_tx_physaddr;
219 : u_int32_t vd_tx_physaddr_length;
220 : u_int32_t vd_tx_maxlength;
221 :
222 : struct vic_stats vd_stats;
223 : } __packed;
224 :
225 : #define VIC_OWNER_DRIVER 0
226 : #define VIC_OWNER_DRIVER_PEND 1
227 : #define VIC_OWNER_NIC 2
228 : #define VIC_OWNER_NIC_PEND 3
229 :
230 : #define VIC_JUMBO_FRAMELEN 9018
231 : #define VIC_JUMBO_MTU (VIC_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)
232 :
233 : #define VIC_NBUF 100
234 : #define VIC_NBUF_MAX 128
235 : #define VIC_MAX_SCATTER 1 /* 8? */
236 : #define VIC_QUEUE_SIZE VIC_NBUF_MAX
237 : #define VIC_INC(_x, _y) (_x) = ((_x) + 1) % (_y)
238 : #define VIC_TX_TIMEOUT 5
239 :
240 : #define VIC_MIN_FRAMELEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
241 :
242 : #define VIC_TXURN_WARN(_sc) ((_sc)->sc_txpending >= ((_sc)->sc_ntxbuf - 5))
243 : #define VIC_TXURN(_sc) ((_sc)->sc_txpending >= (_sc)->sc_ntxbuf)
244 :
245 : struct vic_rxbuf {
246 : bus_dmamap_t rxb_dmamap;
247 : struct mbuf *rxb_m;
248 : };
249 :
250 : struct vic_txbuf {
251 : bus_dmamap_t txb_dmamap;
252 : struct mbuf *txb_m;
253 : };
254 :
255 : struct vic_softc {
256 : struct device sc_dev;
257 :
258 : pci_chipset_tag_t sc_pc;
259 : pcitag_t sc_tag;
260 :
261 : bus_space_tag_t sc_iot;
262 : bus_space_handle_t sc_ioh;
263 : bus_size_t sc_ios;
264 : bus_dma_tag_t sc_dmat;
265 :
266 : void *sc_ih;
267 :
268 : struct timeout sc_tick;
269 :
270 : struct arpcom sc_ac;
271 : struct ifmedia sc_media;
272 :
273 : u_int32_t sc_nrxbuf;
274 : u_int32_t sc_ntxbuf;
275 : u_int32_t sc_cap;
276 : u_int32_t sc_feature;
277 : u_int8_t sc_lladdr[ETHER_ADDR_LEN];
278 :
279 : bus_dmamap_t sc_dma_map;
280 : bus_dma_segment_t sc_dma_seg;
281 : size_t sc_dma_size;
282 : caddr_t sc_dma_kva;
283 : #define VIC_DMA_DVA(_sc) ((_sc)->sc_dma_map->dm_segs[0].ds_addr)
284 : #define VIC_DMA_KVA(_sc) ((void *)(_sc)->sc_dma_kva)
285 :
286 : struct vic_data *sc_data;
287 :
288 : struct {
289 : struct if_rxring ring;
290 : struct vic_rxbuf *bufs;
291 : struct vic_rxdesc *slots;
292 : int end;
293 : u_int pktlen;
294 : } sc_rxq[VIC_NRXRINGS];
295 :
296 : struct vic_txbuf *sc_txbuf;
297 : struct vic_txdesc *sc_txq;
298 : volatile u_int sc_txpending;
299 : };
300 :
301 : struct cfdriver vic_cd = {
302 : NULL, "vic", DV_IFNET
303 : };
304 :
305 : int vic_match(struct device *, void *, void *);
306 : void vic_attach(struct device *, struct device *, void *);
307 :
308 : struct cfattach vic_ca = {
309 : sizeof(struct vic_softc), vic_match, vic_attach
310 : };
311 :
312 : int vic_intr(void *);
313 :
314 : int vic_query(struct vic_softc *);
315 : int vic_alloc_data(struct vic_softc *);
316 : int vic_init_data(struct vic_softc *sc);
317 : int vic_uninit_data(struct vic_softc *sc);
318 :
319 : u_int32_t vic_read(struct vic_softc *, bus_size_t);
320 : void vic_write(struct vic_softc *, bus_size_t, u_int32_t);
321 :
322 : u_int32_t vic_read_cmd(struct vic_softc *, u_int32_t);
323 :
324 : int vic_alloc_dmamem(struct vic_softc *);
325 : void vic_free_dmamem(struct vic_softc *);
326 :
327 : void vic_link_state(struct vic_softc *);
328 : void vic_rx_fill(struct vic_softc *, int);
329 : void vic_rx_proc(struct vic_softc *, int);
330 : void vic_tx_proc(struct vic_softc *);
331 : void vic_iff(struct vic_softc *);
332 : void vic_getlladdr(struct vic_softc *);
333 : void vic_setlladdr(struct vic_softc *);
334 : int vic_media_change(struct ifnet *);
335 : void vic_media_status(struct ifnet *, struct ifmediareq *);
336 : void vic_start(struct ifnet *);
337 : int vic_load_txb(struct vic_softc *, struct vic_txbuf *,
338 : struct mbuf *);
339 : void vic_watchdog(struct ifnet *);
340 : int vic_ioctl(struct ifnet *, u_long, caddr_t);
341 : int vic_rxrinfo(struct vic_softc *, struct if_rxrinfo *);
342 : void vic_init(struct ifnet *);
343 : void vic_stop(struct ifnet *);
344 : void vic_tick(void *);
345 :
346 : #define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
347 :
348 : struct mbuf *vic_alloc_mbuf(struct vic_softc *, bus_dmamap_t, u_int);
349 :
350 : const struct pci_matchid vic_devices[] = {
351 : { PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET }
352 : };
353 :
354 : int
355 0 : vic_match(struct device *parent, void *match, void *aux)
356 : {
357 0 : struct pci_attach_args *pa = aux;
358 : pcireg_t memtype;
359 0 : bus_size_t pcisize;
360 0 : bus_addr_t pciaddr;
361 :
362 0 : switch (pa->pa_id) {
363 : case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
364 0 : return (1);
365 :
366 : case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
367 0 : memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR);
368 0 : if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR,
369 0 : memtype, &pciaddr, &pcisize, NULL) != 0)
370 : break;
371 :
372 0 : if (pcisize > VIC_LANCE_MINLEN)
373 0 : return (2);
374 :
375 : break;
376 : }
377 :
378 0 : return (0);
379 0 : }
380 :
381 : void
382 0 : vic_attach(struct device *parent, struct device *self, void *aux)
383 : {
384 0 : struct vic_softc *sc = (struct vic_softc *)self;
385 0 : struct pci_attach_args *pa = aux;
386 0 : bus_space_handle_t ioh;
387 : pcireg_t r;
388 0 : pci_intr_handle_t ih;
389 : struct ifnet *ifp;
390 :
391 0 : sc->sc_pc = pa->pa_pc;
392 0 : sc->sc_tag = pa->pa_tag;
393 0 : sc->sc_dmat = pa->pa_dmat;
394 :
395 0 : r = pci_mapreg_type(sc->sc_pc, sc->sc_tag, VIC_PCI_BAR);
396 0 : if (pci_mapreg_map(pa, VIC_PCI_BAR, r, 0, &sc->sc_iot,
397 0 : &ioh, NULL, &sc->sc_ios, 0) != 0) {
398 0 : printf(": unable to map system interface register\n");
399 0 : return;
400 : }
401 :
402 0 : switch (pa->pa_id) {
403 : case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
404 0 : if (bus_space_subregion(sc->sc_iot, ioh, 0, sc->sc_ios,
405 0 : &sc->sc_ioh) != 0) {
406 0 : printf(": unable to map register window\n");
407 0 : goto unmap;
408 : }
409 : break;
410 :
411 : case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
412 0 : if (bus_space_subregion(sc->sc_iot, ioh,
413 : VIC_LANCE_SIZE + VIC_MORPH_SIZE, VIC_VMXNET_SIZE,
414 0 : &sc->sc_ioh) != 0) {
415 0 : printf(": unable to map register window\n");
416 0 : goto unmap;
417 : }
418 :
419 0 : bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
420 : BUS_SPACE_BARRIER_READ);
421 0 : r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);
422 :
423 0 : if ((r & VIC_MORPH_MASK) == VIC_MORPH_VMXNET)
424 : break;
425 0 : if ((r & VIC_MORPH_MASK) != VIC_MORPH_LANCE) {
426 0 : printf(": unexpect morph value (0x%08x)\n", r);
427 0 : goto unmap;
428 : }
429 :
430 0 : r &= ~VIC_MORPH_MASK;
431 0 : r |= VIC_MORPH_VMXNET;
432 :
433 0 : bus_space_write_4(sc->sc_iot, ioh, VIC_LANCE_SIZE, r);
434 0 : bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
435 : BUS_SPACE_BARRIER_WRITE);
436 :
437 0 : bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
438 : BUS_SPACE_BARRIER_READ);
439 0 : r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);
440 :
441 0 : if ((r & VIC_MORPH_MASK) != VIC_MORPH_VMXNET) {
442 0 : printf(": unable to morph vlance chip\n");
443 0 : goto unmap;
444 : }
445 :
446 : break;
447 : }
448 :
449 0 : if (pci_intr_map(pa, &ih) != 0) {
450 0 : printf(": unable to map interrupt\n");
451 0 : goto unmap;
452 : }
453 :
454 0 : sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
455 0 : vic_intr, sc, DEVNAME(sc));
456 0 : if (sc->sc_ih == NULL) {
457 0 : printf(": unable to establish interrupt\n");
458 0 : goto unmap;
459 : }
460 :
461 0 : if (vic_query(sc) != 0) {
462 : /* error printed by vic_query */
463 : goto unmap;
464 : }
465 :
466 0 : if (vic_alloc_data(sc) != 0) {
467 : /* error printed by vic_alloc */
468 : goto unmap;
469 : }
470 :
471 0 : timeout_set(&sc->sc_tick, vic_tick, sc);
472 :
473 0 : bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
474 :
475 0 : ifp = &sc->sc_ac.ac_if;
476 0 : ifp->if_softc = sc;
477 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
478 0 : ifp->if_ioctl = vic_ioctl;
479 0 : ifp->if_start = vic_start;
480 0 : ifp->if_watchdog = vic_watchdog;
481 0 : ifp->if_hardmtu = VIC_JUMBO_MTU;
482 0 : strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
483 0 : IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_ntxbuf - 1);
484 :
485 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
486 :
487 : #if 0
488 : /* XXX interface capabilities */
489 : if (sc->sc_cap & VIC_CMD_HWCAP_VLAN)
490 : ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
491 : if (sc->sc_cap & VIC_CMD_HWCAP_CSUM)
492 : ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
493 : IFCAP_CSUM_UDPv4;
494 : #endif
495 :
496 0 : ifmedia_init(&sc->sc_media, 0, vic_media_change, vic_media_status);
497 0 : ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
498 0 : ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
499 :
500 0 : if_attach(ifp);
501 0 : ether_ifattach(ifp);
502 :
503 0 : printf(": %s, address %s\n", pci_intr_string(pa->pa_pc, ih),
504 0 : ether_sprintf(sc->sc_lladdr));
505 :
506 : #ifdef VIC_DEBUG
507 : printf("%s: feature 0x%8x, cap 0x%8x, rx/txbuf %d/%d\n", DEVNAME(sc),
508 : sc->sc_feature, sc->sc_cap, sc->sc_nrxbuf, sc->sc_ntxbuf);
509 : #endif
510 :
511 0 : return;
512 :
513 : unmap:
514 0 : bus_space_unmap(sc->sc_iot, ioh, sc->sc_ios);
515 0 : sc->sc_ios = 0;
516 0 : }
517 :
518 : int
519 0 : vic_query(struct vic_softc *sc)
520 : {
521 : u_int32_t major, minor;
522 :
523 0 : major = vic_read(sc, VIC_VERSION_MAJOR);
524 0 : minor = vic_read(sc, VIC_VERSION_MINOR);
525 :
526 : /* Check for a supported version */
527 0 : if ((major & VIC_VERSION_MAJOR_M) !=
528 : (VIC_MAGIC & VIC_VERSION_MAJOR_M)) {
529 0 : printf(": magic mismatch\n");
530 0 : return (1);
531 : }
532 :
533 0 : if (VIC_MAGIC > major || VIC_MAGIC < minor) {
534 0 : printf(": unsupported version (%X)\n",
535 0 : major & ~VIC_VERSION_MAJOR_M);
536 0 : return (1);
537 : }
538 :
539 0 : sc->sc_nrxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Rx_BUF);
540 0 : sc->sc_ntxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Tx_BUF);
541 0 : sc->sc_feature = vic_read_cmd(sc, VIC_CMD_FEATURE);
542 0 : sc->sc_cap = vic_read_cmd(sc, VIC_CMD_HWCAP);
543 :
544 0 : vic_getlladdr(sc);
545 :
546 0 : if (sc->sc_nrxbuf > VIC_NBUF_MAX || sc->sc_nrxbuf == 0)
547 0 : sc->sc_nrxbuf = VIC_NBUF;
548 0 : if (sc->sc_ntxbuf > VIC_NBUF_MAX || sc->sc_ntxbuf == 0)
549 0 : sc->sc_ntxbuf = VIC_NBUF;
550 :
551 0 : return (0);
552 0 : }
553 :
/*
 * Allocate the driver-side buffer bookkeeping arrays and the shared
 * DMA region laid out as: struct vic_data, then the rx descriptor
 * rings, then the tx descriptor ring.  Returns 0 on success, 1 on
 * failure (with everything allocated so far freed again).
 */
int
vic_alloc_data(struct vic_softc *sc)
{
	u_int8_t *kva;
	u_int offset;
	struct vic_rxdesc *rxd;
	int i, q;

	/* Per-ring receive buffer sizes: ring 0 cluster-sized, ring 1 4k. */
	sc->sc_rxq[0].pktlen = MCLBYTES;
	sc->sc_rxq[1].pktlen = 4096;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_rxq[q].bufs = mallocarray(sc->sc_nrxbuf,
		    sizeof(struct vic_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->sc_rxq[q].bufs == NULL) {
			printf(": unable to allocate rxbuf for ring %d\n", q);
			goto freerx;
		}
	}

	sc->sc_txbuf = mallocarray(sc->sc_ntxbuf, sizeof(struct vic_txbuf),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_txbuf == NULL) {
		printf(": unable to allocate txbuf\n");
		goto freerx;
	}

	/* Shared region: header + all rx descriptors + tx descriptors. */
	sc->sc_dma_size = sizeof(struct vic_data) +
	    (sc->sc_nrxbuf * VIC_NRXRINGS) * sizeof(struct vic_rxdesc) +
	    sc->sc_ntxbuf * sizeof(struct vic_txdesc);

	if (vic_alloc_dmamem(sc) != 0) {
		printf(": unable to allocate dma region\n");
		goto freetx;
	}
	kva = VIC_DMA_KVA(sc);

	/* set up basic vic data */
	sc->sc_data = VIC_DMA_KVA(sc);

	sc->sc_data->vd_magic = VIC_MAGIC;
	sc->sc_data->vd_length = sc->sc_dma_size;

	/* Descriptor rings start right after the header. */
	offset = sizeof(struct vic_data);

	/* set up the rx rings */

	for (q = 0; q < VIC_NRXRINGS; q++) {
		/* Byte offsets are what the device reads from vd_rx_offset. */
		sc->sc_rxq[q].slots = (struct vic_rxdesc *)&kva[offset];
		sc->sc_data->vd_rx_offset[q] = offset;
		sc->sc_data->vd_rx[q].length = sc->sc_nrxbuf;

		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxd = &sc->sc_rxq[q].slots[i];

			/* Empty slot, owned by the driver until filled. */
			rxd->rx_physaddr = 0;
			rxd->rx_buflength = 0;
			rxd->rx_length = 0;
			rxd->rx_owner = VIC_OWNER_DRIVER;

			offset += sizeof(struct vic_rxdesc);
		}
	}

	/* set up the tx ring */
	sc->sc_txq = (struct vic_txdesc *)&kva[offset];

	sc->sc_data->vd_tx_offset = offset;
	sc->sc_data->vd_tx_length = sc->sc_ntxbuf;

	return (0);
freetx:
	free(sc->sc_txbuf, M_DEVBUF, 0);
	/* Fall through: free the bufs arrays of all rx rings. */
	q = VIC_NRXRINGS;
freerx:
	while (q--)
		free(sc->sc_rxq[q].bufs, M_DEVBUF, 0);

	return (1);
}
634 :
/*
 * Refill rx ring q with fresh mbufs and hand each filled slot over to
 * the NIC.  Stops early when mbuf allocation fails; slots that could
 * not be filled are returned to the if_rxr accounting.
 */
void
vic_rx_fill(struct vic_softc *sc, int q)
{
	struct vic_rxbuf *rxb;
	struct vic_rxdesc *rxd;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rxq[q].ring, sc->sc_nrxbuf);
	    slots > 0; slots--) {
		/* sc_rxq[q].end is the next free slot in the ring. */
		rxb = &sc->sc_rxq[q].bufs[sc->sc_rxq[q].end];
		rxd = &sc->sc_rxq[q].slots[sc->sc_rxq[q].end];

		rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap,
		    sc->sc_rxq[q].pktlen);
		if (rxb->rxb_m == NULL)
			break;

		/* Sync before the NIC starts DMAing into the buffer. */
		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

		rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
		rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len;
		rxd->rx_length = 0;
		/* Ownership transfer must come after the slot is filled in. */
		rxd->rx_owner = VIC_OWNER_NIC;

		VIC_INC(sc->sc_rxq[q].end, sc->sc_data->vd_rx[q].length);
	}
	/* Give back the slots we could not fill. */
	if_rxr_put(&sc->sc_rxq[q].ring, slots);
}
664 :
665 : int
666 0 : vic_init_data(struct vic_softc *sc)
667 : {
668 : struct vic_rxbuf *rxb;
669 : struct vic_rxdesc *rxd;
670 : struct vic_txbuf *txb;
671 :
672 : int q, i;
673 :
674 0 : for (q = 0; q < VIC_NRXRINGS; q++) {
675 0 : for (i = 0; i < sc->sc_nrxbuf; i++) {
676 0 : rxb = &sc->sc_rxq[q].bufs[i];
677 0 : rxd = &sc->sc_rxq[q].slots[i];
678 :
679 0 : if (bus_dmamap_create(sc->sc_dmat,
680 : sc->sc_rxq[q].pktlen, 1, sc->sc_rxq[q].pktlen, 0,
681 0 : BUS_DMA_NOWAIT, &rxb->rxb_dmamap) != 0) {
682 0 : printf("%s: unable to create dmamap for "
683 0 : "ring %d slot %d\n", DEVNAME(sc), q, i);
684 0 : goto freerxbs;
685 : }
686 :
687 : /* scrub the ring */
688 0 : rxd->rx_physaddr = 0;
689 0 : rxd->rx_buflength = 0;
690 0 : rxd->rx_length = 0;
691 0 : rxd->rx_owner = VIC_OWNER_DRIVER;
692 : }
693 0 : sc->sc_rxq[q].end = 0;
694 :
695 0 : if_rxr_init(&sc->sc_rxq[q].ring, 2, sc->sc_nrxbuf - 1);
696 0 : vic_rx_fill(sc, q);
697 : }
698 :
699 0 : for (i = 0; i < sc->sc_ntxbuf; i++) {
700 0 : txb = &sc->sc_txbuf[i];
701 0 : if (bus_dmamap_create(sc->sc_dmat, VIC_JUMBO_FRAMELEN,
702 : (sc->sc_cap & VIC_CMD_HWCAP_SG) ? VIC_SG_MAX : 1,
703 : VIC_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
704 0 : &txb->txb_dmamap) != 0) {
705 0 : printf("%s: unable to create dmamap for tx %d\n",
706 0 : DEVNAME(sc), i);
707 : goto freetxbs;
708 : }
709 0 : txb->txb_m = NULL;
710 : }
711 :
712 0 : return (0);
713 :
714 : freetxbs:
715 0 : while (i--) {
716 0 : txb = &sc->sc_txbuf[i];
717 0 : bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
718 : }
719 :
720 0 : i = sc->sc_nrxbuf;
721 0 : q = VIC_NRXRINGS - 1;
722 : freerxbs:
723 0 : while (q >= 0) {
724 0 : while (i--) {
725 0 : rxb = &sc->sc_rxq[q].bufs[i];
726 :
727 0 : if (rxb->rxb_m != NULL) {
728 0 : bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
729 : 0, rxb->rxb_m->m_pkthdr.len,
730 : BUS_DMASYNC_POSTREAD);
731 0 : bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
732 0 : m_freem(rxb->rxb_m);
733 0 : rxb->rxb_m = NULL;
734 0 : }
735 0 : bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
736 : }
737 0 : q--;
738 : }
739 :
740 0 : return (1);
741 0 : }
742 :
743 : int
744 0 : vic_uninit_data(struct vic_softc *sc)
745 : {
746 : struct vic_rxbuf *rxb;
747 : struct vic_rxdesc *rxd;
748 : struct vic_txbuf *txb;
749 :
750 : int i, q;
751 :
752 0 : for (q = 0; q < VIC_NRXRINGS; q++) {
753 0 : for (i = 0; i < sc->sc_nrxbuf; i++) {
754 0 : rxb = &sc->sc_rxq[q].bufs[i];
755 0 : rxd = &sc->sc_rxq[q].slots[i];
756 :
757 0 : if (rxb->rxb_m != NULL) {
758 0 : bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
759 : 0, rxb->rxb_m->m_pkthdr.len,
760 : BUS_DMASYNC_POSTREAD);
761 0 : bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
762 0 : m_freem(rxb->rxb_m);
763 0 : rxb->rxb_m = NULL;
764 0 : }
765 0 : bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
766 : }
767 : }
768 :
769 0 : for (i = 0; i < sc->sc_ntxbuf; i++) {
770 0 : txb = &sc->sc_txbuf[i];
771 0 : bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
772 : }
773 :
774 0 : return (0);
775 : }
776 :
777 : void
778 0 : vic_link_state(struct vic_softc *sc)
779 : {
780 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
781 : u_int32_t status;
782 : int link_state = LINK_STATE_DOWN;
783 :
784 0 : status = vic_read(sc, VIC_STATUS);
785 0 : if (status & VIC_STATUS_CONNECTED)
786 0 : link_state = LINK_STATE_FULL_DUPLEX;
787 0 : if (ifp->if_link_state != link_state) {
788 0 : ifp->if_link_state = link_state;
789 0 : if_link_state_change(ifp);
790 0 : }
791 0 : }
792 :
793 : int
794 0 : vic_intr(void *arg)
795 : {
796 0 : struct vic_softc *sc = (struct vic_softc *)arg;
797 : int q;
798 :
799 0 : for (q = 0; q < VIC_NRXRINGS; q++)
800 0 : vic_rx_proc(sc, q);
801 0 : vic_tx_proc(sc);
802 :
803 0 : vic_write(sc, VIC_CMD, VIC_CMD_INTR_ACK);
804 :
805 0 : return (-1);
806 : }
807 :
/*
 * Harvest received packets from rx ring q: walk the descriptors that
 * the NIC has handed back to the driver, pass good frames up the
 * stack, then refill the ring.
 */
void
vic_rx_proc(struct vic_softc *sc, int q)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct vic_rxdesc *rxd;
	struct vic_rxbuf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int len, idx;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* Pick up the device's writes to the shared data area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (if_rxr_inuse(&sc->sc_rxq[q].ring) > 0) {
		/* nextidx lives in shared memory; range-check it. */
		idx = sc->sc_data->vd_rx[q].nextidx;
		if (idx >= sc->sc_data->vd_rx[q].length) {
			ifp->if_ierrors++;
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: receive index error\n",
				    sc->sc_dev.dv_xname);
			break;
		}

		/* Stop at the first descriptor the NIC still owns. */
		rxd = &sc->sc_rxq[q].slots[idx];
		if (rxd->rx_owner != VIC_OWNER_DRIVER)
			break;

		rxb = &sc->sc_rxq[q].bufs[idx];

		if (rxb->rxb_m == NULL) {
			ifp->if_ierrors++;
			printf("%s: rxb %d has no mbuf\n", DEVNAME(sc), idx);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);

		/* Take the mbuf out of the slot; refill happens below. */
		m = rxb->rxb_m;
		rxb->rxb_m = NULL;
		len = rxd->rx_length;

		/* Drop runts but still advance past the descriptor. */
		if (len < VIC_MIN_FRAMELEN) {
			m_freem(m);

			ifp->if_iqdrops++;
			goto nextp;
		}

		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

nextp:
		if_rxr_put(&sc->sc_rxq[q].ring, 1);
		VIC_INC(sc->sc_data->vd_rx[q].nextidx, sc->sc_nrxbuf);
	}

	if_input(ifp, &ml);
	vic_rx_fill(sc, q);

	/* Flush our updates before the device looks at the ring again. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
876 :
/*
 * Reclaim completed transmit descriptors: free the mbufs the NIC has
 * finished with, clear the oactive state and kick the start routine to
 * queue more packets.
 */
void
vic_tx_proc(struct vic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct vic_txdesc *txd;
	struct vic_txbuf *txb;
	int idx;

	/* Pick up the device's writes to the shared data area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_txpending > 0) {
		/* curidx lives in shared memory; range-check it. */
		idx = sc->sc_data->vd_tx_curidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			ifp->if_oerrors++;
			break;
		}

		/* Stop at the first descriptor the NIC still owns. */
		txd = &sc->sc_txq[idx];
		if (txd->tx_owner != VIC_OWNER_DRIVER)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->txb_m == NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txb->txb_dmamap, 0,
		    txb->txb_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->txb_dmamap);

		m_freem(txb->txb_m);
		txb->txb_m = NULL;
		/* A slot freed up, so transmission may resume. */
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_txpending--;
		sc->sc_data->vd_tx_stopped = 0;

		VIC_INC(sc->sc_data->vd_tx_curidx, sc->sc_data->vd_tx_length);
	}

	/* Flush our updates before the device looks at the ring again. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Try to fill the freed slots right away. */
	vic_start(ifp);
}
925 :
/*
 * Program the interface filter flags and the multicast hash filter in
 * the shared data area, then tell the device to reload both.
 */
void
vic_iff(struct vic_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	/* The filter is addressed as an array of 16-bit words. */
	u_int16_t *mcastfil = (u_int16_t *)sc->sc_data->vd_mcastfil;
	u_int flags;

	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Always accept broadcast frames. */
	flags = VIC_CMD_IFF_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Ranges can't be hashed: accept all multicast instead. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			flags |= VIC_CMD_IFF_PROMISC;
		else
			flags |= VIC_CMD_IFF_MULTICAST;
		memset(&sc->sc_data->vd_mcastfil, 0xff,
		    sizeof(sc->sc_data->vd_mcastfil));
	} else {
		flags |= VIC_CMD_IFF_MULTICAST;

		bzero(&sc->sc_data->vd_mcastfil,
		    sizeof(sc->sc_data->vd_mcastfil));

		/* Hash each multicast address into a bit of the filter. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Use the top 6 bits of the little-endian CRC. */
			crc >>= 26;

			mcastfil[crc >> 4] |= htole16(1 << (crc & 0xf));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Tell the device to pick up the new filter and flags. */
	vic_write(sc, VIC_CMD, VIC_CMD_MCASTFIL);
	sc->sc_data->vd_iff = flags;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);
}
972 :
973 : void
974 0 : vic_getlladdr(struct vic_softc *sc)
975 : {
976 : u_int32_t reg;
977 :
978 : /* Get MAC address */
979 0 : reg = (sc->sc_cap & VIC_CMD_HWCAP_VPROM) ? VIC_VPROM : VIC_LLADDR;
980 :
981 0 : bus_space_barrier(sc->sc_iot, sc->sc_ioh, reg, ETHER_ADDR_LEN,
982 : BUS_SPACE_BARRIER_READ);
983 0 : bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, reg, sc->sc_lladdr,
984 : ETHER_ADDR_LEN);
985 :
986 : /* Update the MAC address register */
987 0 : if (reg == VIC_VPROM)
988 0 : vic_setlladdr(sc);
989 0 : }
990 :
/*
 * Program sc_lladdr into the device's MAC address register.
 */
void
vic_setlladdr(struct vic_softc *sc)
{
	bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, VIC_LLADDR,
	    sc->sc_lladdr, ETHER_ADDR_LEN);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, VIC_LLADDR, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_WRITE);
}
999 :
int
vic_media_change(struct ifnet *ifp)
{
	/* Media is fixed on the virtual NIC; there is nothing to change. */
	return (0);
}
1006 :
1007 : void
1008 0 : vic_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1009 : {
1010 0 : struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
1011 :
1012 0 : imr->ifm_active = IFM_ETHER | IFM_AUTO;
1013 0 : imr->ifm_status = IFM_AVALID;
1014 :
1015 0 : vic_link_state(sc);
1016 :
1017 0 : if (LINK_STATE_IS_UP(ifp->if_link_state) &&
1018 0 : ifp->if_flags & IFF_UP)
1019 0 : imr->ifm_status |= IFM_ACTIVE;
1020 0 : }
1021 :
/*
 * Transmit start routine: move packets from the send queue into tx
 * descriptors and hand them to the NIC.  Stops when the ring is full
 * (setting oactive) or the queue is empty.
 */
void
vic_start(struct ifnet *ifp)
{
	struct vic_softc *sc;
	struct mbuf *m;
	struct vic_txbuf *txb;
	struct vic_txdesc *txd;
	struct vic_sg *sge;
	bus_dmamap_t dmap;
	int i, idx;
	int tx = 0;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	sc = (struct vic_softc *)ifp->if_softc;

	/* Pick up the device's writes to the shared data area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Ring full: stop and wait for vic_tx_proc to free slots. */
		if (VIC_TXURN(sc)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* nextidx lives in shared memory; range-check it. */
		idx = sc->sc_data->vd_tx_nextidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			printf("%s: tx idx is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		txb = &sc->sc_txbuf[idx];

		/* The slot must be free before we dequeue a packet for it. */
		if (txb->txb_m != NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			sc->sc_data->vd_tx_stopped = 1;
			ifp->if_oerrors++;
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		/* DMA-load the mbuf; on failure drop it and keep going. */
		if (vic_load_txb(sc, txb, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, txb->txb_m, BPF_DIRECTION_OUT);
#endif

		/* Fill in the descriptor's scatter-gather list. */
		dmap = txb->txb_dmamap;
		txd->tx_flags = VIC_TX_FLAGS_KEEP;
		txd->tx_owner = VIC_OWNER_NIC;
		txd->tx_sa.sa_addr_type = VIC_SG_ADDR_PHYS;
		txd->tx_sa.sa_length = dmap->dm_nsegs;
		for (i = 0; i < dmap->dm_nsegs; i++) {
			sge = &txd->tx_sa.sa_sg[i];
			sge->sg_length = dmap->dm_segs[i].ds_len;
			sge->sg_addr_low = dmap->dm_segs[i].ds_addr;
		}

		/* Warn the device when we are close to running out of slots. */
		if (VIC_TXURN_WARN(sc)) {
			txd->tx_flags |= VIC_TX_FLAGS_TXURN;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		sc->sc_txpending++;

		VIC_INC(sc->sc_data->vd_tx_nextidx, sc->sc_data->vd_tx_length);

		tx = 1;
	}

	/* Flush our updates before notifying the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Reading VIC_Tx_ADDR kicks the device into transmitting. */
	if (tx)
		vic_read(sc, VIC_Tx_ADDR);
}
1117 :
1118 : int
1119 0 : vic_load_txb(struct vic_softc *sc, struct vic_txbuf *txb, struct mbuf *m)
1120 : {
1121 0 : bus_dmamap_t dmap = txb->txb_dmamap;
1122 : int error;
1123 :
1124 0 : error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
1125 0 : switch (error) {
1126 : case 0:
1127 0 : txb->txb_m = m;
1128 0 : break;
1129 :
1130 : case EFBIG:
1131 0 : if (m_defrag(m, M_DONTWAIT) == 0 &&
1132 0 : bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
1133 0 : BUS_DMA_NOWAIT) == 0) {
1134 0 : txb->txb_m = m;
1135 0 : break;
1136 : }
1137 :
1138 : /* FALLTHROUGH */
1139 : default:
1140 0 : return (ENOBUFS);
1141 : }
1142 :
1143 0 : return (0);
1144 0 : }
1145 :
/*
 * Transmit watchdog.  The recovery logic below is deliberately
 * compiled out (#if 0); the routine is currently a no-op kept so the
 * ifnet watchdog hook stays wired up.
 */
void
vic_watchdog(struct ifnet *ifp)
{
#if 0
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	if (sc->sc_txpending && sc->sc_txtimeout > 0) {
		if (--sc->sc_txtimeout == 0) {
			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
			ifp->if_flags &= ~IFF_RUNNING;
			vic_init(ifp);
			ifp->if_oerrors++;
			return;
		}
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vic_start(ifp);
#endif
}
1166 :
1167 : int
1168 0 : vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1169 : {
1170 0 : struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
1171 0 : struct ifreq *ifr = (struct ifreq *)data;
1172 : int s, error = 0;
1173 :
1174 0 : s = splnet();
1175 :
1176 0 : switch (cmd) {
1177 : case SIOCSIFADDR:
1178 0 : ifp->if_flags |= IFF_UP;
1179 : /* FALLTHROUGH */
1180 : case SIOCSIFFLAGS:
1181 0 : if (ifp->if_flags & IFF_UP) {
1182 0 : if (ifp->if_flags & IFF_RUNNING)
1183 0 : error = ENETRESET;
1184 : else
1185 0 : vic_init(ifp);
1186 : } else {
1187 0 : if (ifp->if_flags & IFF_RUNNING)
1188 0 : vic_stop(ifp);
1189 : }
1190 : break;
1191 :
1192 : case SIOCGIFMEDIA:
1193 : case SIOCSIFMEDIA:
1194 0 : error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1195 0 : break;
1196 :
1197 : case SIOCGIFRXR:
1198 0 : error = vic_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1199 0 : break;
1200 :
1201 : default:
1202 0 : error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1203 0 : }
1204 :
1205 0 : if (error == ENETRESET) {
1206 0 : if (ifp->if_flags & IFF_RUNNING)
1207 0 : vic_iff(sc);
1208 : error = 0;
1209 0 : }
1210 :
1211 0 : splx(s);
1212 0 : return (error);
1213 : }
1214 :
1215 : int
1216 0 : vic_rxrinfo(struct vic_softc *sc, struct if_rxrinfo *ifri)
1217 : {
1218 0 : struct if_rxring_info ifr[2];
1219 :
1220 0 : memset(ifr, 0, sizeof(ifr));
1221 :
1222 0 : ifr[0].ifr_size = MCLBYTES;
1223 0 : ifr[0].ifr_info = sc->sc_rxq[0].ring;
1224 :
1225 0 : ifr[1].ifr_size = 4096;
1226 0 : ifr[1].ifr_info = sc->sc_rxq[1].ring;
1227 :
1228 0 : return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
1229 0 : }
1230 :
/*
 * Bring the interface up: reset the shared ring indices, allocate and
 * populate the rings, point the device at the shared data area and
 * enable interrupts.  Also starts the one-second tick timeout.
 */
void
vic_init(struct ifnet *ifp)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	int q;
	int s;

	/* Reset all shared tx/rx ring indices before (re)filling. */
	sc->sc_data->vd_tx_curidx = 0;
	sc->sc_data->vd_tx_nextidx = 0;
	sc->sc_data->vd_tx_stopped = sc->sc_data->vd_tx_queued = 0;
	sc->sc_data->vd_tx_saved_nextidx = 0;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_data->vd_rx[q].nextidx = 0;
		sc->sc_data->vd_rx_saved_nextidx[q] = 0;
	}

	if (vic_init_data(sc) != 0)
		return;

	/* Hand the shared data area to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splnet();

	/* Tell the device where the shared area lives. */
	vic_write(sc, VIC_DATA_ADDR, VIC_DMA_DVA(sc));
	vic_write(sc, VIC_DATA_LENGTH, sc->sc_dma_size);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	vic_iff(sc);
	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ENABLE);

	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
1269 :
/*
 * Bring the interface down: stop the tick, drain pending transmits,
 * disable interrupts, clear the device's filter and data pointer, and
 * free the ring resources.
 */
void
vic_stop(struct ifnet *ifp)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	int s;

	s = splnet();

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* XXX wait for tx to complete */
	/* Busy-wait for in-flight transmits, dropping spl so the tx
	 * completion interrupt can run and decrement sc_txpending. */
	while (sc->sc_txpending > 0) {
		splx(s);
		delay(1000);
		s = splnet();
	}

	sc->sc_data->vd_tx_stopped = 1;

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_DISABLE);

	/* Clear the receive filter on the device side. */
	sc->sc_data->vd_iff = 0;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);

	/* Detach the device from the shared data area. */
	vic_write(sc, VIC_DATA_ADDR, 0);

	vic_uninit_data(sc);

	splx(s);
}
1306 :
1307 : struct mbuf *
1308 0 : vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map, u_int pktlen)
1309 : {
1310 : struct mbuf *m = NULL;
1311 :
1312 0 : m = MCLGETI(NULL, M_DONTWAIT, NULL, pktlen);
1313 0 : if (!m)
1314 0 : return (NULL);
1315 0 : m->m_data += ETHER_ALIGN;
1316 0 : m->m_len = m->m_pkthdr.len = pktlen - ETHER_ALIGN;
1317 :
1318 0 : if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1319 0 : printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
1320 0 : m_freem(m);
1321 0 : return (NULL);
1322 : }
1323 :
1324 0 : return (m);
1325 0 : }
1326 :
1327 : void
1328 0 : vic_tick(void *arg)
1329 : {
1330 0 : struct vic_softc *sc = (struct vic_softc *)arg;
1331 :
1332 0 : vic_link_state(sc);
1333 :
1334 0 : timeout_add_sec(&sc->sc_tick, 1);
1335 0 : }
1336 :
/*
 * Read a 32-bit device register, issuing a read barrier first so the
 * access is not satisfied by a stale/reordered earlier access.
 */
u_int32_t
vic_read(struct vic_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1344 :
/*
 * Write a 32-bit device register, then issue a write barrier so the
 * store is pushed out before any subsequent register access.
 */
void
vic_write(struct vic_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1352 :
/*
 * Issue a command by writing it to VIC_CMD and return the value the
 * device leaves in the same register (the command's result).
 */
u_int32_t
vic_read_cmd(struct vic_softc *sc, u_int32_t cmd)
{
	vic_write(sc, VIC_CMD, cmd);
	return (vic_read(sc, VIC_CMD));
}
1359 :
/*
 * Allocate the single contiguous, zeroed DMA area shared with the
 * device (map, backing memory, kva mapping, then load), unwinding in
 * reverse order via the goto ladder on failure.  Returns 0 on
 * success, 1 on failure.
 */
int
vic_alloc_dmamem(struct vic_softc *sc)
{
	int nsegs;

	if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size, 1,
	    sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_dma_map) != 0)
		goto err;

	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size, 16, 0,
	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_seg, nsegs,
	    sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (0);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
free:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
err:
	return (1);
}
1393 :
/*
 * Release the shared DMA area: exact reverse of vic_alloc_dmamem()
 * (unload, unmap, free backing memory, destroy the map).
 */
void
vic_free_dmamem(struct vic_softc *sc)
{
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
}
|