Line data Source code
1 : /* $OpenBSD: if_vmx.c,v 1.45 2017/01/22 10:17:38 dlg Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2013 Tsubai Masanari
5 : *
6 : * Permission to use, copy, modify, and distribute this software for any
7 : * purpose with or without fee is hereby granted, provided that the above
8 : * copyright notice and this permission notice appear in all copies.
9 : *
10 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 : */
18 :
19 : #include "bpfilter.h"
20 :
21 : #include <sys/param.h>
22 : #include <sys/device.h>
23 : #include <sys/mbuf.h>
24 : #include <sys/socket.h>
25 : #include <sys/sockio.h>
26 : #include <sys/systm.h>
27 : #include <sys/atomic.h>
28 :
29 : #include <net/bpf.h>
30 : #include <net/if.h>
31 : #include <net/if_media.h>
32 :
33 : #include <netinet/in.h>
34 : #include <netinet/if_ether.h>
35 : #include <netinet/ip.h>
36 : #include <netinet/tcp.h>
37 : #include <netinet/udp.h>
38 :
39 : #include <machine/bus.h>
40 :
41 : #include <dev/pci/if_vmxreg.h>
42 : #include <dev/pci/pcivar.h>
43 : #include <dev/pci/pcidevs.h>
44 :
#define NRXQUEUE 1			/* single rx queue supported */
#define NTXQUEUE 1			/* single tx queue supported */

#define NTXDESC 512 /* tx ring size */
#define NTXSEGS 8 /* tx descriptors per packet */
#define NRXDESC 512			/* rx ring size (per command ring) */
#define NTXCOMPDESC NTXDESC
#define NRXCOMPDESC (NRXDESC * 2) /* ring1 + ring2 */

#define VMXNET3_DRIVER_VERSION 0x00010000
55 :
/* Transmit command ring plus the per-slot driver bookkeeping. */
struct vmxnet3_txring {
	struct mbuf *m[NTXDESC];	/* mbuf attached to each slot */
	bus_dmamap_t dmap[NTXDESC];	/* DMA map for each slot */
	struct vmxnet3_txdesc *txd;	/* descriptor array (DMA memory) */
	u_int prod;			/* next slot to fill */
	u_int cons;			/* next slot to reclaim */
	u_int free;			/* free descriptor count */
	u_int8_t gen;			/* current ownership generation bit */
};
65 :
/* Receive command ring (one of two per rx queue). */
struct vmxnet3_rxring {
	struct mbuf *m[NRXDESC];	/* mbuf attached to each slot */
	bus_dmamap_t dmap[NRXDESC];	/* DMA map for each slot */
	struct if_rxring rxr;		/* stack-side rx slot accounting */
	struct vmxnet3_rxdesc *rxd;	/* descriptor array (DMA memory) */
	u_int fill;			/* next slot to (re)fill */
	u_int8_t gen;			/* current ownership generation bit */
	u_int8_t rid;			/* ring id: 0 = ring1, 1 = ring2 */
};
75 :
/* Completion ring shared by tx and rx (anonymous union, C11). */
struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	};
	u_int next;			/* next completion to inspect */
	u_int8_t gen;			/* expected generation bit */
};
84 :
/* One transmit queue: command ring, completion ring, shared area. */
struct vmxnet3_txqueue {
	struct vmxnet3_txring cmd_ring;
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_txq_shared *ts;	/* area shared with the device */
};
90 :
/* One receive queue: two command rings, completion ring, shared area. */
struct vmxnet3_rxqueue {
	struct vmxnet3_rxring cmd_ring[2];
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_rxq_shared *rs;	/* area shared with the device */
};
96 :
/* Per-device softc. */
struct vmxnet3_softc {
	struct device sc_dev;		/* must be first (autoconf) */
	struct arpcom sc_arpcom;	/* ethernet common; holds the ifnet */
	struct ifmedia sc_media;

	bus_space_tag_t sc_iot0;	/* BAR0 */
	bus_space_tag_t sc_iot1;	/* BAR1 */
	bus_space_handle_t sc_ioh0;
	bus_space_handle_t sc_ioh1;
	bus_dma_tag_t sc_dmat;
	void *sc_ih;			/* interrupt handle */

	struct vmxnet3_txqueue sc_txq[NTXQUEUE];
	struct vmxnet3_rxqueue sc_rxq[NRXQUEUE];
	struct vmxnet3_driver_shared *sc_ds;	/* driver shared area (DMA) */
	u_int8_t *sc_mcast;		/* multicast filter table (DMA) */
};
114 :
#define VMXNET3_STAT

#ifdef VMXNET3_STAT
/*
 * Debug counters.  NOTE(review): a single global instance shared by all
 * vmx devices and updated without synchronization — debug aid only.
 */
struct {
	u_int ntxdesc;
	u_int nrxdesc;
	u_int txhead;
	u_int txdone;
	u_int maxtxlen;
	u_int rxdone;
	u_int rxfill;
	u_int intr;
} vmxstat = {
	NTXDESC, NRXDESC
};
#endif
131 :
#define JUMBO_LEN (1024 * 9)	/* rx buffer size: 9K jumbo frames */
#define DMAADDR(map) ((map)->dm_segs[0].ds_addr)	/* first-seg bus addr */

/* Register access helpers: BAR0 = data path, BAR1 = command/config. */
#define READ_BAR0(sc, reg) bus_space_read_4((sc)->sc_iot0, (sc)->sc_ioh0, reg)
#define READ_BAR1(sc, reg) bus_space_read_4((sc)->sc_iot1, (sc)->sc_ioh1, reg)
#define WRITE_BAR0(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot0, (sc)->sc_ioh0, reg, val)
#define WRITE_BAR1(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot1, (sc)->sc_ioh1, reg, val)
#define WRITE_CMD(sc, cmd) WRITE_BAR1(sc, VMXNET3_BAR1_CMD, cmd)
/* NOTE(review): stubbed translation; driver_data fields become 0. */
#define vtophys(va) 0 /* XXX ok? */
143 :
/* Forward declarations: autoconf glue, init/teardown, interrupt and
 * data paths, and internal helpers. */
int vmxnet3_match(struct device *, void *, void *);
void vmxnet3_attach(struct device *, struct device *, void *);
int vmxnet3_dma_init(struct vmxnet3_softc *);
int vmxnet3_alloc_txring(struct vmxnet3_softc *, int);
int vmxnet3_alloc_rxring(struct vmxnet3_softc *, int);
void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_link_state(struct vmxnet3_softc *);
void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
int vmxnet3_intr(void *);
void vmxnet3_evintr(struct vmxnet3_softc *);
void vmxnet3_txintr(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxintr(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_iff(struct vmxnet3_softc *);
void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
int vmxnet3_getbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
void vmxnet3_stop(struct ifnet *);
void vmxnet3_reset(struct vmxnet3_softc *);
int vmxnet3_init(struct vmxnet3_softc *);
int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
void vmxnet3_start(struct ifnet *);
int vmxnet3_load_mbuf(struct vmxnet3_softc *, struct vmxnet3_txring *,
    struct mbuf **);
void vmxnet3_watchdog(struct ifnet *);
void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
int vmxnet3_media_change(struct ifnet *);
void *vmxnet3_dma_allocmem(struct vmxnet3_softc *, u_int, u_int, bus_addr_t *);
174 :
/* PCI IDs this driver attaches to. */
const struct pci_matchid vmx_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET_3 }
};

/* Autoconf attachment glue. */
struct cfattach vmx_ca = {
	sizeof(struct vmxnet3_softc), vmxnet3_match, vmxnet3_attach
};

struct cfdriver vmx_cd = {
	NULL, "vmx", DV_IFNET
};
186 :
187 : int
188 0 : vmxnet3_match(struct device *parent, void *match, void *aux)
189 : {
190 0 : return (pci_matchbyid(aux, vmx_devices, nitems(vmx_devices)));
191 : }
192 :
193 : void
194 0 : vmxnet3_attach(struct device *parent, struct device *self, void *aux)
195 : {
196 0 : struct vmxnet3_softc *sc = (void *)self;
197 0 : struct pci_attach_args *pa = aux;
198 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
199 0 : pci_intr_handle_t ih;
200 : const char *intrstr;
201 : u_int memtype, ver, macl, mach;
202 0 : u_char enaddr[ETHER_ADDR_LEN];
203 :
204 0 : memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x10);
205 0 : if (pci_mapreg_map(pa, 0x10, memtype, 0, &sc->sc_iot0, &sc->sc_ioh0,
206 : NULL, NULL, 0)) {
207 0 : printf(": failed to map BAR0\n");
208 0 : return;
209 : }
210 0 : memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x14);
211 0 : if (pci_mapreg_map(pa, 0x14, memtype, 0, &sc->sc_iot1, &sc->sc_ioh1,
212 : NULL, NULL, 0)) {
213 0 : printf(": failed to map BAR1\n");
214 0 : return;
215 : }
216 :
217 0 : ver = READ_BAR1(sc, VMXNET3_BAR1_VRRS);
218 0 : if ((ver & 0x1) == 0) {
219 0 : printf(": unsupported hardware version 0x%x\n", ver);
220 0 : return;
221 : }
222 0 : WRITE_BAR1(sc, VMXNET3_BAR1_VRRS, 1);
223 :
224 0 : ver = READ_BAR1(sc, VMXNET3_BAR1_UVRS);
225 0 : if ((ver & 0x1) == 0) {
226 0 : printf(": incompatiable UPT version 0x%x\n", ver);
227 0 : return;
228 : }
229 0 : WRITE_BAR1(sc, VMXNET3_BAR1_UVRS, 1);
230 :
231 0 : sc->sc_dmat = pa->pa_dmat;
232 0 : if (vmxnet3_dma_init(sc)) {
233 0 : printf(": failed to setup DMA\n");
234 0 : return;
235 : }
236 :
237 0 : if (pci_intr_map(pa, &ih)) {
238 0 : printf(": failed to map interrupt\n");
239 0 : return;
240 : }
241 0 : sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
242 0 : vmxnet3_intr, sc, self->dv_xname);
243 0 : intrstr = pci_intr_string(pa->pa_pc, ih);
244 0 : if (intrstr)
245 0 : printf(": %s", intrstr);
246 :
247 0 : WRITE_CMD(sc, VMXNET3_CMD_GET_MACL);
248 0 : macl = READ_BAR1(sc, VMXNET3_BAR1_CMD);
249 0 : enaddr[0] = macl;
250 0 : enaddr[1] = macl >> 8;
251 0 : enaddr[2] = macl >> 16;
252 0 : enaddr[3] = macl >> 24;
253 0 : WRITE_CMD(sc, VMXNET3_CMD_GET_MACH);
254 0 : mach = READ_BAR1(sc, VMXNET3_BAR1_CMD);
255 0 : enaddr[4] = mach;
256 0 : enaddr[5] = mach >> 8;
257 :
258 0 : WRITE_BAR1(sc, VMXNET3_BAR1_MACL, macl);
259 0 : WRITE_BAR1(sc, VMXNET3_BAR1_MACH, mach);
260 0 : printf(", address %s\n", ether_sprintf(enaddr));
261 :
262 0 : bcopy(enaddr, sc->sc_arpcom.ac_enaddr, 6);
263 0 : strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
264 0 : ifp->if_softc = sc;
265 0 : ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
266 0 : ifp->if_ioctl = vmxnet3_ioctl;
267 0 : ifp->if_start = vmxnet3_start;
268 0 : ifp->if_watchdog = vmxnet3_watchdog;
269 0 : ifp->if_hardmtu = VMXNET3_MAX_MTU;
270 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
271 0 : if (sc->sc_ds->upt_features & UPT1_F_CSUM)
272 0 : ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
273 0 : if (sc->sc_ds->upt_features & UPT1_F_VLAN)
274 0 : ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
275 :
276 0 : IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESC);
277 :
278 0 : ifmedia_init(&sc->sc_media, IFM_IMASK, vmxnet3_media_change,
279 : vmxnet3_media_status);
280 0 : ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
281 0 : ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T|IFM_FDX, 0, NULL);
282 0 : ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T, 0, NULL);
283 0 : ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
284 0 : ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
285 0 : ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
286 :
287 0 : if_attach(ifp);
288 0 : ether_ifattach(ifp);
289 0 : vmxnet3_link_state(sc);
290 0 : }
291 :
/*
 * Allocate and wire up all DMA-shared state: the per-queue shared
 * areas, tx/rx rings, the multicast filter table and the driver
 * shared area, then hand the latter's address to the device.
 * Returns 0 on success, -1 on any allocation failure.
 * NOTE(review): nothing allocated here is freed on failure — attach
 * simply bails out; confirm that is intended.
 */
int
vmxnet3_dma_init(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	bus_addr_t ds_pa, qs_pa, mcast_pa;
	int i, queue, qs_len;
	u_int major, minor, release_code, rev;

	/* One contiguous region holds all tx then all rx shared areas. */
	qs_len = NTXQUEUE * sizeof *ts + NRXQUEUE * sizeof *rs;
	ts = vmxnet3_dma_allocmem(sc, qs_len, VMXNET3_DMADESC_ALIGN, &qs_pa);
	if (ts == NULL)
		return -1;
	for (queue = 0; queue < NTXQUEUE; queue++)
		sc->sc_txq[queue].ts = ts++;
	rs = (void *)ts;	/* rx areas follow directly after the tx ones */
	for (queue = 0; queue < NRXQUEUE; queue++)
		sc->sc_rxq[queue].rs = rs++;

	for (queue = 0; queue < NTXQUEUE; queue++)
		if (vmxnet3_alloc_txring(sc, queue))
			return -1;
	for (queue = 0; queue < NRXQUEUE; queue++)
		if (vmxnet3_alloc_rxring(sc, queue))
			return -1;

	/* Multicast filter table: room for 682 addresses (682*6 <= 4KB). */
	sc->sc_mcast = vmxnet3_dma_allocmem(sc, 682 * ETHER_ADDR_LEN, 32, &mcast_pa);
	if (sc->sc_mcast == NULL)
		return -1;

	ds = vmxnet3_dma_allocmem(sc, sizeof *sc->sc_ds, 8, &ds_pa);
	if (ds == NULL)
		return -1;
	sc->sc_ds = ds;
	ds->magic = VMXNET3_REV1_MAGIC;
	ds->version = VMXNET3_DRIVER_VERSION;

	/*
	 * XXX FreeBSD version uses following values:
	 * (Does the device behavior depend on them?)
	 *
	 * major = __FreeBSD_version / 100000;
	 * minor = (__FreeBSD_version / 1000) % 100;
	 * release_code = (__FreeBSD_version / 100) % 10;
	 * rev = __FreeBSD_version % 100;
	 */
	major = 0;
	minor = 0;
	release_code = 0;
	rev = 0;
#ifdef __LP64__
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_64BIT;
#else
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;
	ds->upt_features = UPT1_F_CSUM | UPT1_F_VLAN;
	ds->driver_data = vtophys(sc);	/* 0 here; vtophys is stubbed */
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = qs_pa;
	ds->queue_shared_len = qs_len;
	ds->mtu = VMXNET3_MAX_MTU;
	ds->ntxqueue = NTXQUEUE;
	ds->nrxqueue = NRXQUEUE;
	ds->mcast_table = mcast_pa;
	ds->automask = 1;
	ds->nintr = VMXNET3_NINTR;
	ds->evintr = 0;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
	/* Publish the driver shared area's physical address (lo/hi). */
	WRITE_BAR1(sc, VMXNET3_BAR1_DSL, ds_pa);
	WRITE_BAR1(sc, VMXNET3_BAR1_DSH, (u_int64_t)ds_pa >> 32);
	return 0;
}
371 :
/*
 * Allocate DMA memory and maps for one tx queue's command and
 * completion rings and initialize its shared area.
 * Returns 0 on success, -1 on failure (no cleanup on failure).
 */
int
vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_txqueue *tq = &sc->sc_txq[queue];
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	bus_addr_t pa, comp_pa;
	int idx;

	ring->txd = vmxnet3_dma_allocmem(sc, NTXDESC * sizeof ring->txd[0], 512, &pa);
	if (ring->txd == NULL)
		return -1;
	comp_ring->txcd = vmxnet3_dma_allocmem(sc,
	    NTXCOMPDESC * sizeof comp_ring->txcd[0], 512, &comp_pa);
	if (comp_ring->txcd == NULL)
		return -1;

	/* One DMA map per descriptor slot, up to NTXSEGS segments each. */
	for (idx = 0; idx < NTXDESC; idx++) {
		if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, NTXSEGS,
		    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
			return -1;
	}

	/* Describe the rings to the device via the queue shared area. */
	ts = tq->ts;
	bzero(ts, sizeof *ts);
	ts->npending = 0;
	ts->intr_threshold = 1;
	ts->cmd_ring = pa;
	ts->cmd_ring_len = NTXDESC;
	ts->comp_ring = comp_pa;
	ts->comp_ring_len = NTXCOMPDESC;
	ts->driver_data = vtophys(tq);	/* 0 here; vtophys is stubbed */
	ts->driver_data_len = sizeof *tq;
	ts->intr_idx = 0;
	ts->stopped = 1;
	ts->error = 0;
	return 0;
}
411 :
/*
 * Allocate DMA memory and maps for one rx queue's two command rings
 * and its completion ring, and initialize its shared area.
 * Returns 0 on success, -1 on failure (no cleanup on failure).
 */
int
vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_rxqueue *rq = &sc->sc_rxq[queue];
	struct vmxnet3_rxq_shared *rs;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	bus_addr_t pa[2], comp_pa;
	int i, idx;

	/* Two command rings (ring1 + ring2) per rx queue. */
	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rxd = vmxnet3_dma_allocmem(sc, NRXDESC * sizeof ring->rxd[0],
		    512, &pa[i]);
		if (ring->rxd == NULL)
			return -1;
	}
	comp_ring = &rq->comp_ring;
	comp_ring->rxcd = vmxnet3_dma_allocmem(sc,
	    NRXCOMPDESC * sizeof comp_ring->rxcd[0], 512, &comp_pa);
	if (comp_ring->rxcd == NULL)
		return -1;

	/* One single-segment DMA map per rx slot. */
	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rid = i;
		for (idx = 0; idx < NRXDESC; idx++) {
			if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, 1,
			    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
				return -1;
		}
	}

	/* Describe the rings to the device via the queue shared area. */
	rs = rq->rs;
	bzero(rs, sizeof *rs);
	rs->cmd_ring[0] = pa[0];
	rs->cmd_ring[1] = pa[1];
	rs->cmd_ring_len[0] = NRXDESC;
	rs->cmd_ring_len[1] = NRXDESC;
	rs->comp_ring = comp_pa;
	rs->comp_ring_len = NRXCOMPDESC;
	rs->driver_data = vtophys(rq);	/* 0 here; vtophys is stubbed */
	rs->driver_data_len = sizeof *rq;
	rs->intr_idx = 0;
	rs->stopped = 1;
	rs->error = 0;
	return 0;
}
460 :
461 : void
462 0 : vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
463 : {
464 0 : struct vmxnet3_txring *ring = &tq->cmd_ring;
465 0 : struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
466 :
467 0 : ring->cons = ring->prod = 0;
468 0 : ring->free = NTXDESC;
469 0 : ring->gen = 1;
470 0 : comp_ring->next = 0;
471 0 : comp_ring->gen = 1;
472 0 : bzero(ring->txd, NTXDESC * sizeof ring->txd[0]);
473 0 : bzero(comp_ring->txcd, NTXCOMPDESC * sizeof comp_ring->txcd[0]);
474 0 : }
475 :
/*
 * Reset one rx queue: clear both command rings, pre-fill them with
 * mbufs (as many as the if_rxring accounting allows), and reset the
 * completion ring.
 */
void
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	int i;
	u_int slots;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->fill = 0;
		ring->gen = 1;
		bzero(ring->rxd, NRXDESC * sizeof ring->rxd[0]);
		if_rxr_init(&ring->rxr, 2, NRXDESC - 1);
		/* Fill as many slots as granted; unused grants go back. */
		for (slots = if_rxr_get(&ring->rxr, NRXDESC);
		    slots > 0; slots--) {
			if (vmxnet3_getbuf(sc, ring))
				break;
		}
		if_rxr_put(&ring->rxr, slots);
	}
	comp_ring = &rq->comp_ring;
	comp_ring->next = 0;
	comp_ring->gen = 1;
	bzero(comp_ring->rxcd, NRXCOMPDESC * sizeof comp_ring->rxcd[0]);
}
502 :
503 : void
504 0 : vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
505 : {
506 0 : struct vmxnet3_txring *ring = &tq->cmd_ring;
507 : int idx;
508 :
509 0 : for (idx = 0; idx < NTXDESC; idx++) {
510 0 : if (ring->m[idx]) {
511 0 : bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);
512 0 : m_freem(ring->m[idx]);
513 0 : ring->m[idx] = NULL;
514 0 : }
515 : }
516 0 : }
517 :
518 : void
519 0 : vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
520 : {
521 : struct vmxnet3_rxring *ring;
522 : int i, idx;
523 :
524 0 : for (i = 0; i < 2; i++) {
525 0 : ring = &rq->cmd_ring[i];
526 0 : for (idx = 0; idx < NRXDESC; idx++) {
527 0 : if (ring->m[idx]) {
528 0 : m_freem(ring->m[idx]);
529 0 : ring->m[idx] = NULL;
530 0 : }
531 : }
532 : }
533 0 : }
534 :
535 : void
536 0 : vmxnet3_link_state(struct vmxnet3_softc *sc)
537 : {
538 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
539 : u_int x, link, speed;
540 :
541 0 : WRITE_CMD(sc, VMXNET3_CMD_GET_LINK);
542 0 : x = READ_BAR1(sc, VMXNET3_BAR1_CMD);
543 0 : speed = x >> 16;
544 0 : if (x & 1) {
545 0 : ifp->if_baudrate = IF_Mbps(speed);
546 : link = LINK_STATE_UP;
547 0 : } else
548 : link = LINK_STATE_DOWN;
549 :
550 0 : if (ifp->if_link_state != link) {
551 0 : ifp->if_link_state = link;
552 0 : if_link_state_change(ifp);
553 0 : }
554 0 : }
555 :
/* Unmask one interrupt vector (write 0 to its IMASK register). */
static inline void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}
561 :
/* Mask one interrupt vector (write 1 to its IMASK register). */
static inline void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}
567 :
568 : void
569 0 : vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
570 : {
571 : int i;
572 :
573 0 : sc->sc_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
574 0 : for (i = 0; i < VMXNET3_NINTR; i++)
575 0 : vmxnet3_enable_intr(sc, i);
576 0 : }
577 :
578 : void
579 0 : vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
580 : {
581 : int i;
582 :
583 0 : sc->sc_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
584 0 : for (i = 0; i < VMXNET3_NINTR; i++)
585 0 : vmxnet3_disable_intr(sc, i);
586 0 : }
587 :
/*
 * Interrupt handler (registered IPL_NET | IPL_MPSAFE).  Returns 0 if
 * the interrupt was not ours, 1 otherwise.  Event processing takes
 * the kernel lock; rx/tx completion runs unlocked.
 */
int
vmxnet3_intr(void *arg)
{
	struct vmxnet3_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Not our interrupt. */
	if (READ_BAR1(sc, VMXNET3_BAR1_INTR) == 0)
		return 0;

	if (sc->sc_ds->event) {
		KERNEL_LOCK();
		vmxnet3_evintr(sc);
		KERNEL_UNLOCK();
	}

	if (ifp->if_flags & IFF_RUNNING) {
		vmxnet3_rxintr(sc, &sc->sc_rxq[0]);
		vmxnet3_txintr(sc, &sc->sc_txq[0]);
		/* Re-enable vector 0 after servicing it. */
		vmxnet3_enable_intr(sc, 0);
	}

	return 1;
}
611 :
/*
 * Handle device events: link changes, queue errors (which trigger a
 * full reinit), and diagnostic notifications.  Called with the
 * kernel lock held.
 */
void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int event = sc->sc_ds->event;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;

	/* Clear events. */
	WRITE_BAR1(sc, VMXNET3_BAR1_EVENT, event);

	/* Link state change? */
	if (event & VMXNET3_EVENT_LINK)
		vmxnet3_link_state(sc);

	/* Queue error? */
	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		/* Ask the device to fill in the error fields below. */
		WRITE_CMD(sc, VMXNET3_CMD_GET_STATUS);

		ts = sc->sc_txq[0].ts;
		if (ts->stopped)
			printf("%s: TX error 0x%x\n", ifp->if_xname, ts->error);
		rs = sc->sc_rxq[0].rs;
		if (rs->stopped)
			printf("%s: RX error 0x%x\n", ifp->if_xname, rs->error);
		/* Recover by reinitializing the whole interface. */
		vmxnet3_init(sc);
	}

	if (event & VMXNET3_EVENT_DIC)
		printf("%s: device implementation change event\n",
		    ifp->if_xname);
	if (event & VMXNET3_EVENT_DEBUG)
		printf("%s: debug event\n", ifp->if_xname);
}
646 :
/*
 * Tx completion: reclaim descriptors the device has finished with,
 * free their mbufs, and restart transmission if it was stalled.
 */
void
vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	struct vmxnet3_txcompdesc *txcd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int cons;
	u_int free = 0;

	cons = ring->cons;

	for (;;) {
		txcd = &comp_ring->txcd[comp_ring->next];

		/* Stop at the first entry still owned by the device. */
		if (letoh32((txcd->txc_word3 >> VMXNET3_TXC_GEN_S) &
		    VMXNET3_TXC_GEN_M) != comp_ring->gen)
			break;

		comp_ring->next++;
		if (comp_ring->next == NTXCOMPDESC) {
			comp_ring->next = 0;
			comp_ring->gen ^= 1;	/* generation flips on wrap */
		}

		/* The mbuf was stored at the packet's first descriptor. */
		m = ring->m[cons];
		ring->m[cons] = NULL;

		KASSERT(m != NULL);

		map = ring->dmap[cons];
		free += map->dm_nsegs;	/* one descriptor per DMA segment */
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(m);

		/* Jump to the slot after the packet's last descriptor. */
		cons = (letoh32((txcd->txc_word0 >>
		    VMXNET3_TXC_EOPIDX_S) & VMXNET3_TXC_EOPIDX_M) + 1)
		    % NTXDESC;
	}

	ring->cons = cons;

	/* Ring fully drained: cancel the tx watchdog. */
	if (atomic_add_int_nv(&ring->free, free) == NTXDESC)
		ifp->if_timer = 0;

	/* Kick the start routine if it had set oactive. */
	if (ifq_is_oactive(&ifp->if_snd)) {
		KERNEL_LOCK();
		ifq_clr_oactive(&ifp->if_snd);
		vmxnet3_start(ifp);
		KERNEL_UNLOCK();
	}
}
701 :
/*
 * Rx completion: drain the completion ring, pass good packets to the
 * stack in one batch, then refill command ring 1.
 */
void
vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_comp_ring *comp_ring = &rq->comp_ring;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len;
	u_int slots;

	for (;;) {
		rxcd = &comp_ring->rxcd[comp_ring->next];
		/* Stop at the first entry still owned by the device. */
		if (letoh32((rxcd->rxc_word3 >> VMXNET3_RXC_GEN_S) &
		    VMXNET3_RXC_GEN_M) != comp_ring->gen)
			break;

		comp_ring->next++;
		if (comp_ring->next == NRXCOMPDESC) {
			comp_ring->next = 0;
			comp_ring->gen ^= 1;	/* generation flips on wrap */
		}

		idx = letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_IDX_S) &
		    VMXNET3_RXC_IDX_M);
		/* QIDs below NRXQUEUE refer to ring 1, the rest to ring 2. */
		if (letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_QID_S) &
		    VMXNET3_RXC_QID_M) < NRXQUEUE)
			ring = &rq->cmd_ring[0];
		else
			ring = &rq->cmd_ring[1];
		rxd = &ring->rxd[idx];
		len = letoh32((rxcd->rxc_word2 >> VMXNET3_RXC_LEN_S) &
		    VMXNET3_RXC_LEN_M);
		m = ring->m[idx];
		ring->m[idx] = NULL;
		if_rxr_put(&ring->rxr, 1);
		bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);

		if (m == NULL)
			panic("%s: NULL ring->m[%u]", __func__, idx);

		/* Only HEAD buffers are posted (see vmxnet3_getbuf). */
		if (letoh32((rxd->rx_word2 >> VMXNET3_RX_BTYPE_S) &
		    VMXNET3_RX_BTYPE_M) != VMXNET3_BTYPE_HEAD) {
			m_freem(m);
			goto skip_buffer;
		}
		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_ERROR)) {
			ifp->if_ierrors++;
			m_freem(m);
			goto skip_buffer;
		}
		/* Drop runts. */
		if (len < VMXNET3_MIN_MTU) {
			m_freem(m);
			goto skip_buffer;
		}

		vmxnet3_rx_csum(rxcd, m);
		m->m_pkthdr.len = m->m_len = len;
		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_VLAN)) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vtag = letoh32((rxcd->rxc_word2 >>
			    VMXNET3_RXC_VLANTAG_S) & VMXNET3_RXC_VLANTAG_M);
		}

		ml_enqueue(&ml, m);

	skip_buffer:
#ifdef VMXNET3_STAT
		vmxstat.rxdone = idx;
#endif
		/* Tell the device how far we have consumed, if it asks. */
		if (rq->rs->update_rxhead) {
			u_int qid = letoh32((rxcd->rxc_word0 >>
			    VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M);

			idx = (idx + 1) % NRXDESC;
			if (qid < NRXQUEUE) {
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(qid), idx);
			} else {
				qid -= NRXQUEUE;
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(qid), idx);
			}
		}
	}

	/* Hand the whole batch to the stack at once. */
	if_input(ifp, &ml);

	/* XXX Should we (try to) allocate buffers for ring 2 too? */
	ring = &rq->cmd_ring[0];
	for (slots = if_rxr_get(&ring->rxr, NRXDESC); slots > 0; slots--) {
		if (vmxnet3_getbuf(sc, ring))
			break;
	}
	if_rxr_put(&ring->rxr, slots);
}
798 :
/*
 * Program the receive filter: promiscuous/allmulti fallbacks or an
 * explicit multicast address table (capacity 682 entries, matching
 * the table allocated in vmxnet3_dma_init).
 */
void
vmxnet3_iff(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct vmxnet3_driver_shared *ds = sc->sc_ds;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int mode;
	u_int8_t *p;

	ds->mcast_tablelen = 0;
	CLR(ifp->if_flags, IFF_ALLMULTI);

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST;

	/* Fall back to allmulti when the table can't express the set. */
	if (ISSET(ifp->if_flags, IFF_PROMISC) || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > 682) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST));
		if (ifp->if_flags & IFF_PROMISC)
			SET(mode, VMXNET3_RXMODE_PROMISC);
	} else {
		/* Copy each multicast address into the DMA table. */
		p = sc->sc_mcast;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, p, ETHER_ADDR_LEN);

			p += ETHER_ADDR_LEN;

			ETHER_NEXT_MULTI(step, enm);
		}

		if (ac->ac_multicnt > 0) {
			SET(mode, VMXNET3_RXMODE_MCAST);
			ds->mcast_tablelen = p - sc->sc_mcast;
		}
	}

	/* Push the table first, then the new rx mode. */
	WRITE_CMD(sc, VMXNET3_CMD_SET_FILTER);
	ds->rxmode = mode;
	WRITE_CMD(sc, VMXNET3_CMD_SET_RXMODE);
}
846 :
847 :
/*
 * Translate the hardware checksum verdicts in an rx completion
 * descriptor into mbuf csum_flags.
 */
void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	/* Device did not checksum this packet at all. */
	if (letoh32(rxcd->rxc_word0 & VMXNET3_RXC_NOCSUM))
		return;

	if ((rxcd->rxc_word3 & (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK)) ==
	    (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK))
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	/* L4 checksum verdicts are meaningless on fragments. */
	if (rxcd->rxc_word3 & VMXNET3_RXC_FRAGMENT)
		return;

	if (rxcd->rxc_word3 & (VMXNET3_RXC_TCP | VMXNET3_RXC_UDP)) {
		if (rxcd->rxc_word3 & VMXNET3_RXC_CSUM_OK)
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}
867 :
/*
 * Allocate one rx cluster mbuf, DMA-load it and publish it in the
 * ring's next fill slot.  Returns 0 on success, -1 when no mbuf is
 * available or when called for ring 2 (currently unused).
 */
int
vmxnet3_getbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *ring)
{
	int idx = ring->fill;
	struct vmxnet3_rxdesc *rxd = &ring->rxd[idx];
	struct mbuf *m;
	int btype;

	if (ring->m[idx])
		panic("vmxnet3_getbuf: buffer has mbuf");

#if 1
	/* XXX Don't allocate buffers for ring 2 for now. */
	if (ring->rid != 0)
		return -1;
	btype = VMXNET3_BTYPE_HEAD;
#else
	if (ring->rid == 0)
		btype = VMXNET3_BTYPE_HEAD;
	else
		btype = VMXNET3_BTYPE_BODY;
#endif

	m = MCLGETI(NULL, M_DONTWAIT, NULL, JUMBO_LEN);
	if (m == NULL)
		return -1;

	m->m_pkthdr.len = m->m_len = JUMBO_LEN;
	m_adj(m, ETHER_ALIGN);	/* keep the IP header 4-byte aligned */
	ring->m[idx] = m;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, ring->dmap[idx], m,
	    BUS_DMA_NOWAIT))
		panic("load mbuf");
	rxd->rx_addr = htole64(DMAADDR(ring->dmap[idx]));
	/* Pack length, buffer type and generation into word2 (LE). */
	rxd->rx_word2 = htole32(((m->m_pkthdr.len & VMXNET3_RX_LEN_M) <<
	    VMXNET3_RX_LEN_S) | ((btype & VMXNET3_RX_BTYPE_M) <<
	    VMXNET3_RX_BTYPE_S) | ((ring->gen & VMXNET3_RX_GEN_M) <<
	    VMXNET3_RX_GEN_S));
	idx++;
	if (idx == NRXDESC) {
		idx = 0;
		ring->gen ^= 1;	/* generation flips on wrap */
	}
	ring->fill = idx;
#ifdef VMXNET3_STAT
	vmxstat.rxfill = ring->fill;
#endif
	return 0;
}
918 :
/*
 * Stop the interface: mark it down, quiesce the device and the
 * interrupt handler, then free all ring buffers.  Ordering matters:
 * buffers are only freed after intr_barrier() guarantees no handler
 * is still running.
 */
void
vmxnet3_stop(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int queue;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	vmxnet3_disable_all_intrs(sc);

	WRITE_CMD(sc, VMXNET3_CMD_DISABLE);

	/* Wait for any in-flight interrupt handler to finish. */
	intr_barrier(sc->sc_ih);

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txstop(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxstop(sc, &sc->sc_rxq[queue]);
}
940 :
/* Issue a device reset command. */
void
vmxnet3_reset(struct vmxnet3_softc *sc)
{
	WRITE_CMD(sc, VMXNET3_CMD_RESET);
}
946 :
/*
 * (Re)initialize the interface: stop it, reset all rings, point the
 * device at the ring heads, enable it and program the rx filter.
 * Returns 0 on success or EIO if the device refuses to enable.
 */
int
vmxnet3_init(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int queue;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vmxnet3_stop(ifp);

#if 0
	/* Put controller into known state. */
	vmxnet3_reset(sc);
#endif

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txinit(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxinit(sc, &sc->sc_rxq[queue]);

	/* Reset the device's rx head pointers for both rings. */
	for (queue = 0; queue < NRXQUEUE; queue++) {
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(queue), 0);
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(queue), 0);
	}

	/* ENABLE returns non-zero on failure. */
	WRITE_CMD(sc, VMXNET3_CMD_ENABLE);
	if (READ_BAR1(sc, VMXNET3_BAR1_CMD)) {
		printf("%s: failed to initialize\n", ifp->if_xname);
		vmxnet3_stop(ifp);
		return EIO;
	}

	/* Program promiscuous mode and multicast filters. */
	vmxnet3_iff(sc);

	vmxnet3_enable_all_intrs(sc);

	vmxnet3_link_state(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
992 :
/*
 * Interface ioctl handler.  Runs at splnet.  ENETRESET from any
 * branch means "re-program the rx filter without a full reinit".
 */
int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			error = vmxnet3_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;	/* just update filter */
			else
				error = vmxnet3_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vmxnet3_stop(ifp);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		/* Report ring 1 of rx queue 0 only (ring 2 is unused). */
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, JUMBO_LEN, &sc->sc_rxq[0].cmd_ring[0].rxr);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vmxnet3_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}
1040 :
/*
 * Transmit start routine: dequeue packets, load them into the tx
 * ring and kick the device.  Sets oactive (and defers to txintr)
 * when fewer than NTXSEGS descriptors remain.
 */
void
vmxnet3_start(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct vmxnet3_txqueue *tq = sc->sc_txq;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_txdesc *txd;
	struct mbuf *m;
	u_int free, used;
	int n;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	/* Snapshot; txintr may concurrently grow the true free count. */
	free = ring->free;
	used = 0;

	for (;;) {
		/* A packet may need up to NTXSEGS descriptors. */
		if (used + NTXSEGS > free) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* First descriptor of this packet (generation fixed later). */
		txd = &ring->txd[ring->prod];

		n = vmxnet3_load_mbuf(sc, ring, &m);
		if (n == -1) {
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Change the ownership by flipping the "generation" bit */
		txd->tx_word2 ^= htole32(VMXNET3_TX_GEN_M << VMXNET3_TX_GEN_S);

		used += n;
	}

	if (used > 0) {
		ifp->if_timer = 5;	/* arm the tx watchdog */
		atomic_sub_int(&ring->free, used);
		/* Tell the device where the new producer index is. */
		WRITE_BAR0(sc, VMXNET3_BAR0_TXH(0), ring->prod);
	}
}
1093 :
/*
 * DMA-load one outgoing mbuf chain onto the tx ring starting at
 * ring->prod and fill its descriptors.  Returns the number of
 * descriptors consumed, or -1 on failure (in which case the mbuf has
 * been freed and *mp must not be used).  The caller is responsible for
 * flipping the generation bit on the first (SOP) descriptor afterwards
 * to hand the chain to the device.
 */
int
vmxnet3_load_mbuf(struct vmxnet3_softc *sc, struct vmxnet3_txring *ring,
    struct mbuf **mp)
{
	struct vmxnet3_txdesc *txd, *sop;
	struct mbuf *n, *m = *mp;
	bus_dmamap_t map;
	u_int hlen = ETHER_HDR_LEN, csum_off;
	u_int prod;
	int gen, i;

	prod = ring->prod;
	map = ring->dmap[prod];
#if 0
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) {
		printf("%s: IP checksum offloading is not supported\n",
		    sc->sc_dev.dv_xname);
		return -1;
	}
#endif
	if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) {
		struct ip *ip;
		int offp;

		/* Offset of the checksum field within the L4 header. */
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			csum_off = offsetof(struct tcphdr, th_sum);
		else
			csum_off = offsetof(struct udphdr, uh_sum);

		/* Locate the IP header to learn the full L2+L3 header len. */
		n = m_pulldown(m, hlen, sizeof(*ip), &offp);
		if (n == NULL)
			return (-1);

		ip = (struct ip *)(n->m_data + offp);
		hlen += ip->ip_hl << 2;

		/*
		 * The device needs the checksum field itself contiguous;
		 * m_pullup may replace the chain head, so update *mp.
		 */
		*mp = m_pullup(m, hlen + csum_off + 2);
		if (*mp == NULL)
			return (-1);
		m = *mp;
	}

	switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		/* Too many segments: compact the chain and retry once. */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		m_freem(m);
		return -1;
	}

	/* Keep the mbuf until the completion interrupt frees it. */
	ring->m[prod] = m;

	sop = &ring->txd[prod];
	gen = ring->gen ^ 1;	/* owned by cpu (yet) */

	/*
	 * Write one descriptor per DMA segment.  The SOP descriptor gets
	 * the inverted generation bit so the device ignores the chain
	 * until the caller flips it; subsequent descriptors use the live
	 * generation value.
	 */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &ring->txd[prod];
		txd->tx_addr = htole64(map->dm_segs[i].ds_addr);
		txd->tx_word2 = htole32(((map->dm_segs[i].ds_len &
		    VMXNET3_TX_LEN_M) << VMXNET3_TX_LEN_S) |
		    ((gen & VMXNET3_TX_GEN_M) << VMXNET3_TX_GEN_S));
		txd->tx_word3 = 0;

		/* Wrap the ring and toggle the generation on wrap-around. */
		if (++prod == NTXDESC) {
			prod = 0;
			ring->gen ^= 1;
		}

		gen = ring->gen;
	}
	/* Mark the last descriptor and request a tx completion entry. */
	txd->tx_word3 |= htole32(VMXNET3_TX_EOP | VMXNET3_TX_COMPREQ);

	if (m->m_flags & M_VLANTAG) {
		sop->tx_word3 |= htole32(VMXNET3_TX_VTAG_MODE);
		sop->tx_word3 |= htole32((m->m_pkthdr.ether_vtag &
		    VMXNET3_TX_VLANTAG_M) << VMXNET3_TX_VLANTAG_S);
	}
	if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) {
		/* OP field = absolute offset of the checksum field. */
		sop->tx_word2 |= htole32(((hlen + csum_off) &
		    VMXNET3_TX_OP_M) << VMXNET3_TX_OP_S);
		sop->tx_word3 |= htole32(((hlen & VMXNET3_TX_HLEN_M) <<
		    VMXNET3_TX_HLEN_S) | (VMXNET3_OM_CSUM << VMXNET3_TX_OM_S));
	}

	/* dmamap_sync map */

	ring->prod = prod;

	return (map->dm_nsegs);
}
1191 :
1192 : void
1193 0 : vmxnet3_watchdog(struct ifnet *ifp)
1194 : {
1195 0 : struct vmxnet3_softc *sc = ifp->if_softc;
1196 : int s;
1197 :
1198 0 : printf("%s: device timeout\n", ifp->if_xname);
1199 0 : s = splnet();
1200 0 : vmxnet3_init(sc);
1201 0 : splx(s);
1202 0 : }
1203 :
1204 : void
1205 0 : vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1206 : {
1207 0 : struct vmxnet3_softc *sc = ifp->if_softc;
1208 :
1209 0 : vmxnet3_link_state(sc);
1210 :
1211 0 : ifmr->ifm_status = IFM_AVALID;
1212 0 : ifmr->ifm_active = IFM_ETHER;
1213 :
1214 0 : if (ifp->if_link_state != LINK_STATE_UP)
1215 0 : return;
1216 :
1217 0 : ifmr->ifm_status |= IFM_ACTIVE;
1218 :
1219 0 : if (ifp->if_baudrate >= IF_Gbps(10))
1220 0 : ifmr->ifm_active |= IFM_10G_T;
1221 0 : }
1222 :
/*
 * ifmedia change callback.  The virtual device exposes a single fixed
 * media type, so there is nothing to reprogram; always succeed.
 */
int
vmxnet3_media_change(struct ifnet *ifp)
{
	return 0;
}
1228 :
1229 : void *
1230 0 : vmxnet3_dma_allocmem(struct vmxnet3_softc *sc, u_int size, u_int align, bus_addr_t *pa)
1231 : {
1232 0 : bus_dma_tag_t t = sc->sc_dmat;
1233 0 : bus_dma_segment_t segs[1];
1234 0 : bus_dmamap_t map;
1235 0 : caddr_t va;
1236 0 : int n;
1237 :
1238 0 : if (bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT))
1239 0 : return NULL;
1240 0 : if (bus_dmamem_map(t, segs, 1, size, &va, BUS_DMA_NOWAIT))
1241 0 : return NULL;
1242 0 : if (bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map))
1243 0 : return NULL;
1244 0 : if (bus_dmamap_load(t, map, va, size, NULL, BUS_DMA_NOWAIT))
1245 0 : return NULL;
1246 0 : bzero(va, size);
1247 0 : *pa = DMAADDR(map);
1248 0 : bus_dmamap_unload(t, map);
1249 0 : bus_dmamap_destroy(t, map);
1250 0 : return va;
1251 0 : }
|