Line data Source code
1 : /* $OpenBSD: if_vio.c,v 1.5 2018/02/27 08:44:58 mpi Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
5 : * Copyright (c) 2010 Minoura Makoto.
6 : * All rights reserved.
7 : *
8 : * Redistribution and use in source and binary forms, with or without
9 : * modification, are permitted provided that the following conditions
10 : * are met:
11 : * 1. Redistributions of source code must retain the above copyright
12 : * notice, this list of conditions and the following disclaimer.
13 : * 2. Redistributions in binary form must reproduce the above copyright
14 : * notice, this list of conditions and the following disclaimer in the
15 : * documentation and/or other materials provided with the distribution.
16 : *
17 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 : * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 : * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 : * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 : * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 : * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 : * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 : * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 : * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 : * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 : */
28 :
29 : #include "bpfilter.h"
30 : #include "vlan.h"
31 :
32 : #include <sys/param.h>
33 : #include <sys/systm.h>
34 : #include <sys/kernel.h>
35 : #include <sys/device.h>
36 : #include <sys/mbuf.h>
37 : #include <sys/socket.h>
38 : #include <sys/sockio.h>
39 : #include <sys/timeout.h>
40 :
41 : #include <dev/pv/virtioreg.h>
42 : #include <dev/pv/virtiovar.h>
43 :
44 : #include <net/if.h>
45 : #include <net/if_media.h>
46 :
47 : #include <netinet/in.h>
48 : #include <netinet/if_ether.h>
49 : #include <netinet/ip.h>
50 : #include <netinet/tcp.h>
51 : #include <netinet/udp.h>
52 :
53 : #if NBPFILTER > 0
54 : #include <net/bpf.h>
55 : #endif
56 :
57 : #if VIRTIO_DEBUG
58 : #define DPRINTF(x...) printf(x)
59 : #else
60 : #define DPRINTF(x...)
61 : #endif
62 :
63 : /*
64 : * if_vioreg.h:
65 : */
66 : /* Configuration registers */
67 : #define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */
68 : #define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */
69 :
70 : /* Feature bits */
71 : #define VIRTIO_NET_F_CSUM (1<<0)
72 : #define VIRTIO_NET_F_GUEST_CSUM (1<<1)
73 : #define VIRTIO_NET_F_MAC (1<<5)
74 : #define VIRTIO_NET_F_GSO (1<<6)
75 : #define VIRTIO_NET_F_GUEST_TSO4 (1<<7)
76 : #define VIRTIO_NET_F_GUEST_TSO6 (1<<8)
77 : #define VIRTIO_NET_F_GUEST_ECN (1<<9)
78 : #define VIRTIO_NET_F_GUEST_UFO (1<<10)
79 : #define VIRTIO_NET_F_HOST_TSO4 (1<<11)
80 : #define VIRTIO_NET_F_HOST_TSO6 (1<<12)
81 : #define VIRTIO_NET_F_HOST_ECN (1<<13)
82 : #define VIRTIO_NET_F_HOST_UFO (1<<14)
83 : #define VIRTIO_NET_F_MRG_RXBUF (1<<15)
84 : #define VIRTIO_NET_F_STATUS (1<<16)
85 : #define VIRTIO_NET_F_CTRL_VQ (1<<17)
86 : #define VIRTIO_NET_F_CTRL_RX (1<<18)
87 : #define VIRTIO_NET_F_CTRL_VLAN (1<<19)
88 : #define VIRTIO_NET_F_CTRL_RX_EXTRA (1<<20)
89 : #define VIRTIO_NET_F_GUEST_ANNOUNCE (1<<21)
90 :
91 : /*
92 : * Config(8) flags. The lowest byte is reserved for generic virtio stuff.
93 : */
94 :
95 : /* Workaround for vlan related bug in qemu < version 2.0 */
96 : #define CONFFLAG_QEMU_VLAN_BUG (1<<8)
97 :
/*
 * Human-readable names for the feature bits above; passed to
 * virtio_negotiate_features() so negotiated features can be printed.
 * Terminated by a { 0, NULL } sentinel.
 */
98 : static const struct virtio_feature_name virtio_net_feature_names[] = {
99 : { VIRTIO_NET_F_CSUM, "CSum" },
100 : { VIRTIO_NET_F_GUEST_CSUM, "GuestCSum" },
101 : { VIRTIO_NET_F_MAC, "MAC" },
102 : { VIRTIO_NET_F_GSO, "GSO" },
103 : { VIRTIO_NET_F_GUEST_TSO4, "GuestTSO4" },
104 : { VIRTIO_NET_F_GUEST_TSO6, "GuestTSO6" },
105 : { VIRTIO_NET_F_GUEST_ECN, "GuestECN" },
106 : { VIRTIO_NET_F_GUEST_UFO, "GuestUFO" },
107 : { VIRTIO_NET_F_HOST_TSO4, "HostTSO4" },
108 : { VIRTIO_NET_F_HOST_TSO6, "HostTSO6" },
109 : { VIRTIO_NET_F_HOST_ECN, "HostECN" },
110 : { VIRTIO_NET_F_HOST_UFO, "HostUFO" },
111 : { VIRTIO_NET_F_MRG_RXBUF, "MrgRXBuf" },
112 : { VIRTIO_NET_F_STATUS, "Status" },
113 : { VIRTIO_NET_F_CTRL_VQ, "CtrlVQ" },
114 : { VIRTIO_NET_F_CTRL_RX, "CtrlRX" },
115 : { VIRTIO_NET_F_CTRL_VLAN, "CtrlVLAN" },
116 : { VIRTIO_NET_F_CTRL_RX_EXTRA, "CtrlRXExtra" },
117 : { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
118 : { 0, NULL }
119 : };
120 :
121 : /* Status */
122 : #define VIRTIO_NET_S_LINK_UP 1
123 :
124 : /* Packet header structure */
125 : struct virtio_net_hdr {
126 : uint8_t flags;
127 : uint8_t gso_type;
128 : uint16_t hdr_len;
129 : uint16_t gso_size;
130 : uint16_t csum_start;
131 : uint16_t csum_offset;
132 :
133 : /* only present if VIRTIO_NET_F_MRG_RXBUF is negotiated */
134 : uint16_t num_buffers;
135 : } __packed;
136 :
137 : #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */
138 : #define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */
139 : #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */
140 : #define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */
141 : #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */
142 : #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */
143 :
144 : #define VIRTIO_NET_MAX_GSO_LEN (65536+ETHER_HDR_LEN)
145 :
146 : /* Control virtqueue */
147 : struct virtio_net_ctrl_cmd {
148 : uint8_t class;
149 : uint8_t command;
150 : } __packed;
151 : #define VIRTIO_NET_CTRL_RX 0
152 : # define VIRTIO_NET_CTRL_RX_PROMISC 0
153 : # define VIRTIO_NET_CTRL_RX_ALLMULTI 1
154 :
155 : #define VIRTIO_NET_CTRL_MAC 1
156 : # define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
157 :
158 : #define VIRTIO_NET_CTRL_VLAN 2
159 : # define VIRTIO_NET_CTRL_VLAN_ADD 0
160 : # define VIRTIO_NET_CTRL_VLAN_DEL 1
161 :
162 : struct virtio_net_ctrl_status {
163 : uint8_t ack;
164 : } __packed;
165 : #define VIRTIO_NET_OK 0
166 : #define VIRTIO_NET_ERR 1
167 :
168 : struct virtio_net_ctrl_rx {
169 : uint8_t onoff;
170 : } __packed;
171 :
172 : struct virtio_net_ctrl_mac_tbl {
173 : uint32_t nentries;
174 : uint8_t macs[][ETHER_ADDR_LEN];
175 : } __packed;
176 :
177 : struct virtio_net_ctrl_vlan {
178 : uint16_t id;
179 : } __packed;
180 :
181 : /*
182 : * if_viovar.h:
183 : */
/*
 * State of the single shared control-queue request (sc_ctrl_inuse):
 * FREE  - no request outstanding, buffers available
 * INUSE - a request has been submitted and is in flight
 * DONE  - the device completed the request (see vio_ctrleof)
 * RESET - device was reset while a request was pending (see vio_stop)
 */
184 : enum vio_ctrl_state {
185 : FREE, INUSE, DONE, RESET
186 : };
187 :
/* Per-instance driver state for a vio(4) virtio network interface. */
188 : struct vio_softc {
189 : struct device sc_dev;
190 :
/* parent virtio transport; provides sc_dmat and the config space */
191 : struct virtio_softc *sc_virtio;
192 : #define VQRX 0
193 : #define VQTX 1
194 : #define VQCTL 2
/* rx, tx, and (optional) control virtqueues, indexed by VQ* above */
195 : struct virtqueue sc_vq[3];
196 :
197 : struct arpcom sc_ac;
198 : struct ifmedia sc_media;
199 :
200 : short sc_ifflags;
201 :
202 : /* bus_dmamem */
/* one contiguous DMA region holding tx headers + control buffers */
203 : bus_dma_segment_t sc_dma_seg;
204 : bus_dmamap_t sc_dma_map;
205 : size_t sc_dma_size;
206 : caddr_t sc_dma_kva;
207 :
/* size of struct virtio_net_hdr actually used; full size only with
 * VIRTIO_NET_F_MRG_RXBUF (see VIO_HAVE_MRG_RXBUF) */
208 : int sc_hdr_size;
209 : struct virtio_net_hdr *sc_tx_hdrs;
210 : struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
211 : struct virtio_net_ctrl_status *sc_ctrl_status;
212 : struct virtio_net_ctrl_rx *sc_ctrl_rx;
213 : struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
214 : #define sc_ctrl_mac_info sc_ctrl_mac_tbl_uc
215 : struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;
216 :
217 : /* kmem */
/* single mallocarray() allocation carved into four per-slot arrays;
 * layout: rx dmamaps, tx dmamaps, rx mbufs, tx mbufs (see vio_alloc_mem) */
218 : bus_dmamap_t *sc_arrays;
219 : #define sc_rx_dmamaps sc_arrays
220 : bus_dmamap_t *sc_tx_dmamaps;
221 : struct mbuf **sc_rx_mbufs;
222 : struct mbuf **sc_tx_mbufs;
223 : struct if_rxring sc_rx_ring;
224 :
/* guards the single set of sc_ctrl_* buffers; access at splnet only */
225 : enum vio_ctrl_state sc_ctrl_inuse;
226 :
/* watchdog-style timeouts that re-poll the tx/rx queues */
227 : struct timeout sc_txtick, sc_rxtick;
228 : };
229 :
230 : #define VIO_DMAMEM_OFFSET(sc, p) ((caddr_t)(p) - (sc)->sc_dma_kva)
231 : #define VIO_DMAMEM_SYNC(vsc, sc, p, size, flags) \
232 : bus_dmamap_sync((vsc)->sc_dmat, (sc)->sc_dma_map, \
233 : VIO_DMAMEM_OFFSET((sc), (p)), (size), (flags))
234 : #define VIO_DMAMEM_ENQUEUE(sc, vq, slot, p, size, write) \
235 : virtio_enqueue_p((vq), (slot), (sc)->sc_dma_map, \
236 : VIO_DMAMEM_OFFSET((sc), (p)), (size), (write))
237 : #define VIO_HAVE_MRG_RXBUF(sc) \
238 : ((sc)->sc_hdr_size == sizeof(struct virtio_net_hdr))
239 :
240 : #define VIRTIO_NET_TX_MAXNSEGS 16 /* for larger chains, defrag */
241 : #define VIRTIO_NET_CTRL_MAC_MC_ENTRIES 64 /* for more entries, use ALLMULTI */
242 : #define VIRTIO_NET_CTRL_MAC_UC_ENTRIES 1 /* one entry for own unicast addr */
243 :
244 : #define VIO_CTRL_MAC_INFO_SIZE \
245 : (2*sizeof(struct virtio_net_ctrl_mac_tbl) + \
246 : (VIRTIO_NET_CTRL_MAC_MC_ENTRIES + \
247 : VIRTIO_NET_CTRL_MAC_UC_ENTRIES) * ETHER_ADDR_LEN)
248 :
249 : /* cfattach interface functions */
250 : int vio_match(struct device *, void *, void *);
251 : void vio_attach(struct device *, struct device *, void *);
252 :
253 : /* ifnet interface functions */
254 : int vio_init(struct ifnet *);
255 : void vio_stop(struct ifnet *, int);
256 : void vio_start(struct ifnet *);
257 : int vio_ioctl(struct ifnet *, u_long, caddr_t);
258 : void vio_get_lladr(struct arpcom *ac, struct virtio_softc *vsc);
259 : void vio_put_lladr(struct arpcom *ac, struct virtio_softc *vsc);
260 :
261 : /* rx */
262 : int vio_add_rx_mbuf(struct vio_softc *, int);
263 : void vio_free_rx_mbuf(struct vio_softc *, int);
264 : void vio_populate_rx_mbufs(struct vio_softc *);
265 : int vio_rxeof(struct vio_softc *);
266 : int vio_rx_intr(struct virtqueue *);
267 : void vio_rx_drain(struct vio_softc *);
268 : void vio_rxtick(void *);
269 :
270 : /* tx */
271 : int vio_tx_intr(struct virtqueue *);
272 : int vio_txeof(struct virtqueue *);
273 : void vio_tx_drain(struct vio_softc *);
274 : int vio_encap(struct vio_softc *, int, struct mbuf *);
275 : void vio_txtick(void *);
276 :
277 : /* other control */
278 : void vio_link_state(struct ifnet *);
279 : int vio_config_change(struct virtio_softc *);
280 : int vio_ctrl_rx(struct vio_softc *, int, int);
281 : int vio_set_rx_filter(struct vio_softc *);
282 : void vio_iff(struct vio_softc *);
283 : int vio_media_change(struct ifnet *);
284 : void vio_media_status(struct ifnet *, struct ifmediareq *);
285 : int vio_ctrleof(struct virtqueue *);
286 : int vio_wait_ctrl(struct vio_softc *sc);
287 : int vio_wait_ctrl_done(struct vio_softc *sc);
288 : void vio_ctrl_wakeup(struct vio_softc *, enum vio_ctrl_state);
289 : int vio_alloc_mem(struct vio_softc *);
290 : int vio_alloc_dmamem(struct vio_softc *);
291 : void vio_free_dmamem(struct vio_softc *);
292 :
293 : #if VIRTIO_DEBUG
294 : void vio_dump(struct vio_softc *);
295 : #endif
296 :
/*
 * autoconf(9) match: accept the virtio child only when the transport
 * reports the virtio network device id.
 */
297 : int
298 0 : vio_match(struct device *parent, void *match, void *aux)
299 : {
300 0 : struct virtio_softc *va = aux;
301 :
302 0 : if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
303 0 : return 1;
304 :
305 0 : return 0;
306 0 : }
307 :
/* autoconf glue: attach entry points and softc size for vio(4) */
308 : struct cfattach vio_ca = {
309 : sizeof(struct vio_softc), vio_match, vio_attach, NULL
310 : };
311 :
/* driver definition: network interface class, device name "vio" */
312 : struct cfdriver vio_cd = {
313 : NULL, "vio", DV_IFNET
314 : };
315 :
/*
 * Allocate, map and load the single contiguous DMA region
 * (sc_dma_size bytes) used for tx headers and control-queue buffers.
 * Returns 0 on success, 1 on failure; on failure everything acquired
 * so far is released via the goto-unwind chain below.
 */
316 : int
317 0 : vio_alloc_dmamem(struct vio_softc *sc)
318 : {
319 0 : struct virtio_softc *vsc = sc->sc_virtio;
320 0 : int nsegs;
321 :
322 0 : if (bus_dmamap_create(vsc->sc_dmat, sc->sc_dma_size, 1,
323 : sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
324 0 : &sc->sc_dma_map) != 0)
325 : goto err;
326 0 : if (bus_dmamem_alloc(vsc->sc_dmat, sc->sc_dma_size, 16, 0,
327 0 : &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
328 : goto destroy;
329 0 : if (bus_dmamem_map(vsc->sc_dmat, &sc->sc_dma_seg, nsegs,
330 0 : sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
331 : goto free;
332 0 : if (bus_dmamap_load(vsc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
333 0 : sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
334 : goto unmap;
335 0 : return (0);
336 :
/* error unwind: release resources in reverse order of acquisition */
337 : unmap:
338 0 : bus_dmamem_unmap(vsc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
339 : free:
340 0 : bus_dmamem_free(vsc->sc_dmat, &sc->sc_dma_seg, 1);
341 : destroy:
342 0 : bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dma_map);
343 : err:
344 0 : return (1);
345 0 : }
346 :
/*
 * Release the DMA region created by vio_alloc_dmamem(), in reverse
 * order of acquisition: unload, unmap, free, destroy map.
 */
347 : void
348 0 : vio_free_dmamem(struct vio_softc *sc)
349 : {
350 0 : struct virtio_softc *vsc = sc->sc_virtio;
351 0 : bus_dmamap_unload(vsc->sc_dmat, sc->sc_dma_map);
352 0 : bus_dmamem_unmap(vsc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
353 0 : bus_dmamem_free(vsc->sc_dmat, &sc->sc_dma_seg, 1);
354 0 : bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dma_map);
355 0 : }
356 :
357 : /* allocate memory */
358 : /*
359 : * dma memory is used for:
360 : * sc_tx_hdrs[slot]: metadata array for frames to be sent (WRITE)
361 : * sc_ctrl_cmd: command to be sent via ctrl vq (WRITE)
362 : * sc_ctrl_status: return value for a command via ctrl vq (READ)
363 : * sc_ctrl_rx: parameter for a VIRTIO_NET_CTRL_RX class command
364 : * (WRITE)
365 : * sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
366 : * class command (WRITE)
367 : * sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
368 : * class command (WRITE)
369 : * sc_ctrl_* structures are allocated only one each; they are protected by
370 : * sc_ctrl_inuse, which must only be accessed at splnet
371 : *
372 : * metadata headers for received frames are stored at the start of the
373 : * rx mbufs.
374 : */
375 : /*
376 : * dynamically allocated memory is used for:
377 : * sc_rx_dmamaps[slot]: bus_dmamap_t array for received payload
378 : * sc_tx_dmamaps[slot]: bus_dmamap_t array for sent payload
379 : * sc_rx_mbufs[slot]: mbuf pointer array for received frames
380 : * sc_tx_mbufs[slot]: mbuf pointer array for sent frames
381 : */
/*
 * Allocate all per-queue memory: the shared DMA region (tx headers and,
 * if the control queue exists, the sc_ctrl_* buffers) plus the kmem
 * arrays for dmamaps and mbuf pointers, and create one dmamap per
 * rx/tx slot. Returns 0 on success, -1 on failure (all partial
 * allocations are released).
 */
382 : int
383 0 : vio_alloc_mem(struct vio_softc *sc)
384 : {
385 0 : struct virtio_softc *vsc = sc->sc_virtio;
386 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
387 : int allocsize, r, i, txsize;
388 : unsigned int offset = 0;
389 : int rxqsize, txqsize;
390 : caddr_t kva;
391 :
392 0 : rxqsize = vsc->sc_vqs[0].vq_num;
393 0 : txqsize = vsc->sc_vqs[1].vq_num;
394 :
395 : /*
396 : * For simplicity, we always allocate the full virtio_net_hdr size
397 : * even if VIRTIO_NET_F_MRG_RXBUF is not negotiated and
398 : * only a part of the memory is ever used.
399 : */
400 0 : allocsize = sizeof(struct virtio_net_hdr) * txqsize;
401 :
/* sc_nvqs == 3 means the control virtqueue was set up in vio_attach */
402 0 : if (vsc->sc_nvqs == 3) {
403 0 : allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
404 0 : allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
405 0 : allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
406 0 : allocsize += VIO_CTRL_MAC_INFO_SIZE;
407 0 : }
408 0 : sc->sc_dma_size = allocsize;
409 :
410 0 : if (vio_alloc_dmamem(sc) != 0) {
411 0 : printf("unable to allocate dma region\n");
412 0 : return -1;
413 : }
414 :
/* carve the DMA region into the individual buffers, in order */
415 0 : kva = sc->sc_dma_kva;
416 0 : sc->sc_tx_hdrs = (struct virtio_net_hdr*)(kva + offset);
417 : offset += sizeof(struct virtio_net_hdr) * txqsize;
418 0 : if (vsc->sc_nvqs == 3) {
419 0 : sc->sc_ctrl_cmd = (void*)(kva + offset);
420 0 : offset += sizeof(*sc->sc_ctrl_cmd);
421 0 : sc->sc_ctrl_status = (void*)(kva + offset);
422 0 : offset += sizeof(*sc->sc_ctrl_status);
423 0 : sc->sc_ctrl_rx = (void*)(kva + offset);
424 0 : offset += sizeof(*sc->sc_ctrl_rx);
425 0 : sc->sc_ctrl_mac_tbl_uc = (void*)(kva + offset);
426 0 : offset += sizeof(*sc->sc_ctrl_mac_tbl_uc) +
427 : ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_UC_ENTRIES;
428 0 : sc->sc_ctrl_mac_tbl_mc = (void*)(kva + offset);
429 0 : }
430 :
/* one allocation for all four per-slot arrays (see sc_arrays layout) */
431 0 : sc->sc_arrays = mallocarray(rxqsize + txqsize,
432 : 2 * sizeof(bus_dmamap_t) + sizeof(struct mbuf *), M_DEVBUF,
433 : M_WAITOK | M_CANFAIL | M_ZERO);
434 0 : if (sc->sc_arrays == NULL) {
435 0 : printf("unable to allocate mem for dmamaps\n");
436 0 : goto err_hdr;
437 : }
438 0 : allocsize = (rxqsize + txqsize) *
439 : (2 * sizeof(bus_dmamap_t) + sizeof(struct mbuf *));
440 :
441 0 : sc->sc_tx_dmamaps = sc->sc_arrays + rxqsize;
442 0 : sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize);
443 0 : sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize;
444 :
/* rx payload is always a single cluster, so 1 segment of MCLBYTES */
445 0 : for (i = 0; i < rxqsize; i++) {
446 0 : r = bus_dmamap_create(vsc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
447 : BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &sc->sc_rx_dmamaps[i]);
448 0 : if (r != 0)
449 : goto err_reqs;
450 : }
451 :
452 0 : txsize = ifp->if_hardmtu + sc->sc_hdr_size + ETHER_HDR_LEN;
453 0 : for (i = 0; i < txqsize; i++) {
454 0 : r = bus_dmamap_create(vsc->sc_dmat, txsize,
455 : VIRTIO_NET_TX_MAXNSEGS, txsize, 0,
456 : BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
457 : &sc->sc_tx_dmamaps[i]);
458 0 : if (r != 0)
459 : goto err_reqs;
460 : }
461 :
462 0 : return 0;
463 :
/* unwind: the arrays were M_ZERO'd, so NULL checks skip uncreated maps */
464 : err_reqs:
465 0 : printf("dmamap creation failed, error %d\n", r);
466 0 : for (i = 0; i < txqsize; i++) {
467 0 : if (sc->sc_tx_dmamaps[i])
468 0 : bus_dmamap_destroy(vsc->sc_dmat, sc->sc_tx_dmamaps[i]);
469 : }
470 0 : for (i = 0; i < rxqsize; i++) {
471 0 : if (sc->sc_rx_dmamaps[i])
472 0 : bus_dmamap_destroy(vsc->sc_dmat, sc->sc_rx_dmamaps[i]);
473 : }
474 0 : if (sc->sc_arrays) {
475 0 : free(sc->sc_arrays, M_DEVBUF, 0);
476 0 : sc->sc_arrays = 0;
477 0 : }
478 : err_hdr:
479 0 : vio_free_dmamem(sc);
480 0 : return -1;
481 0 : }
482 :
/* Read the station MAC address from virtio config space into ac_enaddr. */
483 : void
484 0 : vio_get_lladr(struct arpcom *ac, struct virtio_softc *vsc)
485 : {
486 : int i;
487 0 : for (i = 0; i < ETHER_ADDR_LEN; i++) {
488 0 : ac->ac_enaddr[i] = virtio_read_device_config_1(vsc,
489 : VIRTIO_NET_CONFIG_MAC + i);
490 : }
491 0 : }
492 :
/* Write ac_enaddr into virtio config space (used with a fake address
 * when the host did not offer VIRTIO_NET_F_MAC, see vio_attach). */
493 : void
494 0 : vio_put_lladr(struct arpcom *ac, struct virtio_softc *vsc)
495 : {
496 : int i;
497 0 : for (i = 0; i < ETHER_ADDR_LEN; i++) {
498 0 : virtio_write_device_config_1(vsc, VIRTIO_NET_CONFIG_MAC + i,
499 : ac->ac_enaddr[i]);
500 : }
501 0 : }
502 :
/*
 * autoconf(9) attach: negotiate features with the host, determine the
 * MAC address and header size, allocate the rx/tx (and optionally
 * control) virtqueues and driver memory, then register the ifnet.
 * On failure all allocated virtqueues are freed and the parent is
 * flagged with VIRTIO_CHILD_ERROR.
 */
503 : void
504 0 : vio_attach(struct device *parent, struct device *self, void *aux)
505 : {
506 0 : struct vio_softc *sc = (struct vio_softc *)self;
507 0 : struct virtio_softc *vsc = (struct virtio_softc *)parent;
508 : uint32_t features;
509 : int i;
510 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
511 :
512 0 : if (vsc->sc_child != NULL) {
513 0 : printf(": child already attached for %s; something wrong...\n",
514 0 : parent->dv_xname);
515 0 : return;
516 : }
517 :
518 0 : sc->sc_virtio = vsc;
519 :
520 0 : vsc->sc_child = self;
521 0 : vsc->sc_ipl = IPL_NET;
522 0 : vsc->sc_vqs = &sc->sc_vq[0];
523 0 : vsc->sc_config_change = 0;
524 :
/* features this driver is willing to use; host may offer fewer */
525 : features = VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS |
526 : VIRTIO_NET_F_CTRL_VQ | VIRTIO_NET_F_CTRL_RX |
527 : VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_CSUM;
528 : /*
529 : * VIRTIO_F_RING_EVENT_IDX can be switched off by setting bit 2 in the
530 : * driver flags, see config(8)
531 : */
532 0 : if (!(sc->sc_dev.dv_cfdata->cf_flags & 2) &&
533 0 : !(vsc->sc_dev.dv_cfdata->cf_flags & 2))
534 0 : features |= VIRTIO_F_RING_EVENT_IDX;
535 : else
536 0 : printf(": RingEventIdx disabled by UKC");
537 :
538 0 : features = virtio_negotiate_features(vsc, features,
539 : virtio_net_feature_names);
/* host-provided MAC if available, otherwise generate and publish one */
540 0 : if (features & VIRTIO_NET_F_MAC) {
541 0 : vio_get_lladr(&sc->sc_ac, vsc);
542 0 : } else {
543 0 : ether_fakeaddr(ifp);
544 0 : vio_put_lladr(&sc->sc_ac, vsc);
545 : }
546 0 : printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
547 :
/* header size doubles as the MRG_RXBUF indicator (VIO_HAVE_MRG_RXBUF) */
548 0 : if (features & VIRTIO_NET_F_MRG_RXBUF) {
549 0 : sc->sc_hdr_size = sizeof(struct virtio_net_hdr);
550 0 : ifp->if_hardmtu = 16000; /* arbitrary limit */
551 0 : } else {
552 0 : sc->sc_hdr_size = offsetof(struct virtio_net_hdr, num_buffers);
553 0 : ifp->if_hardmtu = MCLBYTES - sc->sc_hdr_size - ETHER_HDR_LEN;
554 : }
555 :
/* sc_nvqs tracks how many vqs exist so the err path frees only those */
556 0 : if (virtio_alloc_vq(vsc, &sc->sc_vq[VQRX], 0, MCLBYTES, 2, "rx") != 0)
557 : goto err;
558 0 : vsc->sc_nvqs = 1;
559 0 : sc->sc_vq[VQRX].vq_done = vio_rx_intr;
560 0 : if (virtio_alloc_vq(vsc, &sc->sc_vq[VQTX], 1,
561 0 : sc->sc_hdr_size + ifp->if_hardmtu + ETHER_HDR_LEN,
562 0 : VIRTIO_NET_TX_MAXNSEGS + 1, "tx") != 0) {
563 : goto err;
564 : }
565 0 : vsc->sc_nvqs = 2;
566 0 : sc->sc_vq[VQTX].vq_done = vio_tx_intr;
567 0 : virtio_start_vq_intr(vsc, &sc->sc_vq[VQRX]);
/* tx interrupts are deferred; vio_start re-arms them when needed */
568 0 : if (features & VIRTIO_F_RING_EVENT_IDX)
569 0 : virtio_postpone_intr_far(&sc->sc_vq[VQTX]);
570 : else
571 0 : virtio_stop_vq_intr(vsc, &sc->sc_vq[VQTX]);
/* control queue is optional; failure here is not fatal */
572 0 : if ((features & VIRTIO_NET_F_CTRL_VQ)
573 0 : && (features & VIRTIO_NET_F_CTRL_RX)) {
574 0 : if (virtio_alloc_vq(vsc, &sc->sc_vq[VQCTL], 2, NBPG, 1,
575 0 : "control") == 0) {
576 0 : sc->sc_vq[VQCTL].vq_done = vio_ctrleof;
577 0 : virtio_start_vq_intr(vsc, &sc->sc_vq[VQCTL]);
578 0 : vsc->sc_nvqs = 3;
579 0 : }
580 : }
581 :
582 0 : if (vio_alloc_mem(sc) < 0)
583 : goto err;
584 :
585 0 : strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
586 0 : ifp->if_softc = sc;
587 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
588 0 : ifp->if_start = vio_start;
589 0 : ifp->if_ioctl = vio_ioctl;
590 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
591 0 : if (features & VIRTIO_NET_F_CSUM)
592 0 : ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
593 0 : IFQ_SET_MAXLEN(&ifp->if_snd, vsc->sc_vqs[1].vq_num - 1);
594 0 : ifmedia_init(&sc->sc_media, 0, vio_media_change, vio_media_status);
595 0 : ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
596 0 : ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
597 0 : vsc->sc_config_change = vio_config_change;
598 0 : timeout_set(&sc->sc_txtick, vio_txtick, &sc->sc_vq[VQTX]);
599 0 : timeout_set(&sc->sc_rxtick, vio_rxtick, &sc->sc_vq[VQRX]);
600 :
601 0 : if_attach(ifp);
602 0 : ether_ifattach(ifp);
603 :
604 0 : return;
605 :
606 : err:
607 0 : for (i = 0; i < vsc->sc_nvqs; i++)
608 0 : virtio_free_vq(vsc, &sc->sc_vq[i]);
609 0 : vsc->sc_nvqs = 0;
610 0 : vsc->sc_child = VIRTIO_CHILD_ERROR;
611 0 : return;
612 0 : }
613 :
614 : /* check link status */
/*
 * Check link status: read the STATUS config field if the feature was
 * negotiated (otherwise assume up), and notify the stack on change.
 */
615 : void
616 0 : vio_link_state(struct ifnet *ifp)
617 : {
618 0 : struct vio_softc *sc = ifp->if_softc;
619 0 : struct virtio_softc *vsc = sc->sc_virtio;
620 : int link_state = LINK_STATE_FULL_DUPLEX;
621 :
622 0 : if (vsc->sc_features & VIRTIO_NET_F_STATUS) {
623 0 : int status = virtio_read_device_config_2(vsc,
624 : VIRTIO_NET_CONFIG_STATUS);
625 0 : if (!(status & VIRTIO_NET_S_LINK_UP))
626 0 : link_state = LINK_STATE_DOWN;
627 0 : }
/* only report a transition, not every poll */
628 0 : if (ifp->if_link_state != link_state) {
629 0 : ifp->if_link_state = link_state;
630 0 : if_link_state_change(ifp);
631 0 : }
632 0 : }
633 :
/* virtio config-change callback: re-evaluate link state. Returns 1
 * (claims the event; exact return contract defined by virtiovar.h). */
634 : int
635 0 : vio_config_change(struct virtio_softc *vsc)
636 : {
637 0 : struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
638 0 : vio_link_state(&sc->sc_ac.ac_if);
639 0 : return 1;
640 : }
641 :
/* ifmedia change callback: media is fixed (virtual), nothing to do. */
642 : int
643 0 : vio_media_change(struct ifnet *ifp)
644 : {
645 : /* Ignore */
646 0 : return (0);
647 : }
648 :
/* ifmedia status callback: always autoselect; report active/full-duplex
 * when the (re-polled) link is up and the interface is up. */
649 : void
650 0 : vio_media_status(struct ifnet *ifp, struct ifmediareq *imr)
651 : {
652 0 : imr->ifm_active = IFM_ETHER | IFM_AUTO;
653 0 : imr->ifm_status = IFM_AVALID;
654 :
655 0 : vio_link_state(ifp);
656 0 : if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP)
657 0 : imr->ifm_status |= IFM_ACTIVE|IFM_FDX;
658 0 : }
659 :
660 : /*
661 : * Interface functions for ifnet
662 : */
/*
 * Bring the interface up: reset via vio_stop(), size and fill the rx
 * ring, mark RUNNING, reprogram the rx filter and refresh link state.
 * Always returns 0.
 */
663 : int
664 0 : vio_init(struct ifnet *ifp)
665 : {
666 0 : struct vio_softc *sc = ifp->if_softc;
667 :
668 0 : vio_stop(ifp, 0);
/* low watermark: enough clusters for two maximum-sized packets */
669 0 : if_rxr_init(&sc->sc_rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
670 0 : sc->sc_vq[VQRX].vq_num);
671 0 : vio_populate_rx_mbufs(sc);
672 0 : ifp->if_flags |= IFF_RUNNING;
673 0 : ifq_clr_oactive(&ifp->if_snd);
674 0 : vio_iff(sc);
675 0 : vio_link_state(ifp);
676 0 : return 0;
677 : }
678 :
/*
 * Stop the interface: reset the device (the only way to stop DMA),
 * reap completed rx/tx/ctrl descriptors, drain the tx (and optionally
 * rx) buffers, then reinitialize the device so it can be restarted.
 * Any thread sleeping on a control-queue request is woken with the
 * state set to RESET.
 */
679 : void
680 0 : vio_stop(struct ifnet *ifp, int disable)
681 : {
682 0 : struct vio_softc *sc = ifp->if_softc;
683 0 : struct virtio_softc *vsc = sc->sc_virtio;
684 :
685 0 : timeout_del(&sc->sc_txtick);
686 0 : timeout_del(&sc->sc_rxtick);
687 0 : ifp->if_flags &= ~IFF_RUNNING;
688 0 : ifq_clr_oactive(&ifp->if_snd);
689 : /* only way to stop I/O and DMA is resetting... */
690 0 : virtio_reset(vsc);
691 0 : vio_rxeof(sc);
692 0 : if (vsc->sc_nvqs >= 3)
693 0 : vio_ctrleof(&sc->sc_vq[VQCTL]);
694 0 : vio_tx_drain(sc);
695 0 : if (disable)
696 0 : vio_rx_drain(sc);
697 :
/* re-negotiate the previously agreed features and re-arm interrupts */
698 0 : virtio_reinit_start(vsc);
699 0 : virtio_negotiate_features(vsc, vsc->sc_features, NULL);
700 0 : virtio_start_vq_intr(vsc, &sc->sc_vq[VQRX]);
701 0 : virtio_stop_vq_intr(vsc, &sc->sc_vq[VQTX]);
702 0 : if (vsc->sc_nvqs >= 3)
703 0 : virtio_start_vq_intr(vsc, &sc->sc_vq[VQCTL]);
704 0 : virtio_reinit_end(vsc);
705 0 : if (vsc->sc_nvqs >= 3) {
706 0 : if (sc->sc_ctrl_inuse != FREE)
707 0 : sc->sc_ctrl_inuse = RESET;
708 0 : wakeup(&sc->sc_ctrl_inuse);
709 0 : }
710 0 : }
711 :
/*
 * ifnet start routine: dequeue packets from the send queue, build a
 * virtio_net_hdr for each (filling in checksum-offload fields when
 * requested), map them for DMA and enqueue header + payload on the tx
 * virtqueue. When the ring fills up, the queue is marked oactive and
 * a tx interrupt is requested so transmission resumes on completion.
 */
712 : void
713 0 : vio_start(struct ifnet *ifp)
714 : {
715 0 : struct vio_softc *sc = ifp->if_softc;
716 0 : struct virtio_softc *vsc = sc->sc_virtio;
717 0 : struct virtqueue *vq = &sc->sc_vq[VQTX];
718 : struct mbuf *m;
719 : int queued = 0;
720 :
/* reap finished transmissions first to free ring slots */
721 0 : vio_txeof(vq);
722 :
723 0 : if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
724 0 : return;
725 0 : if (IFQ_IS_EMPTY(&ifp->if_snd))
726 0 : return;
727 :
728 : again:
729 0 : for (;;) {
730 0 : int slot, r;
731 : struct virtio_net_hdr *hdr;
732 :
733 0 : m = ifq_deq_begin(&ifp->if_snd);
734 0 : if (m == NULL)
735 0 : break;
736 :
737 0 : r = virtio_enqueue_prep(vq, &slot);
/* EAGAIN: ring full; put the packet back and stall the queue */
738 0 : if (r == EAGAIN) {
739 0 : ifq_deq_rollback(&ifp->if_snd, m);
740 0 : ifq_set_oactive(&ifp->if_snd);
741 0 : break;
742 : }
743 0 : if (r != 0)
744 0 : panic("enqueue_prep for a tx buffer: %d", r);
745 :
746 0 : hdr = &sc->sc_tx_hdrs[slot];
747 0 : memset(hdr, 0, sc->sc_hdr_size);
/* checksum offload: tell the host where to store the checksum */
748 0 : if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) {
749 : struct mbuf *mip;
750 : struct ip *ip;
751 : int ehdrlen = ETHER_HDR_LEN;
752 0 : int ipoff;
753 : #if NVLAN > 0
754 : struct ether_vlan_header *eh;
755 :
/* account for an in-band VLAN tag when computing header offsets */
756 0 : eh = mtod(m, struct ether_vlan_header *);
757 0 : if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
758 0 : ehdrlen += ETHER_VLAN_ENCAP_LEN;
759 : #endif
760 :
761 0 : if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
762 0 : hdr->csum_offset = offsetof(struct tcphdr, th_sum);
763 : else
764 0 : hdr->csum_offset = offsetof(struct udphdr, uh_sum);
765 :
/* csum_start = end of IP header (variable length via ip_hl) */
766 0 : mip = m_getptr(m, ehdrlen, &ipoff);
767 0 : KASSERT(mip != NULL && mip->m_len - ipoff >= sizeof(*ip));
768 0 : ip = (struct ip *)(mip->m_data + ipoff);
769 0 : hdr->csum_start = ehdrlen + (ip->ip_hl << 2);
770 0 : hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
771 0 : }
772 :
/* map (and possibly defragment) the mbuf chain into the tx dmamap */
773 0 : r = vio_encap(sc, slot, m);
774 0 : if (r != 0) {
775 0 : virtio_enqueue_abort(vq, slot);
776 0 : ifq_deq_commit(&ifp->if_snd, m);
777 0 : m_freem(m);
778 0 : ifp->if_oerrors++;
779 0 : continue;
780 : }
/* +1 descriptor for the virtio_net_hdr itself */
781 0 : r = virtio_enqueue_reserve(vq, slot,
782 0 : sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
783 0 : if (r != 0) {
784 0 : bus_dmamap_unload(vsc->sc_dmat,
785 : sc->sc_tx_dmamaps[slot]);
786 0 : ifq_deq_rollback(&ifp->if_snd, m);
787 0 : sc->sc_tx_mbufs[slot] = NULL;
788 0 : ifq_set_oactive(&ifp->if_snd);
789 0 : break;
790 : }
791 0 : ifq_deq_commit(&ifp->if_snd, m);
792 :
793 0 : bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot], 0,
794 : sc->sc_tx_dmamaps[slot]->dm_mapsize, BUS_DMASYNC_PREWRITE);
795 0 : VIO_DMAMEM_SYNC(vsc, sc, hdr, sc->sc_hdr_size,
796 : BUS_DMASYNC_PREWRITE);
797 0 : VIO_DMAMEM_ENQUEUE(sc, vq, slot, hdr, sc->sc_hdr_size, 1);
798 0 : virtio_enqueue(vq, slot, sc->sc_tx_dmamaps[slot], 1);
799 0 : virtio_enqueue_commit(vsc, vq, slot, 0);
800 0 : queued++;
801 : #if NBPFILTER > 0
802 0 : if (ifp->if_bpf)
803 0 : bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
804 : #endif
805 0 : }
/* queue stalled: arm a tx interrupt; if completions already arrived,
 * reap them and retry immediately to avoid a lost wakeup */
806 0 : if (ifq_is_oactive(&ifp->if_snd)) {
807 : int r;
808 0 : if (vsc->sc_features & VIRTIO_F_RING_EVENT_IDX)
809 0 : r = virtio_postpone_intr_smart(&sc->sc_vq[VQTX]);
810 : else
811 0 : r = virtio_start_vq_intr(vsc, &sc->sc_vq[VQTX]);
812 0 : if (r) {
813 0 : vio_txeof(vq);
814 0 : goto again;
815 : }
816 0 : }
817 :
818 0 : if (queued > 0) {
819 0 : virtio_notify(vsc, vq);
820 0 : timeout_add_sec(&sc->sc_txtick, 1);
821 0 : }
822 0 : }
823 :
824 : #if VIRTIO_DEBUG
/* Debug helper (VIRTIO_DEBUG only): print virtqueue and timeout state;
 * triggered from vio_ioctl when IFF_DEBUG is set. */
825 : void
826 : vio_dump(struct vio_softc *sc)
827 : {
828 : struct ifnet *ifp = &sc->sc_ac.ac_if;
829 : struct virtio_softc *vsc = sc->sc_virtio;
830 :
831 : printf("%s status dump:\n", ifp->if_xname);
832 : printf("TX virtqueue:\n");
833 : virtio_vq_dump(&vsc->sc_vqs[VQTX]);
834 : printf("tx tick active: %d\n", !timeout_triggered(&sc->sc_txtick));
835 : printf("rx tick active: %d\n", !timeout_triggered(&sc->sc_rxtick));
836 : printf("RX virtqueue:\n");
837 : virtio_vq_dump(&vsc->sc_vqs[VQRX]);
838 : if (vsc->sc_nvqs == 3) {
839 : printf("CTL virtqueue:\n");
840 : virtio_vq_dump(&vsc->sc_vqs[VQCTL]);
841 : printf("ctrl_inuse: %d\n", sc->sc_ctrl_inuse);
842 : }
843 : }
844 : #endif
845 :
/*
 * ifnet ioctl handler, run at splnet. Handles address/flags changes,
 * media requests and rx-ring queries; everything else is delegated to
 * ether_ioctl(). ENETRESET from any path is translated into a rx
 * filter reprogram (vio_iff) rather than a full reinit.
 */
846 : int
847 0 : vio_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
848 : {
849 0 : struct vio_softc *sc = ifp->if_softc;
850 0 : struct ifreq *ifr = (struct ifreq *)data;
851 : int s, r = 0;
852 :
853 0 : s = splnet();
854 0 : switch (cmd) {
855 : case SIOCSIFADDR:
856 0 : ifp->if_flags |= IFF_UP;
857 0 : if (!(ifp->if_flags & IFF_RUNNING))
858 0 : vio_init(ifp);
859 : break;
860 : case SIOCSIFFLAGS:
861 0 : if (ifp->if_flags & IFF_UP) {
862 : #if VIRTIO_DEBUG
863 : if (ifp->if_flags & IFF_DEBUG)
864 : vio_dump(sc);
865 : #endif
/* already running: just update the rx filter via ENETRESET below */
866 0 : if (ifp->if_flags & IFF_RUNNING)
867 0 : r = ENETRESET;
868 : else
869 0 : vio_init(ifp);
870 : } else {
871 0 : if (ifp->if_flags & IFF_RUNNING)
872 0 : vio_stop(ifp, 1);
873 : }
874 : break;
875 : case SIOCGIFMEDIA:
876 : case SIOCSIFMEDIA:
877 0 : r = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
878 0 : break;
879 : case SIOCGIFRXR:
880 0 : r = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
881 0 : NULL, MCLBYTES, &sc->sc_rx_ring);
882 0 : break;
883 : default:
884 0 : r = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
885 0 : }
886 :
887 0 : if (r == ENETRESET) {
888 0 : if (ifp->if_flags & IFF_RUNNING)
889 0 : vio_iff(sc);
890 : r = 0;
891 0 : }
892 0 : splx(s);
893 0 : return r;
894 : }
895 :
896 : /*
897 : * Receive implementation
898 : */
899 : /* allocate and initialize a mbuf for receive */
/*
 * Allocate a cluster mbuf for rx slot i and load it into that slot's
 * dmamap. Returns 0 on success, ENOBUFS or a bus_dma error otherwise;
 * on failure the slot is left empty.
 */
900 : int
901 0 : vio_add_rx_mbuf(struct vio_softc *sc, int i)
902 : {
903 : struct mbuf *m;
904 : int r;
905 :
906 0 : m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
907 0 : if (m == NULL)
908 0 : return ENOBUFS;
909 0 : sc->sc_rx_mbufs[i] = m;
/* expose the whole cluster; the real length is set on dequeue */
910 0 : m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
911 0 : r = bus_dmamap_load_mbuf(sc->sc_virtio->sc_dmat, sc->sc_rx_dmamaps[i],
912 : m, BUS_DMA_READ|BUS_DMA_NOWAIT);
913 0 : if (r) {
914 0 : m_freem(m);
915 0 : sc->sc_rx_mbufs[i] = 0;
916 0 : return r;
917 : }
918 :
919 0 : return 0;
920 0 : }
921 :
922 : /* free a mbuf for receive */
/* Unload and free the rx mbuf in slot i, leaving the slot empty. */
923 : void
924 0 : vio_free_rx_mbuf(struct vio_softc *sc, int i)
925 : {
926 0 : bus_dmamap_unload(sc->sc_virtio->sc_dmat, sc->sc_rx_dmamaps[i]);
927 0 : m_freem(sc->sc_rx_mbufs[i]);
928 0 : sc->sc_rx_mbufs[i] = NULL;
929 0 : }
930 :
931 : /* add mbufs for all the empty receive slots */
/*
 * Refill the rx virtqueue: for as many slots as the rxring accounting
 * allows, attach a cluster mbuf (allocating one if the slot is empty)
 * and enqueue it for the device to write into. Notifies the device if
 * anything was queued; schedules the rx tick as a fallback when the
 * ring could not be replenished.
 */
932 : void
933 0 : vio_populate_rx_mbufs(struct vio_softc *sc)
934 : {
935 0 : struct virtio_softc *vsc = sc->sc_virtio;
936 : int r, done = 0;
937 : u_int slots;
938 0 : struct virtqueue *vq = &sc->sc_vq[VQRX];
939 0 : int mrg_rxbuf = VIO_HAVE_MRG_RXBUF(sc);
940 :
941 0 : for (slots = if_rxr_get(&sc->sc_rx_ring, vq->vq_num);
942 0 : slots > 0; slots--) {
943 0 : int slot;
944 0 : r = virtio_enqueue_prep(vq, &slot);
945 0 : if (r == EAGAIN)
946 0 : break;
947 0 : if (r != 0)
948 0 : panic("enqueue_prep for rx buffers: %d", r);
949 0 : if (sc->sc_rx_mbufs[slot] == NULL) {
950 0 : r = vio_add_rx_mbuf(sc, slot);
951 0 : if (r != 0) {
952 0 : virtio_enqueue_abort(vq, slot);
953 0 : break;
954 : }
955 : }
/* without MRG_RXBUF one extra descriptor is needed for the split
 * header/payload enqueue below */
956 0 : r = virtio_enqueue_reserve(vq, slot,
957 0 : sc->sc_rx_dmamaps[slot]->dm_nsegs + (mrg_rxbuf ? 0 : 1));
958 0 : if (r != 0) {
959 0 : vio_free_rx_mbuf(sc, slot);
960 0 : break;
961 : }
962 0 : bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot], 0,
963 : MCLBYTES, BUS_DMASYNC_PREREAD);
964 0 : if (mrg_rxbuf) {
965 0 : virtio_enqueue(vq, slot, sc->sc_rx_dmamaps[slot], 0);
966 0 : } else {
967 : /*
968 : * Buggy kvm wants a buffer of exactly the size of
969 : * the header in this case, so we have to split in
970 : * two.
971 : */
972 0 : virtio_enqueue_p(vq, slot, sc->sc_rx_dmamaps[slot],
973 0 : 0, sc->sc_hdr_size, 0);
974 0 : virtio_enqueue_p(vq, slot, sc->sc_rx_dmamaps[slot],
975 0 : sc->sc_hdr_size, MCLBYTES - sc->sc_hdr_size, 0);
976 : }
977 0 : virtio_enqueue_commit(vsc, vq, slot, 0);
978 : done = 1;
979 0 : }
/* return any unused slot reservations to the rxring accounting */
980 0 : if_rxr_put(&sc->sc_rx_ring, slots);
981 :
982 0 : if (done)
983 0 : virtio_notify(vsc, vq);
/* if no buffers could be posted, retry periodically via the rx tick */
984 0 : if (vq->vq_used_idx != vq->vq_avail_idx)
985 0 : timeout_del(&sc->sc_rxtick);
986 : else
987 0 : timeout_add_sec(&sc->sc_rxtick, 1);
988 0 : }
989 :
/*
 * Dequeue received packets and pass them to the network stack.
 * With VIRTIO_NET_F_MRG_RXBUF a packet may span several ring buffers;
 * hdr->num_buffers in the first buffer says how many, and the following
 * buffers are chained onto the first mbuf.  Returns non-zero if at least
 * one buffer was dequeued.
 */
int
vio_rxeof(struct vio_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[VQRX];
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *m0 = NULL, *mlast;
	int r = 0;
	int slot, len, bufs_left;
	struct virtio_net_hdr *hdr;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		r = 1;
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot], 0,
		    MCLBYTES, BUS_DMASYNC_POSTREAD);
		m = sc->sc_rx_mbufs[slot];
		KASSERT(m != NULL);
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_rx_dmamaps[slot]);
		sc->sc_rx_mbufs[slot] = NULL;
		virtio_dequeue_commit(vq, slot);
		if_rxr_put(&sc->sc_rx_ring, 1);
		m->m_len = m->m_pkthdr.len = len;
		m->m_pkthdr.csum_flags = 0;
		if (m0 == NULL) {
			/* first buffer of a packet: strip the virtio header */
			hdr = mtod(m, struct virtio_net_hdr *);
			m_adj(m, sc->sc_hdr_size);
			m0 = mlast = m;
			if (VIO_HAVE_MRG_RXBUF(sc))
				bufs_left = hdr->num_buffers - 1;
			else
				bufs_left = 0;
		}
		else {
			/* continuation buffer: append to the chain */
			m->m_flags &= ~M_PKTHDR;
			m0->m_pkthdr.len += m->m_len;
			mlast->m_next = m;
			mlast = m;
			bufs_left--;
		}

		if (bufs_left == 0) {
			ml_enqueue(&ml, m0);
			m0 = NULL;
		}
	}
	/* ring drained mid-packet: device promised more buffers than it gave */
	if (m0 != NULL) {
		DPRINTF("%s: expected %d buffers, got %d\n", __func__,
		    (int)hdr->num_buffers,
		    (int)hdr->num_buffers - bufs_left);
		ifp->if_ierrors++;
		m_freem(m0);
	}

	if_input(ifp, &ml);
	return r;
}
1048 :
1049 : int
1050 0 : vio_rx_intr(struct virtqueue *vq)
1051 : {
1052 0 : struct virtio_softc *vsc = vq->vq_owner;
1053 0 : struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1054 0 : int r, sum = 0;
1055 :
1056 : again:
1057 0 : r = vio_rxeof(sc);
1058 0 : sum += r;
1059 0 : if (r) {
1060 0 : vio_populate_rx_mbufs(sc);
1061 : /* set used event index to the next slot */
1062 0 : if (vsc->sc_features & VIRTIO_F_RING_EVENT_IDX) {
1063 0 : if (virtio_start_vq_intr(vq->vq_owner, vq))
1064 0 : goto again;
1065 : }
1066 : }
1067 :
1068 0 : return sum;
1069 : }
1070 :
1071 : void
1072 0 : vio_rxtick(void *arg)
1073 : {
1074 0 : struct virtqueue *vq = arg;
1075 0 : struct virtio_softc *vsc = vq->vq_owner;
1076 0 : struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1077 : int s;
1078 :
1079 0 : s = splnet();
1080 0 : vio_populate_rx_mbufs(sc);
1081 0 : splx(s);
1082 0 : }
1083 :
1084 : /* free all the mbufs; called from if_stop(disable) */
1085 : void
1086 0 : vio_rx_drain(struct vio_softc *sc)
1087 : {
1088 0 : struct virtqueue *vq = &sc->sc_vq[VQRX];
1089 : int i;
1090 :
1091 0 : for (i = 0; i < vq->vq_num; i++) {
1092 0 : if (sc->sc_rx_mbufs[i] == NULL)
1093 : continue;
1094 0 : vio_free_rx_mbuf(sc, i);
1095 0 : }
1096 0 : }
1097 :
/*
 * Transmission implementation
 */
1101 : /* actual transmission is done in if_start */
1102 : /* tx interrupt; dequeue and free mbufs */
1103 : /*
1104 : * tx interrupt is actually disabled unless the tx queue is full, i.e.
1105 : * IFF_OACTIVE is set. vio_txtick is used to make sure that mbufs
1106 : * are dequeued and freed even if no further transfer happens.
1107 : */
1108 : int
1109 0 : vio_tx_intr(struct virtqueue *vq)
1110 : {
1111 0 : struct virtio_softc *vsc = vq->vq_owner;
1112 0 : struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1113 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
1114 : int r;
1115 :
1116 0 : r = vio_txeof(vq);
1117 0 : vio_start(ifp);
1118 0 : return r;
1119 : }
1120 :
/*
 * Periodic tx reclaim: make sure transmitted mbufs are freed even when
 * the tx interrupt stays disabled.  Runs from a timeout.
 */
void
vio_txtick(void *arg)
{
	struct virtqueue *vq = arg;
	int s;

	s = splnet();
	vio_tx_intr(vq);
	splx(s);
}
1129 :
1130 : int
1131 0 : vio_txeof(struct virtqueue *vq)
1132 : {
1133 0 : struct virtio_softc *vsc = vq->vq_owner;
1134 0 : struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1135 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
1136 : struct mbuf *m;
1137 : int r = 0;
1138 0 : int slot, len;
1139 :
1140 0 : while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
1141 0 : struct virtio_net_hdr *hdr = &sc->sc_tx_hdrs[slot];
1142 0 : r++;
1143 0 : VIO_DMAMEM_SYNC(vsc, sc, hdr, sc->sc_hdr_size,
1144 : BUS_DMASYNC_POSTWRITE);
1145 0 : bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot], 0,
1146 : sc->sc_tx_dmamaps[slot]->dm_mapsize,
1147 : BUS_DMASYNC_POSTWRITE);
1148 0 : m = sc->sc_tx_mbufs[slot];
1149 0 : bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[slot]);
1150 0 : sc->sc_tx_mbufs[slot] = 0;
1151 0 : virtio_dequeue_commit(vq, slot);
1152 0 : m_freem(m);
1153 : }
1154 :
1155 0 : if (r) {
1156 0 : ifq_clr_oactive(&ifp->if_snd);
1157 0 : virtio_stop_vq_intr(vsc, &sc->sc_vq[VQTX]);
1158 0 : }
1159 0 : if (vq->vq_used_idx == vq->vq_avail_idx)
1160 0 : timeout_del(&sc->sc_txtick);
1161 0 : else if (r)
1162 0 : timeout_add_sec(&sc->sc_txtick, 1);
1163 0 : return r;
1164 0 : }
1165 :
1166 : int
1167 0 : vio_encap(struct vio_softc *sc, int slot, struct mbuf *m)
1168 : {
1169 0 : struct virtio_softc *vsc = sc->sc_virtio;
1170 0 : bus_dmamap_t dmap= sc->sc_tx_dmamaps[slot];
1171 : int r;
1172 :
1173 0 : r = bus_dmamap_load_mbuf(vsc->sc_dmat, dmap, m,
1174 : BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1175 0 : switch (r) {
1176 : case 0:
1177 : break;
1178 : case EFBIG:
1179 0 : if (m_defrag(m, M_DONTWAIT) == 0 &&
1180 0 : bus_dmamap_load_mbuf(vsc->sc_dmat, dmap, m,
1181 0 : BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
1182 : break;
1183 :
1184 : /* FALLTHROUGH */
1185 : default:
1186 0 : return ENOBUFS;
1187 : }
1188 0 : sc->sc_tx_mbufs[slot] = m;
1189 0 : return 0;
1190 0 : }
1191 :
1192 : /* free all the mbufs already put on vq; called from if_stop(disable) */
1193 : void
1194 0 : vio_tx_drain(struct vio_softc *sc)
1195 : {
1196 0 : struct virtio_softc *vsc = sc->sc_virtio;
1197 0 : struct virtqueue *vq = &sc->sc_vq[VQTX];
1198 : int i;
1199 :
1200 0 : for (i = 0; i < vq->vq_num; i++) {
1201 0 : if (sc->sc_tx_mbufs[i] == NULL)
1202 : continue;
1203 0 : bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[i]);
1204 0 : m_freem(sc->sc_tx_mbufs[i]);
1205 0 : sc->sc_tx_mbufs[i] = NULL;
1206 0 : }
1207 0 : }
1208 :
1209 : /*
1210 : * Control vq
1211 : */
/*
 * Issue a VIRTIO_NET_CTRL_RX class command (e.g. PROMISC/ALLMULTI on or
 * off) over the control virtqueue and wait for completion.
 * Serializes access to the shared control buffers via vio_wait_ctrl();
 * returns 0 on success, EIO if the device NAKed the command, or an
 * error from the wait routines.
 */
int
vio_ctrl_rx(struct vio_softc *sc, int cmd, int onoff)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[VQCTL];
	int r, slot;

	splassert(IPL_NET);

	/* take exclusive ownership of the control buffers */
	if ((r = vio_wait_ctrl(sc)) != 0)
		return r;

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX;
	sc->sc_ctrl_cmd->command = cmd;
	sc->sc_ctrl_rx->onoff = onoff;

	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_cmd,
	    sizeof(*sc->sc_ctrl_cmd), BUS_DMASYNC_PREWRITE);
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_rx,
	    sizeof(*sc->sc_ctrl_rx), BUS_DMASYNC_PREWRITE);
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_status,
	    sizeof(*sc->sc_ctrl_status), BUS_DMASYNC_PREREAD);

	/*
	 * The control vq is only ever used by one command at a time
	 * (guarded by sc_ctrl_inuse), so a busy queue is a driver bug.
	 */
	r = virtio_enqueue_prep(vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", sc->sc_dev.dv_xname);
	r = virtio_enqueue_reserve(vq, slot, 3);
	if (r != 0)
		panic("%s: control vq busy!?", sc->sc_dev.dv_xname);
	/* descriptor layout: command, payload (device-readable), status (device-writable) */
	VIO_DMAMEM_ENQUEUE(sc, vq, slot, sc->sc_ctrl_cmd,
	    sizeof(*sc->sc_ctrl_cmd), 1);
	VIO_DMAMEM_ENQUEUE(sc, vq, slot, sc->sc_ctrl_rx,
	    sizeof(*sc->sc_ctrl_rx), 1);
	VIO_DMAMEM_ENQUEUE(sc, vq, slot, sc->sc_ctrl_status,
	    sizeof(*sc->sc_ctrl_status), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((r = vio_wait_ctrl_done(sc)) != 0)
		goto out;

	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_cmd,
	    sizeof(*sc->sc_ctrl_cmd), BUS_DMASYNC_POSTWRITE);
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_rx,
	    sizeof(*sc->sc_ctrl_rx), BUS_DMASYNC_POSTWRITE);
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_status,
	    sizeof(*sc->sc_ctrl_status), BUS_DMASYNC_POSTREAD);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK) {
		r = 0;
	} else {
		printf("%s: ctrl cmd %d failed\n", sc->sc_dev.dv_xname, cmd);
		r = EIO;
	}

	DPRINTF("%s: cmd %d %d: %d\n", __func__, cmd, (int)onoff, r);
out:
	/* release the control buffers and wake any waiter */
	vio_ctrl_wakeup(sc, FREE);
	return r;
}
1272 :
1273 : int
1274 0 : vio_wait_ctrl(struct vio_softc *sc)
1275 : {
1276 : int r = 0;
1277 :
1278 0 : while (sc->sc_ctrl_inuse != FREE) {
1279 0 : r = rwsleep(&sc->sc_ctrl_inuse, &netlock, PRIBIO|PCATCH,
1280 : "viowait", 0);
1281 0 : if (r == EINTR)
1282 0 : return r;
1283 : }
1284 0 : sc->sc_ctrl_inuse = INUSE;
1285 :
1286 0 : return r;
1287 0 : }
1288 :
1289 : int
1290 0 : vio_wait_ctrl_done(struct vio_softc *sc)
1291 : {
1292 : int r = 0;
1293 :
1294 0 : while (sc->sc_ctrl_inuse != DONE && sc->sc_ctrl_inuse != RESET) {
1295 0 : if (sc->sc_ctrl_inuse == RESET) {
1296 : r = 1;
1297 0 : break;
1298 : }
1299 0 : r = rwsleep(&sc->sc_ctrl_inuse, &netlock, PRIBIO|PCATCH,
1300 : "viodone", 0);
1301 0 : if (r == EINTR)
1302 : break;
1303 : }
1304 0 : return r;
1305 : }
1306 :
/*
 * Move the control-queue state machine to `new` and wake up anyone
 * sleeping on it (vio_wait_ctrl / vio_wait_ctrl_done).
 */
void
vio_ctrl_wakeup(struct vio_softc *sc, enum vio_ctrl_state new)
{
	sc->sc_ctrl_inuse = new;
	wakeup(&sc->sc_ctrl_inuse);
}
1313 :
1314 : int
1315 0 : vio_ctrleof(struct virtqueue *vq)
1316 : {
1317 0 : struct virtio_softc *vsc = vq->vq_owner;
1318 0 : struct vio_softc *sc = (struct vio_softc *)vsc->sc_child;
1319 0 : int r = 0, ret, slot;
1320 :
1321 : again:
1322 0 : ret = virtio_dequeue(vsc, vq, &slot, NULL);
1323 0 : if (ret == ENOENT)
1324 0 : return r;
1325 0 : virtio_dequeue_commit(vq, slot);
1326 0 : r++;
1327 0 : vio_ctrl_wakeup(sc, DONE);
1328 0 : if (virtio_start_vq_intr(vsc, vq))
1329 0 : goto again;
1330 :
1331 0 : return r;
1332 0 : }
1333 :
/*
 * Issue VIRTIO_NET_CTRL_MAC_TABLE_SET and wait for completion.
 * Pushes the unicast and multicast MAC tables (already filled in by
 * vio_iff()) to the device.  Returns 0 on success, EIO if the device
 * rejected the tables (caller falls back to allmulti), or an error
 * from the wait routines.
 */
int
vio_set_rx_filter(struct vio_softc *sc)
{
	/* filter already set in sc_ctrl_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[VQCTL];
	int r, slot;

	splassert(IPL_NET);

	/* take exclusive ownership of the control buffers */
	if ((r = vio_wait_ctrl(sc)) != 0)
		return r;

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_cmd,
	    sizeof(*sc->sc_ctrl_cmd), BUS_DMASYNC_PREWRITE);
	/* sc_ctrl_mac_info covers both the uc and mc tables */
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_mac_info,
	    VIO_CTRL_MAC_INFO_SIZE, BUS_DMASYNC_PREWRITE);
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_status,
	    sizeof(*sc->sc_ctrl_status), BUS_DMASYNC_PREREAD);

	/* only one control command can be in flight (sc_ctrl_inuse) */
	r = virtio_enqueue_prep(vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", sc->sc_dev.dv_xname);
	r = virtio_enqueue_reserve(vq, slot, 4);
	if (r != 0)
		panic("%s: control vq busy!?", sc->sc_dev.dv_xname);
	/* descriptor layout: command, uc table, mc table, status */
	VIO_DMAMEM_ENQUEUE(sc, vq, slot, sc->sc_ctrl_cmd,
	    sizeof(*sc->sc_ctrl_cmd), 1);
	VIO_DMAMEM_ENQUEUE(sc, vq, slot, sc->sc_ctrl_mac_tbl_uc,
	    sizeof(*sc->sc_ctrl_mac_tbl_uc) +
	    sc->sc_ctrl_mac_tbl_uc->nentries * ETHER_ADDR_LEN, 1);
	VIO_DMAMEM_ENQUEUE(sc, vq, slot, sc->sc_ctrl_mac_tbl_mc,
	    sizeof(*sc->sc_ctrl_mac_tbl_mc) +
	    sc->sc_ctrl_mac_tbl_mc->nentries * ETHER_ADDR_LEN, 1);
	VIO_DMAMEM_ENQUEUE(sc, vq, slot, sc->sc_ctrl_status,
	    sizeof(*sc->sc_ctrl_status), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((r = vio_wait_ctrl_done(sc)) != 0)
		goto out;

	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_cmd,
	    sizeof(*sc->sc_ctrl_cmd), BUS_DMASYNC_POSTWRITE);
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_mac_info,
	    VIO_CTRL_MAC_INFO_SIZE, BUS_DMASYNC_POSTWRITE);
	VIO_DMAMEM_SYNC(vsc, sc, sc->sc_ctrl_status,
	    sizeof(*sc->sc_ctrl_status), BUS_DMASYNC_POSTREAD);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK) {
		r = 0;
	} else {
		/* The host's filter table is not large enough */
		printf("%s: failed setting rx filter\n", sc->sc_dev.dv_xname);
		r = EIO;
	}

out:
	/* release the control buffers and wake any waiter */
	vio_ctrl_wakeup(sc, FREE);
	return r;
}
1398 :
/*
 * Program the receive filter to match the interface state (ioctl
 * SIOCSIFFLAGS / multicast list changes).  Builds the unicast and
 * multicast MAC tables, then pushes them and the ALLMULTI/PROMISC
 * settings to the device via the control vq.  On device errors it
 * degrades gracefully: failed MAC filter -> allmulti; failed allmulti
 * -> promisc.
 */
void
vio_iff(struct vio_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	int nentries = 0;
	int promisc = 0, allmulti = 0, rxfilter = 0;
	int r;

	splassert(IPL_NET);

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (vsc->sc_nvqs < 3) {
		/* no ctrl vq; always promisc */
		ifp->if_flags |= IFF_ALLMULTI | IFF_PROMISC;
		return;
	}

	if (sc->sc_dev.dv_cfdata->cf_flags & CONFFLAG_QEMU_VLAN_BUG)
		ifp->if_flags |= IFF_PROMISC;

	/* MAC filtering only works for plain unicast entries that fit the table */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= VIRTIO_NET_CTRL_MAC_MC_ENTRIES) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			promisc = 1;
		else
			allmulti = 1;
	} else {
		rxfilter = 1;

		/* copy the multicast list into the device-visible table */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries++],
			    enm->enm_addrlo, ETHER_ADDR_LEN);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* set unicast address, VirtualBox wants that */
	memcpy(sc->sc_ctrl_mac_tbl_uc->macs[0], ac->ac_enaddr, ETHER_ADDR_LEN);
	sc->sc_ctrl_mac_tbl_uc->nentries = 1;

	sc->sc_ctrl_mac_tbl_mc->nentries = rxfilter ? nentries : 0;

	/* NOTE(review): sc_nvqs cannot have changed since the check above;
	 * this second test looks redundant — confirm before removing. */
	if (vsc->sc_nvqs < 3)
		return;

	r = vio_set_rx_filter(sc);
	if (r == EIO)
		allmulti = 1; /* fallback */
	else if (r != 0)
		return;

	r = vio_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, allmulti);
	if (r == EIO)
		promisc = 1; /* fallback */
	else if (r != 0)
		return;

	vio_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, promisc);
}
|