/*	$OpenBSD: if_xnf.c,v 1.63 2018/01/20 20:03:45 mikeb Exp $	*/

/*
 * Copyright (c) 2015, 2016 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/task.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <dev/pv/xenreg.h>
#include <dev/pv/xenvar.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

/* #define XNF_DEBUG */

#ifdef XNF_DEBUG
#define DPRINTF(x...)		printf(x)
#else
#define DPRINTF(x...)
#endif

/*
 * Rx ring
 */

struct xnf_rx_req {
	uint16_t	 rxq_id;
	uint16_t	 rxq_pad;
	uint32_t	 rxq_ref;
} __packed;

struct xnf_rx_rsp {
	uint16_t	 rxp_id;
	uint16_t	 rxp_offset;
	uint16_t	 rxp_flags;
#define XNF_RXF_CSUM_VALID	0x0001
#define XNF_RXF_CSUM_BLANK	0x0002
#define XNF_RXF_CHUNK		0x0004
#define XNF_RXF_MGMT		0x0008
	int16_t		 rxp_status;
} __packed;

union xnf_rx_desc {
	struct xnf_rx_req	 rxd_req;
	struct xnf_rx_rsp	 rxd_rsp;
} __packed;

#define XNF_RX_DESC		256
#define XNF_MCLEN		PAGE_SIZE
#define XNF_RX_MIN		32

struct xnf_rx_ring {
	volatile uint32_t	 rxr_prod;
	volatile uint32_t	 rxr_prod_event;
	volatile uint32_t	 rxr_cons;
	volatile uint32_t	 rxr_cons_event;
	uint32_t		 rxr_reserved[12];
	union xnf_rx_desc	 rxr_desc[XNF_RX_DESC];
} __packed;

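/*
 * A note on the ring protocol (added commentary): both rings use
 * free-running 32-bit producer/consumer counters that are reduced
 * modulo the ring size only when indexing the descriptor array, e.g.
 * slot = prod & (XNF_RX_DESC - 1).  Because the counters wrap as
 * unsigned integers, "prod - cons" always yields the number of
 * outstanding descriptors, even across the 2^32 boundary.  The *_event
 * fields implement Xen's event throttling: the peer raises an event
 * (interrupt) only once the corresponding counter moves past the
 * stored event value.
 */
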

/*
 * Tx ring
 */

struct xnf_tx_req {
	uint32_t	 txq_ref;
	uint16_t	 txq_offset;
	uint16_t	 txq_flags;
#define XNF_TXF_CSUM_BLANK	0x0001
#define XNF_TXF_CSUM_VALID	0x0002
#define XNF_TXF_CHUNK		0x0004
#define XNF_TXF_ETXRA		0x0008
	uint16_t	 txq_id;
	uint16_t	 txq_size;
} __packed;

struct xnf_tx_rsp {
	uint16_t	 txp_id;
	int16_t		 txp_status;
} __packed;

union xnf_tx_desc {
	struct xnf_tx_req	 txd_req;
	struct xnf_tx_rsp	 txd_rsp;
} __packed;

#define XNF_TX_DESC		256
#define XNF_TX_FRAG		18

struct xnf_tx_ring {
	volatile uint32_t	 txr_prod;
	volatile uint32_t	 txr_prod_event;
	volatile uint32_t	 txr_cons;
	volatile uint32_t	 txr_cons_event;
	uint32_t		 txr_reserved[12];
	union xnf_tx_desc	 txr_desc[XNF_TX_DESC];
} __packed;

struct xnf_tx_buf {
	uint32_t	 txb_ndesc;
	bus_dmamap_t	 txb_dmap;
	struct mbuf	*txb_mbuf;
};

/* Management frame, "extra info" in Xen parlance */
struct xnf_mgmt {
	uint8_t	 mg_type;
#define XNF_MGMT_MCAST_ADD	2
#define XNF_MGMT_MCAST_DEL	3
	uint8_t	 mg_flags;
	union {
		uint8_t		 mgu_mcaddr[ETHER_ADDR_LEN];
		uint16_t	 mgu_pad[3];
	} u;
#define mg_mcaddr		u.mgu_mcaddr
} __packed;


struct xnf_softc {
	struct device		 sc_dev;
	struct device		*sc_parent;
	char			 sc_node[XEN_MAX_NODE_LEN];
	char			 sc_backend[XEN_MAX_BACKEND_LEN];
	bus_dma_tag_t		 sc_dmat;
	int			 sc_domid;

	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;

	xen_intr_handle_t	 sc_xih;

	int			 sc_caps;
#define XNF_CAP_SG		0x0001
#define XNF_CAP_CSUM4		0x0002
#define XNF_CAP_CSUM6		0x0004
#define XNF_CAP_MCAST		0x0008
#define XNF_CAP_SPLIT		0x0010
#define XNF_CAP_MULTIQ		0x0020

	/* Rx ring */
	struct xnf_rx_ring	*sc_rx_ring;
	bus_dmamap_t		 sc_rx_rmap;	/* map for the ring */
	bus_dma_segment_t	 sc_rx_seg;
	uint32_t		 sc_rx_ref;	/* grant table ref */
	uint32_t		 sc_rx_cons;
	struct mbuf		*sc_rx_buf[XNF_RX_DESC];
	bus_dmamap_t		 sc_rx_dmap[XNF_RX_DESC]; /* maps for packets */
	struct mbuf		*sc_rx_cbuf[2];	/* chain handling */

	/* Tx ring */
	struct xnf_tx_ring	*sc_tx_ring;
	bus_dmamap_t		 sc_tx_rmap;	/* map for the ring */
	bus_dma_segment_t	 sc_tx_seg;
	uint32_t		 sc_tx_ref;	/* grant table ref */
	uint32_t		 sc_tx_cons;
	int			 sc_tx_frags;
	uint32_t		 sc_tx_next;	/* next buffer */
	volatile unsigned int	 sc_tx_avail;
	struct xnf_tx_buf	 sc_tx_buf[XNF_TX_DESC];
};
204 :
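/*
 * Tx slot accounting (added commentary): sc_tx_avail counts buffer
 * slots still free for xnf_encap() to claim.  It is updated with
 * atomic_sub_int()/atomic_add_int() because xnf_start() runs from the
 * MP-safe send queue while xnf_txeof() releases slots from interrupt
 * context.
 */
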
int	xnf_match(struct device *, void *, void *);
void	xnf_attach(struct device *, struct device *, void *);
int	xnf_detach(struct device *, int);
int	xnf_lladdr(struct xnf_softc *);
int	xnf_ioctl(struct ifnet *, u_long, caddr_t);
int	xnf_media_change(struct ifnet *);
void	xnf_media_status(struct ifnet *, struct ifmediareq *);
int	xnf_iff(struct xnf_softc *);
void	xnf_init(struct xnf_softc *);
void	xnf_stop(struct xnf_softc *);
void	xnf_start(struct ifqueue *);
int	xnf_encap(struct xnf_softc *, struct mbuf *, uint32_t *);
void	xnf_intr(void *);
void	xnf_watchdog(struct ifnet *);
void	xnf_txeof(struct xnf_softc *);
void	xnf_rxeof(struct xnf_softc *);
int	xnf_rx_ring_fill(struct xnf_softc *);
int	xnf_rx_ring_create(struct xnf_softc *);
void	xnf_rx_ring_drain(struct xnf_softc *);
void	xnf_rx_ring_destroy(struct xnf_softc *);
int	xnf_tx_ring_create(struct xnf_softc *);
void	xnf_tx_ring_drain(struct xnf_softc *);
void	xnf_tx_ring_destroy(struct xnf_softc *);
int	xnf_capabilities(struct xnf_softc *sc);
int	xnf_init_backend(struct xnf_softc *);

struct cfdriver xnf_cd = {
	NULL, "xnf", DV_IFNET
};

const struct cfattach xnf_ca = {
	sizeof(struct xnf_softc), xnf_match, xnf_attach, xnf_detach
};

int
xnf_match(struct device *parent, void *match, void *aux)
{
	struct xen_attach_args *xa = aux;

	if (strcmp("vif", xa->xa_name))
		return (0);

	return (1);
}

void
xnf_attach(struct device *parent, struct device *self, void *aux)
{
	struct xen_attach_args *xa = aux;
	struct xnf_softc *sc = (struct xnf_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	sc->sc_parent = parent;
	sc->sc_dmat = xa->xa_dmat;
	sc->sc_domid = xa->xa_domid;

	memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
	memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if (xnf_lladdr(sc)) {
		printf(": failed to obtain MAC address\n");
		return;
	}

	if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xnf_intr, sc,
	    ifp->if_xname)) {
		printf(": failed to establish an interrupt\n");
		return;
	}
	xen_intr_mask(sc->sc_xih);

	printf(" backend %d channel %u: address %s\n", sc->sc_domid,
	    sc->sc_xih, ether_sprintf(sc->sc_ac.ac_enaddr));

	if (xnf_capabilities(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		return;
	}

	if (sc->sc_caps & XNF_CAP_SG)
		ifp->if_hardmtu = 9000;

	if (xnf_rx_ring_create(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		return;
	}
	if (xnf_tx_ring_create(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		xnf_rx_ring_destroy(sc);
		return;
	}
	if (xnf_init_backend(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		xnf_rx_ring_destroy(sc);
		xnf_tx_ring_destroy(sc);
		return;
	}

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = xnf_ioctl;
	ifp->if_qstart = xnf_start;
	ifp->if_watchdog = xnf_watchdog;
	ifp->if_softc = sc;

	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->sc_caps & XNF_CAP_CSUM4)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	if (sc->sc_caps & XNF_CAP_CSUM6)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	IFQ_SET_MAXLEN(&ifp->if_snd, XNF_TX_DESC - 1);

	ifmedia_init(&sc->sc_media, IFM_IMASK, xnf_media_change,
	    xnf_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Kick out emulated em's and re's */
	xen_unplug_emulated(parent, XEN_UNPLUG_NIC);
}

int
xnf_detach(struct device *self, int flags)
{
	struct xnf_softc *sc = (struct xnf_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	xnf_stop(sc);

	ether_ifdetach(ifp);
	if_detach(ifp);

	xen_intr_disestablish(sc->sc_xih);

	if (sc->sc_tx_ring)
		xnf_tx_ring_destroy(sc);
	if (sc->sc_rx_ring)
		xnf_rx_ring_destroy(sc);

	return (0);
}

static int
nibble(int ch)
{
	if (ch >= '0' && ch <= '9')
		return (ch - '0');
	if (ch >= 'A' && ch <= 'F')
		return (10 + ch - 'A');
	if (ch >= 'a' && ch <= 'f')
		return (10 + ch - 'a');
	return (-1);
}

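/*
 * Added commentary: the backend publishes the MAC address as a
 * human-readable xenstore string, e.g. "00:16:3e:5e:a3:01" (00:16:3e
 * being the Xen OUI).  The loop below consumes two hex digits plus a
 * separator per octet (i advances by 3), so a malformed string makes
 * nibble() return -1 and the attach fails cleanly.
 */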
int
xnf_lladdr(struct xnf_softc *sc)
{
	char enaddr[ETHER_ADDR_LEN];
	char mac[32];
	int i, j, lo, hi;

	if (xs_getprop(sc->sc_parent, sc->sc_backend, "mac", mac, sizeof(mac)))
		return (-1);

	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 3, j++) {
		if ((hi = nibble(mac[i])) == -1 ||
		    (lo = nibble(mac[i+1])) == -1)
			return (-1);
		enaddr[j] = hi << 4 | lo;
	}

	memcpy(sc->sc_ac.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	return (0);
}

int
xnf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xnf_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				xnf_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xnf_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xnf_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
xnf_media_change(struct ifnet *ifp)
{
	return (0);
}

void
xnf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
}

int
xnf_iff(struct xnf_softc *sc)
{
	return (0);
}

void
xnf_init(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	xnf_stop(sc);

	xnf_iff(sc);

	xnf_rx_ring_fill(sc);

	if (xen_intr_unmask(sc->sc_xih)) {
		printf("%s: failed to enable interrupts\n", ifp->if_xname);
		xnf_stop(sc);
		return;
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

void
xnf_stop(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_flags &= ~IFF_RUNNING;

	xen_intr_mask(sc->sc_xih);

	ifp->if_timer = 0;

	ifq_barrier(&ifp->if_snd);
	xen_intr_barrier(sc->sc_xih);

	ifq_clr_oactive(&ifp->if_snd);

	if (sc->sc_tx_ring)
		xnf_tx_ring_drain(sc);
	if (sc->sc_rx_ring)
		xnf_rx_ring_drain(sc);
}

void
xnf_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct xnf_softc *sc = ifp->if_softc;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	struct mbuf *m;
	int pkts = 0;
	uint32_t prod, oprod;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD);

	prod = oprod = txr->txr_prod;

	for (;;) {
		if (((XNF_TX_DESC - (prod - sc->sc_tx_cons)) <
		    sc->sc_tx_frags) || !sc->sc_tx_avail) {
			/* transient */
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		if (xnf_encap(sc, m, &prod)) {
			/* the chain is too large */
			ifp->if_oerrors++;
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		pkts++;
	}
	if (pkts > 0) {
		txr->txr_prod = prod;
		if (txr->txr_cons_event <= txr->txr_cons)
			txr->txr_cons_event = txr->txr_cons +
			    ((txr->txr_prod - txr->txr_cons) >> 1) + 1;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
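		/*
		 * Added commentary: txr_prod_event holds the first
		 * producer value the backend still wants to hear about.
		 * With free-running counters, the test
		 * "prod - txr_prod_event < prod - oprod" is an
		 * overflow-safe way of asking whether the event index
		 * falls within (oprod, prod], i.e. within this batch,
		 * and only then is the backend signalled.
		 */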
		if (prod - txr->txr_prod_event < prod - oprod)
			xen_intr_signal(sc->sc_xih);
		ifp->if_timer = 5;
	}
}

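/*
 * Added commentary: count the descriptors a chain will occupy; every
 * page boundary a buffer crosses costs one extra descriptor.  For
 * example, a 3000-byte segment that starts 2048 bytes into a 4 KiB
 * page spills onto a second page and thus needs two descriptors.
 */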
static inline int
xnf_fragcount(struct mbuf *m_head)
{
	struct mbuf *m;
	vaddr_t va, va0;
	int n = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		/* start of the buffer */
		for (va0 = va = mtod(m, vaddr_t);
		    /* does the buffer end on this page? */
		    va + (PAGE_SIZE - (va & PAGE_MASK)) < va0 + m->m_len;
		    /* move on to the next page */
		    va += PAGE_SIZE - (va & PAGE_MASK))
			n++;
		n++;
	}
	return (n);
}

int
xnf_encap(struct xnf_softc *sc, struct mbuf *m_head, uint32_t *prod)
{
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	struct xnf_tx_buf *txb = NULL;
	union xnf_tx_desc *txd = NULL;
	struct mbuf *m;
	uint32_t oprod = *prod;
	uint16_t id;
	int i, flags, n, used = 0;

	if ((xnf_fragcount(m_head) > sc->sc_tx_frags) &&
	    m_defrag(m_head, M_DONTWAIT))
		return (ENOBUFS);

	flags = (sc->sc_domid << 16) | BUS_DMA_WRITE | BUS_DMA_NOWAIT;

	for (m = m_head; m != NULL && m->m_len > 0; m = m->m_next) {
		i = *prod & (XNF_TX_DESC - 1);
		txd = &txr->txr_desc[i];

		/*
		 * Find an unused TX buffer.  We're guaranteed to find one
		 * because xnf_encap cannot be called with sc_tx_avail == 0.
		 */
		do {
			id = sc->sc_tx_next++ & (XNF_TX_DESC - 1);
			txb = &sc->sc_tx_buf[id];
		} while (txb->txb_mbuf);

		if (bus_dmamap_load(sc->sc_dmat, txb->txb_dmap, m->m_data,
		    m->m_len, NULL, flags)) {
			DPRINTF("%s: failed to load %u bytes @%lu\n",
			    sc->sc_dev.dv_xname, m->m_len,
			    mtod(m, vaddr_t) & PAGE_MASK);
			goto unroll;
		}

		for (n = 0; n < txb->txb_dmap->dm_nsegs; n++) {
			i = *prod & (XNF_TX_DESC - 1);
			txd = &txr->txr_desc[i];

			if (m == m_head && n == 0) {
				if (m->m_pkthdr.csum_flags &
				    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
					txd->txd_req.txq_flags =
					    XNF_TXF_CSUM_BLANK |
					    XNF_TXF_CSUM_VALID;
				txd->txd_req.txq_size = m->m_pkthdr.len;
			} else {
				txd->txd_req.txq_size =
				    txb->txb_dmap->dm_segs[n].ds_len;
			}
			txd->txd_req.txq_ref =
			    txb->txb_dmap->dm_segs[n].ds_addr;
			if (n == 0)
				txd->txd_req.txq_offset =
				    mtod(m, vaddr_t) & PAGE_MASK;
			/* The chunk flag will be removed from the last one */
			txd->txd_req.txq_flags |= XNF_TXF_CHUNK;
			txd->txd_req.txq_id = id;

			txb->txb_ndesc++;
			(*prod)++;
		}

		txb->txb_mbuf = m;
		used++;
	}

	/* Clear the chunk flag from the last segment */
	txd->txd_req.txq_flags &= ~XNF_TXF_CHUNK;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_PREWRITE);

	KASSERT(sc->sc_tx_avail > used);
	atomic_sub_int(&sc->sc_tx_avail, used);

	return (0);

 unroll:
	DPRINTF("%s: unrolling from %u to %u\n", sc->sc_dev.dv_xname,
	    *prod, oprod);
	for (; *prod != oprod; (*prod)--) {
		i = (*prod - 1) & (XNF_TX_DESC - 1);
		txd = &txr->txr_desc[i];
		id = txd->txd_req.txq_id;
		txb = &sc->sc_tx_buf[id];

		memset(txd, 0, sizeof(*txd));

		if (txb->txb_mbuf) {
			bus_dmamap_sync(sc->sc_dmat, txb->txb_dmap, 0, 0,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->txb_dmap);

			txb->txb_mbuf = NULL;
			txb->txb_ndesc = 0;
		}
	}
	return (ENOBUFS);
}

void
xnf_intr(void *arg)
{
	struct xnf_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xnf_txeof(sc);
		xnf_rxeof(sc);
	}
}

void
xnf_watchdog(struct ifnet *ifp)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;

	printf("%s: tx stuck: prod %u cons %u,%u evt %u,%u\n",
	    ifp->if_xname, txr->txr_prod, txr->txr_cons, sc->sc_tx_cons,
	    txr->txr_prod_event, txr->txr_cons_event);
}

void
xnf_txeof(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	struct xnf_tx_buf *txb;
	union xnf_tx_desc *txd;
	uint done = 0;
	uint32_t cons;
	uint16_t id;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (cons = sc->sc_tx_cons; cons != txr->txr_cons; cons++) {
		i = cons & (XNF_TX_DESC - 1);
		txd = &txr->txr_desc[i];
		id = txd->txd_rsp.txp_id;
		txb = &sc->sc_tx_buf[id];

		KASSERT(txb->txb_ndesc > 0);
		if (--txb->txb_ndesc == 0) {
			bus_dmamap_sync(sc->sc_dmat, txb->txb_dmap, 0, 0,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->txb_dmap);

			m_free(txb->txb_mbuf);
			txb->txb_mbuf = NULL;
			done++;
		}

		memset(txd, 0, sizeof(*txd));
	}

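	/*
	 * Added commentary: the consumer event is re-armed half-way
	 * between the new consumer index and the producer, so the
	 * backend interrupts us again only after completing about half
	 * of the outstanding requests.  E.g. with cons = 10 and
	 * prod = 20, cons_event becomes 16 and the next event fires
	 * once the backend has pushed responses up to index 16.
	 */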
	sc->sc_tx_cons = cons;
	txr->txr_cons_event = sc->sc_tx_cons +
	    ((txr->txr_prod - sc->sc_tx_cons) >> 1) + 1;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	atomic_add_int(&sc->sc_tx_avail, done);

	if (sc->sc_tx_cons == txr->txr_prod)
		ifp->if_timer = 0;
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

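/*
 * Added commentary: a packet larger than one page arrives as a chain
 * of descriptors with XNF_RXF_CHUNK set on all but the last one.  The
 * partially assembled chain is parked in sc_rx_cbuf[] between
 * invocations, so a chain split across two interrupts is still
 * reassembled correctly.
 */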
void
xnf_rxeof(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
	union xnf_rx_desc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *fmp = sc->sc_rx_cbuf[0];
	struct mbuf *lmp = sc->sc_rx_cbuf[1];
	struct mbuf *m;
	bus_dmamap_t dmap;
	uint32_t cons;
	uint16_t id;
	int i, flags, len, offset;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (cons = sc->sc_rx_cons; cons != rxr->rxr_cons; cons++) {
		i = cons & (XNF_RX_DESC - 1);
		rxd = &rxr->rxr_desc[i];

		id = rxd->rxd_rsp.rxp_id;
		len = rxd->rxd_rsp.rxp_status;
		flags = rxd->rxd_rsp.rxp_flags;
		offset = rxd->rxd_rsp.rxp_offset;

		dmap = sc->sc_rx_dmap[id];
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);

		m = sc->sc_rx_buf[id];
		KASSERT(m != NULL);
		sc->sc_rx_buf[id] = NULL;

		if (flags & XNF_RXF_MGMT) {
			printf("%s: management data present\n",
			    ifp->if_xname);
			m_freem(m);
			continue;
		}

		if (flags & XNF_RXF_CSUM_VALID)
			m->m_pkthdr.csum_flags = M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

		if (len < 0 || (len + offset > PAGE_SIZE)) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		m->m_len = len;
		m->m_data += offset;

		if (fmp == NULL) {
			m->m_pkthdr.len = len;
			fmp = m;
		} else {
			m->m_flags &= ~M_PKTHDR;
			lmp->m_next = m;
			fmp->m_pkthdr.len += m->m_len;
		}
		lmp = m;

		if (flags & XNF_RXF_CHUNK) {
			sc->sc_rx_cbuf[0] = fmp;
			sc->sc_rx_cbuf[1] = lmp;
			continue;
		}

		m = fmp;

		ml_enqueue(&ml, m);
		sc->sc_rx_cbuf[0] = sc->sc_rx_cbuf[1] = fmp = lmp = NULL;

		memset(rxd, 0, sizeof(*rxd));
		rxd->rxd_req.rxq_id = id;
	}

	sc->sc_rx_cons = cons;
	rxr->rxr_cons_event = sc->sc_rx_cons + 1;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_input(ifp, &ml);

	if (xnf_rx_ring_fill(sc) || (sc->sc_rx_cons != rxr->rxr_cons))
		xen_intr_schedule(sc->sc_xih);
}

int
xnf_rx_ring_fill(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
	union xnf_rx_desc *rxd;
	bus_dmamap_t dmap;
	struct mbuf *m;
	uint32_t cons, prod, oprod;
	uint16_t id;
	int i, flags, resched = 0;

	cons = rxr->rxr_cons;
	prod = oprod = rxr->rxr_prod;

	while (prod - cons < XNF_RX_DESC) {
		i = prod & (XNF_RX_DESC - 1);
		rxd = &rxr->rxr_desc[i];

		id = rxd->rxd_rsp.rxp_id;
		if (sc->sc_rx_buf[id])
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, XNF_MCLEN);
		if (m == NULL)
			break;
		m->m_len = m->m_pkthdr.len = XNF_MCLEN;
		dmap = sc->sc_rx_dmap[id];
		flags = (sc->sc_domid << 16) | BUS_DMA_READ | BUS_DMA_NOWAIT;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, flags)) {
			m_freem(m);
			break;
		}
		sc->sc_rx_buf[id] = m;
		rxd->rxd_req.rxq_ref = dmap->dm_segs[0].ds_addr;
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0, BUS_DMASYNC_PREWRITE);
		prod++;
	}

	rxr->rxr_prod = prod;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if ((prod - cons < XNF_RX_MIN) && (ifp->if_flags & IFF_RUNNING))
		resched = 1;
	if (prod - rxr->rxr_prod_event < prod - oprod)
		xen_intr_signal(sc->sc_xih);

	return (resched);
}

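/*
 * Added commentary on grant references: the DMA maps used here are
 * created on the DMA tag supplied by the Xen parent device.  Loading a
 * map with the backend domain encoded in the flags (sc_domid << 16)
 * sets up grant-table entries for the pages, and dm_segs[].ds_addr
 * then carries the grant reference (cf. the "grant table ref" softc
 * comments) rather than a host physical address.  Those references are
 * what gets written into rxq_ref/txq_ref and into the xenstore
 * ring-ref properties in xnf_init_backend().
 */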
int
xnf_rx_ring_create(struct xnf_softc *sc)
{
	int i, flags, rsegs;

	/* Allocate a page of memory for the ring */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_rx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
		printf("%s: failed to allocate memory for the rx ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	/* Map in the allocated memory into the ring structure */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, 1, PAGE_SIZE,
	    (caddr_t *)(&sc->sc_rx_ring), BUS_DMA_NOWAIT)) {
		printf("%s: failed to map memory for the rx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Create a map to load the ring memory into */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_rmap)) {
		printf("%s: failed to create a memory map for the rx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Load the ring into the ring map to extract the grant reference */
	flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_rmap, sc->sc_rx_ring,
	    PAGE_SIZE, NULL, flags)) {
		printf("%s: failed to load the rx ring map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_rx_ref = sc->sc_rx_rmap->dm_segs[0].ds_addr;

	sc->sc_rx_ring->rxr_prod_event = sc->sc_rx_ring->rxr_cons_event = 1;

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XNF_MCLEN, 1, XNF_MCLEN,
		    PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_rx_dmap[i])) {
			printf("%s: failed to create a memory map for the"
			    " rx slot %d\n", sc->sc_dev.dv_xname, i);
			goto errout;
		}
		sc->sc_rx_ring->rxr_desc[i].rxd_req.rxq_id = i;
	}

	return (0);

 errout:
	xnf_rx_ring_destroy(sc);
	return (-1);
}

void
xnf_rx_ring_drain(struct xnf_softc *sc)
{
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;

	if (sc->sc_rx_cons != rxr->rxr_cons)
		xnf_rxeof(sc);
}

void
xnf_rx_ring_destroy(struct xnf_softc *sc)
{
	int i;

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (sc->sc_rx_buf[i] == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmap[i], 0, 0,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_dmap[i]);
		m_freem(sc->sc_rx_buf[i]);
		sc->sc_rx_buf[i] = NULL;
	}

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (sc->sc_rx_dmap[i] == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmap[i]);
		sc->sc_rx_dmap[i] = NULL;
	}
	if (sc->sc_rx_rmap) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_rmap);
	}
	if (sc->sc_rx_ring) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_rx_ring,
		    PAGE_SIZE);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, 1);
	}
	sc->sc_rx_ring = NULL;
	sc->sc_rx_rmap = NULL;
	sc->sc_rx_cons = 0;
}

int
xnf_tx_ring_create(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i, flags, nsegs, rsegs;
	bus_size_t segsz;

	sc->sc_tx_frags = sc->sc_caps & XNF_CAP_SG ? XNF_TX_FRAG : 1;

	/* Allocate a page of memory for the ring */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_tx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
		printf("%s: failed to allocate memory for the tx ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	/* Map in the allocated memory into the ring structure */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_seg, 1, PAGE_SIZE,
	    (caddr_t *)&sc->sc_tx_ring, BUS_DMA_NOWAIT)) {
		printf("%s: failed to map memory for the tx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Create a map to load the ring memory into */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &sc->sc_tx_rmap)) {
		printf("%s: failed to create a memory map for the tx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Load the ring into the ring map to extract the grant reference */
	flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_ring,
	    PAGE_SIZE, NULL, flags)) {
		printf("%s: failed to load the tx ring map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_tx_ref = sc->sc_tx_rmap->dm_segs[0].ds_addr;

	sc->sc_tx_ring->txr_prod_event = sc->sc_tx_ring->txr_cons_event = 1;

1030 0 : nsegs = roundup(ifp->if_hardmtu, XNF_MCLEN) / XNF_MCLEN + 1;
1031 0 : segsz = nsegs * XNF_MCLEN;
1032 0 : } else {
1033 : nsegs = 1;
1034 : segsz = XNF_MCLEN;
1035 : }
1036 0 : for (i = 0; i < XNF_TX_DESC; i++) {
1037 0 : if (bus_dmamap_create(sc->sc_dmat, segsz, nsegs, XNF_MCLEN,
1038 : PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_tx_buf[i].txb_dmap)) {
1039 0 : printf("%s: failed to create a memory map for the"
1040 0 : " tx slot %d\n", sc->sc_dev.dv_xname, i);
1041 0 : goto errout;
1042 : }
1043 : }
1044 :
1045 0 : sc->sc_tx_avail = XNF_TX_DESC;
1046 0 : sc->sc_tx_next = 0;
1047 :
1048 0 : return (0);
1049 :
1050 : errout:
1051 0 : xnf_tx_ring_destroy(sc);
1052 0 : return (-1);
1053 0 : }
1054 :
1055 : void
1056 0 : xnf_tx_ring_drain(struct xnf_softc *sc)
1057 : {
1058 0 : struct xnf_tx_ring *txr = sc->sc_tx_ring;
1059 :
1060 0 : if (sc->sc_tx_cons != txr->txr_cons)
1061 0 : xnf_txeof(sc);
1062 0 : }
1063 :
1064 : void
1065 0 : xnf_tx_ring_destroy(struct xnf_softc *sc)
1066 : {
1067 : int i;
1068 :
1069 0 : for (i = 0; i < XNF_TX_DESC; i++) {
1070 0 : if (sc->sc_tx_buf[i].txb_dmap == NULL)
1071 : continue;
1072 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap, 0, 0,
1073 : BUS_DMASYNC_POSTWRITE);
1074 0 : bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap);
1075 0 : bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap);
1076 0 : sc->sc_tx_buf[i].txb_dmap = NULL;
1077 0 : if (sc->sc_tx_buf[i].txb_mbuf == NULL)
1078 : continue;
1079 0 : m_free(sc->sc_tx_buf[i].txb_mbuf);
1080 0 : sc->sc_tx_buf[i].txb_mbuf = NULL;
1081 0 : sc->sc_tx_buf[i].txb_ndesc = 0;
1082 0 : }
1083 0 : if (sc->sc_tx_rmap) {
1084 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
1085 : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1086 0 : bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
1087 0 : bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
1088 0 : }
1089 0 : if (sc->sc_tx_ring) {
1090 0 : bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_tx_ring,
1091 : PAGE_SIZE);
1092 0 : bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_seg, 1);
1093 0 : }
1094 0 : sc->sc_tx_ring = NULL;
1095 0 : sc->sc_tx_rmap = NULL;
1096 0 : sc->sc_tx_avail = XNF_TX_DESC;
1097 0 : sc->sc_tx_next = 0;
1098 0 : }
1099 :
1100 : int
1101 0 : xnf_capabilities(struct xnf_softc *sc)
1102 : {
1103 0 : unsigned long long res;
1104 : const char *prop;
1105 : int error;
1106 :
1107 : /* Query scatter-gather capability */
1108 : prop = "feature-sg";
1109 0 : if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1110 0 : && error != ENOENT)
1111 : goto errout;
1112 0 : if (error == 0 && res == 1)
1113 0 : sc->sc_caps |= XNF_CAP_SG;
1114 :
1115 : #if 0
1116 : /* Query IPv4 checksum offloading capability, enabled by default */
1117 : sc->sc_caps |= XNF_CAP_CSUM4;
1118 : prop = "feature-no-csum-offload";
1119 : if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1120 : && error != ENOENT)
1121 : goto errout;
1122 : if (error == 0 && res == 1)
1123 : sc->sc_caps &= ~XNF_CAP_CSUM4;
1124 :
1125 : /* Query IPv6 checksum offloading capability */
1126 : prop = "feature-ipv6-csum-offload";
1127 : if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1128 : && error != ENOENT)
1129 : goto errout;
1130 : if (error == 0 && res == 1)
1131 : sc->sc_caps |= XNF_CAP_CSUM6;
1132 : #endif
1133 :
	/* Query multicast traffic control capability */
	prop = "feature-multicast-control";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_MCAST;

	/* Query split Rx/Tx event channel capability */
	prop = "feature-split-event-channels";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_SPLIT;

	/* Query multiqueue capability */
	prop = "multi-queue-max-queues";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0)
		sc->sc_caps |= XNF_CAP_MULTIQ;

	DPRINTF("%s: capabilities %b\n", sc->sc_dev.dv_xname, sc->sc_caps,
	    "\20\006MULTIQ\005SPLIT\004MCAST\003CSUM6\002CSUM4\001SG");
	return (0);

 errout:
	printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}

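/*
 * Added commentary: feature negotiation follows the usual xenstore
 * protocol.  The backend advertises its features under its own
 * directory, e.g. backend/vif/<domid>/<handle>/feature-sg = "1"
 * (path shown for illustration), and the frontend answers by writing
 * the options it wants into its device node before switching "state"
 * to connected, which is what happens below.
 */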
int
xnf_init_backend(struct xnf_softc *sc)
{
	const char *prop;

	/* Plumb the Rx ring */
	prop = "rx-ring-ref";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_rx_ref))
		goto errout;
	/* Enable "copy" mode */
	prop = "request-rx-copy";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
		goto errout;
	/* Enable notify mode */
	prop = "feature-rx-notify";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
		goto errout;

	/* Plumb the Tx ring */
	prop = "tx-ring-ref";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_tx_ref))
		goto errout;
	/* Enable scatter-gather mode */
	if (sc->sc_tx_frags > 1) {
		prop = "feature-sg";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
			goto errout;
	}

	/* Disable IPv4 checksum offloading */
	if (!(sc->sc_caps & XNF_CAP_CSUM4)) {
		prop = "feature-no-csum-offload";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
			goto errout;
	}

	/* Enable IPv6 checksum offloading */
	if (sc->sc_caps & XNF_CAP_CSUM6) {
		prop = "feature-ipv6-csum-offload";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
			goto errout;
	}

	/* Plumb the event channel port */
	prop = "event-channel";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
		goto errout;

	/* Connect the device */
	prop = "state";
	if (xs_setprop(sc->sc_parent, sc->sc_node, prop, XEN_STATE_CONNECTED,
	    strlen(XEN_STATE_CONNECTED)))
		goto errout;

	return (0);

 errout:
	printf("%s: failed to set \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}