LCOV - code coverage report
Current view: top level - dev/pv - virtio.c (source / functions)
Test: 6.4
Date: 2018-10-19 03:25:38
                   Hit    Total    Coverage
Lines:               0      356       0.0 %
Functions:           0       31       0.0 %
Legend: Lines: hit / not hit

          Line data    Source code
       1             : /*      $OpenBSD: virtio.c,v 1.11 2017/08/10 18:00:59 reyk Exp $        */
       2             : /*      $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */
       3             : 
       4             : /*
       5             :  * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
       6             :  * Copyright (c) 2010 Minoura Makoto.
       7             :  * All rights reserved.
       8             :  *
       9             :  * Redistribution and use in source and binary forms, with or without
      10             :  * modification, are permitted provided that the following conditions
      11             :  * are met:
      12             :  * 1. Redistributions of source code must retain the above copyright
      13             :  *    notice, this list of conditions and the following disclaimer.
      14             :  * 2. Redistributions in binary form must reproduce the above copyright
      15             :  *    notice, this list of conditions and the following disclaimer in the
      16             :  *    documentation and/or other materials provided with the distribution.
      17             :  *
      18             :  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
      19             :  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
      20             :  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
      21             :  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
      22             :  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
      23             :  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      24             :  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      25             :  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      26             :  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
      27             :  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      28             :  */
      29             : 
      30             : #include <sys/param.h>
      31             : #include <sys/systm.h>
      32             : #include <sys/kernel.h>
      33             : #include <sys/device.h>
      34             : #include <sys/mutex.h>
      35             : #include <sys/atomic.h>
      36             : #include <sys/malloc.h>
      37             : 
      38             : #include <dev/pv/virtioreg.h>
      39             : #include <dev/pv/virtiovar.h>
      40             : 
      41             : #if VIRTIO_DEBUG
      42             : #define VIRTIO_ASSERT(x)        KASSERT(x)
      43             : #else
      44             : #define VIRTIO_ASSERT(x)
      45             : #endif
      46             : 
      47             : void             virtio_init_vq(struct virtio_softc *,
      48             :                                 struct virtqueue *, int);
      49             : void             vq_free_entry(struct virtqueue *, struct vq_entry *);
      50             : struct vq_entry *vq_alloc_entry(struct virtqueue *);
      51             : 
      52             : struct cfdriver virtio_cd = {
      53             :         NULL, "virtio", DV_DULL
      54             : };
      55             : 
      56             : #define virtio_set_status(sc, s) (sc)->sc_ops->set_status(sc, s)
      57             : #define virtio_device_reset(sc) virtio_set_status((sc), 0)
      58             : 
      59             : static const char * const virtio_device_name[] = {
      60             :         "Unknown (0)",                /* 0 */
      61             :         "Network",            /* 1 */
      62             :         "Block",              /* 2 */
      63             :         "Console",            /* 3 */
      64             :         "Entropy",            /* 4 */
      65             :         "Memory Balloon",     /* 5 */
      66             :         "IO Memory",          /* 6 */
      67             :         "Rpmsg",              /* 7 */
      68             :         "SCSI host",          /* 8 */
      69             :         "9P Transport",               /* 9 */
      70             :         "mac80211 wlan"               /* 10 */
      71             : };
      72             : #define NDEVNAMES       (sizeof(virtio_device_name)/sizeof(char*))
      73             : 
      74             : static const struct virtio_feature_name transport_feature_names[] = {
      75             :         { VIRTIO_F_NOTIFY_ON_EMPTY,     "NotifyOnEmpty"},
      76             :         { VIRTIO_F_RING_INDIRECT_DESC,  "RingIndirectDesc"},
      77             :         { VIRTIO_F_RING_EVENT_IDX,      "RingEventIdx"},
      78             :         { VIRTIO_F_BAD_FEATURE,         "BadFeature"},
      79             :         { 0,                            NULL}
      80             : };
      81             : 
      82             : const char *
      83           0 : virtio_device_string(int id)
      84             : {
      85           0 :         return id < NDEVNAMES ? virtio_device_name[id] : "Unknown";
      86             : }
      87             : 
      88             : void
      89           0 : virtio_log_features(uint32_t host, uint32_t neg,
      90             :     const struct virtio_feature_name *guest_feature_names)
      91             : {
      92             :         const struct virtio_feature_name *namep;
      93             :         int i;
      94             :         char c;
      95             :         uint32_t bit;
      96             : 
      97           0 :         for (i = 0; i < 32; i++) {
      98           0 :                 if (i == 30) {
      99             :                         /*
     100             :                          * VIRTIO_F_BAD_FEATURE is only used for
     101             :                          * checking correct negotiation
     102             :                          */
     103             :                         continue;
     104             :                 }
     105           0 :                 bit = 1 << i;
     106           0 :                 if ((host&bit) == 0)
     107             :                         continue;
     108           0 :                 namep = (i < 24) ? guest_feature_names :
     109             :                     transport_feature_names;
     110           0 :                 while (namep->bit && namep->bit != bit)
     111           0 :                         namep++;
     112           0 :                 c = (neg&bit) ? '+' : '-';
     113           0 :                 if (namep->name)
     114           0 :                         printf(" %c%s", c, namep->name);
     115             :                 else
     116           0 :                         printf(" %cUnknown(%d)", c, i);
     117             :         }
     118           0 : }
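
(Usage sketch, not part of the original source: a child driver passes its
own guest-feature name table and prints the result once after feature
negotiation. The table contents and the "host"/"neg" values here are
hypothetical.)

        static const struct virtio_feature_name vio_feature_names[] = {
                { 1 << 0, "SomeGuestFeature" }, /* hypothetical guest bit 0 */
                { 0,      NULL }
        };

        printf("%s: features:", sc->sc_dev.dv_xname);
        /* bits 0-23 are looked up in the table passed in; bits 24-31
         * use the transport feature table above automatically */
        virtio_log_features(host, neg, vio_feature_names);
        printf("\n");
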
     119             : 
     120             : /*
     121             :  * Reset the device.
     122             :  */
     123             : /*
      124             :  * To reset the device to a known state, do the following:
      125             :  *      virtio_reset(sc);            // this stops all device activity
      126             :  *      <dequeue finished requests>; // virtio_dequeue() can still be called
      127             :  *      <revoke pending requests in the vqs if any>;
      128             :  *      virtio_reinit_start(sc);     // dequeue prohibited
      129             :  *      newfeatures = virtio_negotiate_features(sc, requestedfeatures);
      130             :  *      <some other initialization>;
      131             :  *      virtio_reinit_end(sc);       // device activated; enqueue allowed
      132             :  * Once attached, feature negotiation is only possible after virtio_reset.
     133             :  */
     134             : void
     135           0 : virtio_reset(struct virtio_softc *sc)
     136             : {
     137           0 :         virtio_device_reset(sc);
     138           0 : }
     139             : 
     140             : void
     141           0 : virtio_reinit_start(struct virtio_softc *sc)
     142             : {
     143             :         int i;
     144             : 
     145           0 :         virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
     146           0 :         virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
     147           0 :         for (i = 0; i < sc->sc_nvqs; i++) {
     148             :                 int n;
     149           0 :                 struct virtqueue *vq = &sc->sc_vqs[i];
     150           0 :                 n = virtio_read_queue_size(sc, vq->vq_index);
     151           0 :                 if (n == 0)     /* vq disappeared */
     152           0 :                         continue;
     153           0 :                 if (n != vq->vq_num) {
     154           0 :                         panic("%s: virtqueue size changed, vq index %d\n",
     155           0 :                             sc->sc_dev.dv_xname, vq->vq_index);
     156             :                 }
     157           0 :                 virtio_init_vq(sc, vq, 1);
     158           0 :                 virtio_setup_queue(sc, vq->vq_index,
     159             :                     vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
     160           0 :         }
     161           0 : }
     162             : 
     163             : void
     164           0 : virtio_reinit_end(struct virtio_softc *sc)
     165             : {
     166           0 :         virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
     167           0 : }
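
(A sketch of the reinit sequence documented above; the dequeue/revoke and
device-specific steps are only indicated by placeholder comments.)

        virtio_reset(vsc);              /* stops all device activity */
        /* <dequeue finished requests; revoke pending ones> */
        virtio_reinit_start(vsc);       /* re-checks vq sizes, re-inits rings */
        /* <re-negotiate features; redo device-specific setup> */
        virtio_reinit_end(vsc);         /* DRIVER_OK: enqueue allowed again */
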
     168             : 
     169             : /*
     170             :  * dmamap sync operations for a virtqueue.
     171             :  */
     172             : static inline void
     173           0 : vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
     174             : {
     175             :         /* availoffset == sizeof(vring_desc)*vq_num */
     176           0 :         bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
     177             :             ops);
     178           0 : }
     179             : 
     180             : static inline void
     181           0 : vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
     182             : {
     183           0 :         bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, vq->vq_availoffset,
     184             :             offsetof(struct vring_avail, ring) + vq->vq_num * sizeof(uint16_t),
     185             :             ops);
     186           0 : }
     187             : 
     188             : static inline void
     189           0 : vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
     190             : {
     191           0 :         bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, vq->vq_usedoffset,
     192             :             offsetof(struct vring_used, ring) + vq->vq_num *
     193             :             sizeof(struct vring_used_elem), ops);
     194           0 : }
     195             : 
     196             : static inline void
     197           0 : vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
     198             :     int ops)
     199             : {
     200           0 :         int offset = vq->vq_indirectoffset +
     201           0 :             sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;
     202             : 
     203           0 :         bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, offset,
     204             :             sizeof(struct vring_desc) * vq->vq_maxnsegs, ops);
     205           0 : }
     206             : 
     207             : /*
      208             :  * Scan the vqs, bus_dmamap_sync() their rings (not the payload),
      209             :  * and call (*vq_done)() for each vq with consumed entries.
      210             :  * For use in transport-specific interrupt handlers.
     211             :  */
     212             : int
     213           0 : virtio_check_vqs(struct virtio_softc *sc)
     214             : {
     215             :         struct virtqueue *vq;
     216             :         int i, r = 0;
     217             : 
     218             :         /* going backwards is better for if_vio */
     219           0 :         for (i = sc->sc_nvqs - 1; i >= 0; i--) {
     220           0 :                 vq = &sc->sc_vqs[i];
     221           0 :                 if (vq->vq_queued) {
     222           0 :                         vq->vq_queued = 0;
     223           0 :                         vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
     224           0 :                 }
     225           0 :                 vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
     226           0 :                 if (vq->vq_used_idx != vq->vq_used->idx) {
     227           0 :                         if (vq->vq_done)
     228           0 :                                 r |= (vq->vq_done)(vq);
     229             :                 }
     230             :         }
     231             : 
     232           0 :         return r;
     233             : }
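
(For illustration, a transport's interrupt handler would acknowledge the
interrupt and then hand off to virtio_check_vqs(); the handler name and
the status-register step below are hypothetical.)

        int
        vio_transport_intr(void *arg)
        {
                struct virtio_softc *vsc = arg;

                /* <read and acknowledge the transport's interrupt status> */

                /* returns 1 if any vq_done callback consumed entries */
                return virtio_check_vqs(vsc);
        }
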
     234             : 
     235             : /*
     236             :  * Initialize vq structure.
     237             :  */
     238             : void
     239           0 : virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, int reinit)
     240             : {
     241             :         int i, j;
     242           0 :         int vq_size = vq->vq_num;
     243             : 
     244           0 :         memset(vq->vq_vaddr, 0, vq->vq_bytesize);
     245             : 
     246             :         /* build the indirect descriptor chain */
     247           0 :         if (vq->vq_indirect != NULL) {
     248             :                 struct vring_desc *vd;
     249             : 
     250           0 :                 for (i = 0; i < vq_size; i++) {
     251           0 :                         vd = vq->vq_indirect;
     252           0 :                         vd += vq->vq_maxnsegs * i;
     253           0 :                         for (j = 0; j < vq->vq_maxnsegs-1; j++)
     254           0 :                                 vd[j].next = j + 1;
     255             :                 }
     256           0 :         }
     257             : 
     258             :         /* free slot management */
     259           0 :         SLIST_INIT(&vq->vq_freelist);
     260             :         /*
      261             :          * virtio_enqueue_trim needs monotonically rising entries,
      262             :          * therefore initialize in reverse order
     263             :          */
     264           0 :         for (i = vq_size - 1; i >= 0; i--) {
     265           0 :                 SLIST_INSERT_HEAD(&vq->vq_freelist, &vq->vq_entries[i],
     266             :                     qe_list);
     267           0 :                 vq->vq_entries[i].qe_index = i;
     268             :         }
     269             : 
     270             :         /* enqueue/dequeue status */
     271           0 :         vq->vq_avail_idx = 0;
     272           0 :         vq->vq_used_idx = 0;
     273           0 :         vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
     274           0 :         vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
     275           0 :         vq->vq_queued = 1;
     276           0 : }
     277             : 
     278             : /*
     279             :  * Allocate/free a vq.
     280             :  *
     281             :  * maxnsegs denotes how much space should be allocated for indirect
      282             :  * descriptors. maxnsegs == 1 can be used to disable the use of indirect
     283             :  * descriptors for this queue.
     284             :  */
     285             : int
     286           0 : virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
     287             :     int maxsegsize, int maxnsegs, const char *name)
     288             : {
     289             :         int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
     290           0 :         int rsegs, r, hdrlen;
     291             : #define VIRTQUEUE_ALIGN(n)      (((n)+(VIRTIO_PAGE_SIZE-1))&        \
     292             :                                  ~(VIRTIO_PAGE_SIZE-1))
     293             : 
     294           0 :         memset(vq, 0, sizeof(*vq));
     295             : 
     296           0 :         vq_size = virtio_read_queue_size(sc, index);
     297           0 :         if (vq_size == 0) {
      298           0 :                 printf("virtqueue does not exist, index %d for %s\n", index, name);
     299           0 :                 goto err;
     300             :         }
     301           0 :         if (((vq_size - 1) & vq_size) != 0)
     302           0 :                 panic("vq_size not power of two: %d", vq_size);
     303             : 
     304           0 :         hdrlen = (sc->sc_features & VIRTIO_F_RING_EVENT_IDX) ? 3 : 2;
     305             : 
     306             :         /* allocsize1: descriptor table + avail ring + pad */
     307           0 :         allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc) * vq_size
     308             :             + sizeof(uint16_t) * (hdrlen + vq_size));
     309             :         /* allocsize2: used ring + pad */
     310           0 :         allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
     311             :             + sizeof(struct vring_used_elem) * vq_size);
     312             :         /* allocsize3: indirect table */
     313           0 :         if (sc->sc_indirect && maxnsegs > 1)
     314           0 :                 allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
     315             :         else
     316             :                 allocsize3 = 0;
     317           0 :         allocsize = allocsize1 + allocsize2 + allocsize3;
     318             : 
     319             :         /* alloc and map the memory */
     320           0 :         r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
     321             :             &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
     322           0 :         if (r != 0) {
     323           0 :                 printf("virtqueue %d for %s allocation failed, error %d\n",
     324             :                        index, name, r);
     325           0 :                 goto err;
     326             :         }
     327           0 :         r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
     328             :             (caddr_t*)&vq->vq_vaddr, BUS_DMA_NOWAIT);
     329           0 :         if (r != 0) {
     330           0 :                 printf("virtqueue %d for %s map failed, error %d\n", index,
     331             :                     name, r);
     332           0 :                 goto err;
     333             :         }
     334           0 :         r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
     335             :             BUS_DMA_NOWAIT, &vq->vq_dmamap);
     336           0 :         if (r != 0) {
     337           0 :                 printf("virtqueue %d for %s dmamap creation failed, "
     338             :                     "error %d\n", index, name, r);
     339           0 :                 goto err;
     340             :         }
     341           0 :         r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap, vq->vq_vaddr,
     342             :             allocsize, NULL, BUS_DMA_NOWAIT);
     343           0 :         if (r != 0) {
     344           0 :                 printf("virtqueue %d for %s dmamap load failed, error %d\n",
     345             :                     index, name, r);
     346           0 :                 goto err;
     347             :         }
     348             : 
     349           0 :         virtio_setup_queue(sc, index,
     350             :             vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
     351             : 
     352             :         /* remember addresses and offsets for later use */
     353           0 :         vq->vq_owner = sc;
     354           0 :         vq->vq_num = vq_size;
     355           0 :         vq->vq_mask = vq_size - 1;
     356           0 :         vq->vq_index = index;
     357           0 :         vq->vq_desc = vq->vq_vaddr;
     358           0 :         vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
     359           0 :         vq->vq_avail = (struct vring_avail*)(((char*)vq->vq_desc) +
     360             :             vq->vq_availoffset);
     361           0 :         vq->vq_usedoffset = allocsize1;
     362           0 :         vq->vq_used = (struct vring_used*)(((char*)vq->vq_desc) +
     363             :             vq->vq_usedoffset);
     364           0 :         if (allocsize3 > 0) {
     365           0 :                 vq->vq_indirectoffset = allocsize1 + allocsize2;
     366           0 :                 vq->vq_indirect = (void*)(((char*)vq->vq_desc)
     367           0 :                     + vq->vq_indirectoffset);
     368           0 :         }
     369           0 :         vq->vq_bytesize = allocsize;
     370           0 :         vq->vq_maxnsegs = maxnsegs;
     371             : 
     372             :         /* free slot management */
     373           0 :         vq->vq_entries = mallocarray(vq_size, sizeof(struct vq_entry),
     374             :             M_DEVBUF, M_NOWAIT | M_ZERO);
     375           0 :         if (vq->vq_entries == NULL) {
     376             :                 r = ENOMEM;
     377           0 :                 goto err;
     378             :         }
     379             : 
     380           0 :         virtio_init_vq(sc, vq, 0);
     381             : 
     382             : #if VIRTIO_DEBUG
      383             :         printf("\nallocated %u bytes for virtqueue %d for %s, size %d\n",
     384             :             allocsize, index, name, vq_size);
     385             :         if (allocsize3 > 0)
      386             :                 printf("using %d bytes (%d entries) of indirect descriptors\n",
     387             :                     allocsize3, maxnsegs * vq_size);
     388             : #endif
     389           0 :         return 0;
     390             : 
     391             : err:
     392           0 :         virtio_setup_queue(sc, index, 0);
     393           0 :         if (vq->vq_dmamap)
     394           0 :                 bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
     395           0 :         if (vq->vq_vaddr)
     396           0 :                 bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
     397           0 :         if (vq->vq_segs[0].ds_addr)
     398           0 :                 bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
     399           0 :         memset(vq, 0, sizeof(*vq));
     400             : 
     401           0 :         return -1;
     402           0 : }
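
(A sketch of attach-time queue setup as a child driver might do it; the
softc layout, the queue parameters and "vio_vq_done" are hypothetical.)

        if (virtio_alloc_vq(vsc, &sc->sc_vq, 0 /* index */,
            MAXPHYS /* maxsegsize */, 8 /* maxnsegs */, "requests") != 0)
                return;                         /* fail the attach */
        sc->sc_vq.vq_done = vio_vq_done;        /* run from virtio_check_vqs() */
        virtio_start_vq_intr(vsc, &sc->sc_vq);
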
     403             : 
     404             : int
     405           0 : virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
     406             : {
     407             :         struct vq_entry *qe;
     408             :         int i = 0;
     409             : 
      410             :         /* the device must already be deactivated */
     411             :         /* confirm the vq is empty */
     412           0 :         SLIST_FOREACH(qe, &vq->vq_freelist, qe_list) {
     413           0 :                 i++;
     414             :         }
     415           0 :         if (i != vq->vq_num) {
     416           0 :                 printf("%s: freeing non-empty vq, index %d\n",
     417           0 :                     sc->sc_dev.dv_xname, vq->vq_index);
     418           0 :                 return EBUSY;
     419             :         }
     420             : 
     421             :         /* tell device that there's no virtqueue any longer */
     422           0 :         virtio_setup_queue(sc, vq->vq_index, 0);
     423             : 
     424           0 :         free(vq->vq_entries, M_DEVBUF, 0);
     425           0 :         bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
     426           0 :         bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
     427           0 :         bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
     428           0 :         bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
     429           0 :         memset(vq, 0, sizeof(*vq));
     430             : 
     431           0 :         return 0;
     432           0 : }
     433             : 
     434             : /*
     435             :  * Free descriptor management.
     436             :  */
     437             : struct vq_entry *
     438           0 : vq_alloc_entry(struct virtqueue *vq)
     439             : {
     440             :         struct vq_entry *qe;
     441             : 
     442           0 :         if (SLIST_EMPTY(&vq->vq_freelist))
     443           0 :                 return NULL;
     444             :         qe = SLIST_FIRST(&vq->vq_freelist);
     445           0 :         SLIST_REMOVE_HEAD(&vq->vq_freelist, qe_list);
     446             : 
     447           0 :         return qe;
     448           0 : }
     449             : 
     450             : void
     451           0 : vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
     452             : {
     453           0 :         SLIST_INSERT_HEAD(&vq->vq_freelist, qe, qe_list);
     454           0 : }
     455             : 
     456             : /*
     457             :  * Enqueue several dmamaps as a single request.
     458             :  */
     459             : /*
      460             :  * Typical usage:
      461             :  *  <queue size> instances of the following are stored in arrays:
      462             :  *  - command blocks (in dmamem) should be pre-allocated and mapped
      463             :  *  - dmamaps for command blocks should be pre-allocated and loaded
      464             :  *  - dmamaps for payload should be pre-allocated
      465             :  *      r = virtio_enqueue_prep(vq, &slot);         // allocate a slot
      466             :  *      if (r)          // currently 0 or EAGAIN
      467             :  *        return r;
      468             :  *      r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
      469             :  *      if (r) {
      470             :  *        virtio_enqueue_abort(vq, slot);
      471             :  *        bus_dmamap_unload(dmat, dmamap_payload[slot]);
      472             :  *        return r;
      473             :  *      }
      474             :  *      r = virtio_enqueue_reserve(vq, slot,
      475             :  *                                 dmamap_payload[slot]->dm_nsegs+1);
      476             :  *                                                      // ^ +1 for command
      477             :  *      if (r) {        // currently 0 or EAGAIN
      478             :  *        bus_dmamap_unload(dmat, dmamap_payload[slot]);
      479             :  *        return r;                                     // do not call abort()
      480             :  *      }
      481             :  *      <set up and prepare commands>
      482             :  *      bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
      483             :  *      bus_dmamap_sync(dmat, dmamap_payload[slot],...);
      484             :  *      virtio_enqueue(vq, slot, dmamap_cmd[slot], 0);
      485             :  *      virtio_enqueue(vq, slot, dmamap_payload[slot], iswrite);
      486             :  *      virtio_enqueue_commit(sc, vq, slot, 1);
      487             :  *
      488             :  * Alternative usage with statically allocated slots:
      489             :  *      <during initialization>
      490             :  *      // while not out of slots, do
      491             :  *      virtio_enqueue_prep(vq, &slot);             // allocate a slot
      492             :  *      virtio_enqueue_reserve(vq, slot, max_segs); // reserve all slots
      493             :  *                                                  // that may ever be needed
      494             :  *
      495             :  *      <when enqueuing a request>
      496             :  *      // Don't call virtio_enqueue_prep()
      497             :  *      bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
      498             :  *      bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
      499             :  *      bus_dmamap_sync(dmat, dmamap_payload[slot],...);
      500             :  *      virtio_enqueue_trim(vq, slot, num_segs_needed);
      501             :  *      virtio_enqueue(vq, slot, dmamap_cmd[slot], 0);
      502             :  *      virtio_enqueue(vq, slot, dmamap_payload[slot], iswrite);
      503             :  *      virtio_enqueue_commit(sc, vq, slot, 1);
      504             :  *
      505             :  *      <when dequeuing>
      506             :  *      // don't call virtio_dequeue_commit()
     507             :  */
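
(Condensed into real code, the dynamic-slot pattern above might look like
this single-buffer sketch; "vio_submit" and its parameters are
hypothetical, and the command block of the full pattern is omitted.)

        int
        vio_submit(struct virtio_softc *vsc, struct virtqueue *vq,
            bus_dmamap_t payload, void *data, size_t len, int iswrite)
        {
                int slot, r;

                if ((r = virtio_enqueue_prep(vq, &slot)) != 0)
                        return r;               /* EAGAIN: no free slot */
                r = bus_dmamap_load(vsc->sc_dmat, payload, data, len,
                    NULL, BUS_DMA_NOWAIT);
                if (r != 0) {
                        virtio_enqueue_abort(vq, slot);
                        return r;
                }
                r = virtio_enqueue_reserve(vq, slot, payload->dm_nsegs);
                if (r != 0) {                   /* reserve() already aborted */
                        bus_dmamap_unload(vsc->sc_dmat, payload);
                        return r;
                }
                bus_dmamap_sync(vsc->sc_dmat, payload, 0, len, iswrite ?
                    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
                virtio_enqueue(vq, slot, payload, iswrite);
                virtio_enqueue_commit(vsc, vq, slot, 1); /* publish + kick */
                return 0;
        }
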
     508             : 
     509             : /*
     510             :  * enqueue_prep: allocate a slot number
     511             :  */
     512             : int
     513           0 : virtio_enqueue_prep(struct virtqueue *vq, int *slotp)
     514             : {
     515             :         struct vq_entry *qe1;
     516             : 
     517             :         VIRTIO_ASSERT(slotp != NULL);
     518             : 
     519           0 :         qe1 = vq_alloc_entry(vq);
     520           0 :         if (qe1 == NULL)
     521           0 :                 return EAGAIN;
     522             :         /* next slot is not allocated yet */
     523           0 :         qe1->qe_next = -1;
     524           0 :         *slotp = qe1->qe_index;
     525             : 
     526           0 :         return 0;
     527           0 : }
     528             : 
     529             : /*
     530             :  * enqueue_reserve: allocate remaining slots and build the descriptor chain.
     531             :  * Calls virtio_enqueue_abort() on failure.
     532             :  */
     533             : int
     534           0 : virtio_enqueue_reserve(struct virtqueue *vq, int slot, int nsegs)
     535             : {
     536           0 :         struct vq_entry *qe1 = &vq->vq_entries[slot];
     537             : 
     538             :         VIRTIO_ASSERT(qe1->qe_next == -1);
     539             :         VIRTIO_ASSERT(1 <= nsegs && nsegs <= vq->vq_num);
     540             : 
     541           0 :         if (vq->vq_indirect != NULL && nsegs > 1 && nsegs <= vq->vq_maxnsegs) {
     542             :                 struct vring_desc *vd;
     543             :                 int i;
     544             : 
     545           0 :                 qe1->qe_indirect = 1;
     546             : 
     547           0 :                 vd = &vq->vq_desc[qe1->qe_index];
     548           0 :                 vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr +
     549           0 :                     vq->vq_indirectoffset;
     550           0 :                 vd->addr += sizeof(struct vring_desc) * vq->vq_maxnsegs *
     551           0 :                     qe1->qe_index;
     552           0 :                 vd->len = sizeof(struct vring_desc) * nsegs;
     553           0 :                 vd->flags = VRING_DESC_F_INDIRECT;
     554             : 
     555           0 :                 vd = vq->vq_indirect;
     556           0 :                 vd += vq->vq_maxnsegs * qe1->qe_index;
     557           0 :                 qe1->qe_desc_base = vd;
     558             : 
     559           0 :                 for (i = 0; i < nsegs-1; i++)
     560           0 :                         vd[i].flags = VRING_DESC_F_NEXT;
     561           0 :                 vd[i].flags = 0;
     562           0 :                 qe1->qe_next = 0;
     563             : 
     564             :                 return 0;
     565             :         } else {
     566             :                 struct vring_desc *vd;
     567             :                 struct vq_entry *qe;
     568             :                 int i, s;
     569             : 
     570           0 :                 qe1->qe_indirect = 0;
     571             : 
     572           0 :                 vd = &vq->vq_desc[0];
     573           0 :                 qe1->qe_desc_base = vd;
     574           0 :                 qe1->qe_next = qe1->qe_index;
     575             :                 s = slot;
     576           0 :                 for (i = 0; i < nsegs - 1; i++) {
     577           0 :                         qe = vq_alloc_entry(vq);
     578           0 :                         if (qe == NULL) {
     579           0 :                                 vd[s].flags = 0;
     580           0 :                                 virtio_enqueue_abort(vq, slot);
     581           0 :                                 return EAGAIN;
     582             :                         }
     583           0 :                         vd[s].flags = VRING_DESC_F_NEXT;
     584           0 :                         vd[s].next = qe->qe_index;
     585           0 :                         s = qe->qe_index;
     586             :                 }
     587           0 :                 vd[s].flags = 0;
     588             : 
     589           0 :                 return 0;
     590             :         }
     591           0 : }
     592             : 
     593             : /*
     594             :  * enqueue: enqueue a single dmamap.
     595             :  */
     596             : int
     597           0 : virtio_enqueue(struct virtqueue *vq, int slot, bus_dmamap_t dmamap, int write)
     598             : {
     599           0 :         struct vq_entry *qe1 = &vq->vq_entries[slot];
     600           0 :         struct vring_desc *vd = qe1->qe_desc_base;
     601             :         int i;
     602           0 :         int s = qe1->qe_next;
     603             : 
     604             :         VIRTIO_ASSERT(s >= 0);
     605             :         VIRTIO_ASSERT(dmamap->dm_nsegs > 0);
     606           0 :         if (dmamap->dm_nsegs > vq->vq_maxnsegs) {
     607             : #if VIRTIO_DEBUG
     608             :                 for (i = 0; i < dmamap->dm_nsegs; i++) {
     609             :                         printf(" %d (%d): %p %lx \n", i, write,
     610             :                             (void *)dmamap->dm_segs[i].ds_addr,
     611             :                             dmamap->dm_segs[i].ds_len);
     612             :                 }
     613             : #endif
      614           0 :                 panic("dmamap->dm_nsegs %d > vq->vq_maxnsegs %d\n",
     615             :                     dmamap->dm_nsegs, vq->vq_maxnsegs);
     616             :         }
     617             : 
     618           0 :         for (i = 0; i < dmamap->dm_nsegs; i++) {
     619           0 :                 vd[s].addr = dmamap->dm_segs[i].ds_addr;
     620           0 :                 vd[s].len = dmamap->dm_segs[i].ds_len;
     621           0 :                 if (!write)
     622           0 :                         vd[s].flags |= VRING_DESC_F_WRITE;
     623           0 :                 s = vd[s].next;
     624             :         }
     625           0 :         qe1->qe_next = s;
     626             : 
     627           0 :         return 0;
     628             : }
     629             : 
     630             : int
     631           0 : virtio_enqueue_p(struct virtqueue *vq, int slot, bus_dmamap_t dmamap,
     632             :     bus_addr_t start, bus_size_t len, int write)
     633             : {
     634           0 :         struct vq_entry *qe1 = &vq->vq_entries[slot];
     635           0 :         struct vring_desc *vd = qe1->qe_desc_base;
     636           0 :         int s = qe1->qe_next;
     637             : 
     638             :         VIRTIO_ASSERT(s >= 0);
     639             :         /* XXX todo: handle more segments */
     640             :         VIRTIO_ASSERT(dmamap->dm_nsegs == 1);
     641             :         VIRTIO_ASSERT((dmamap->dm_segs[0].ds_len > start) &&
     642             :             (dmamap->dm_segs[0].ds_len >= start + len));
     643             : 
     644           0 :         vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
     645           0 :         vd[s].len = len;
     646           0 :         if (!write)
     647           0 :                 vd[s].flags |= VRING_DESC_F_WRITE;
     648           0 :         qe1->qe_next = vd[s].next;
     649             : 
     650           0 :         return 0;
     651             : }
     652             : 
     653             : static void
     654           0 : publish_avail_idx(struct virtio_softc *sc, struct virtqueue *vq)
     655             : {
     656           0 :         vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
     657             : 
     658           0 :         virtio_membar_producer();
     659           0 :         vq->vq_avail->idx = vq->vq_avail_idx;
     660           0 :         vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
     661           0 :         vq->vq_queued = 1;
     662           0 : }
     663             : 
     664             : /*
     665             :  * enqueue_commit: add it to the aring.
     666             :  */
     667             : void
     668           0 : virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
     669             :     int notifynow)
     670             : {
     671             :         struct vq_entry *qe1;
     672             : 
     673           0 :         if (slot < 0)
     674             :                 goto notify;
     675           0 :         vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
     676           0 :         qe1 = &vq->vq_entries[slot];
     677           0 :         if (qe1->qe_indirect)
     678           0 :                 vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
     679           0 :         vq->vq_avail->ring[(vq->vq_avail_idx++) & vq->vq_mask] = slot;
     680             : 
     681             : notify:
     682           0 :         if (notifynow) {
     683           0 :                 if (vq->vq_owner->sc_features & VIRTIO_F_RING_EVENT_IDX) {
     684           0 :                         uint16_t o = vq->vq_avail->idx;
     685           0 :                         uint16_t n = vq->vq_avail_idx;
     686             :                         uint16_t t;
     687           0 :                         publish_avail_idx(sc, vq);
     688             : 
     689           0 :                         virtio_membar_sync();
     690           0 :                         t = VQ_AVAIL_EVENT(vq) + 1;
     691           0 :                         if ((uint16_t)(n - t) < (uint16_t)(n - o))
     692           0 :                                 sc->sc_ops->kick(sc, vq->vq_index);
     693           0 :                 } else {
     694           0 :                         publish_avail_idx(sc, vq);
     695             : 
     696           0 :                         virtio_membar_sync();
     697           0 :                         if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
     698           0 :                                 sc->sc_ops->kick(sc, vq->vq_index);
     699             :                 }
     700             :         }
     701           0 : }
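
(The kick decision in the event-index branch is the standard
vring_need_event() test: with o the previously published avail index, n
the new one and t = avail_event + 1, the unsigned comparison
(uint16_t)(n - t) < (uint16_t)(n - o) holds exactly when the device's
requested notification point lies in [o, n - 1], with all arithmetic
wrapping modulo 2^16. For example, o = 10, n = 12, avail_event = 10
gives 1 < 2, so the device is kicked; avail_event = 13 gives 65534 < 2,
which is false, so no kick is sent.)
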
     702             : 
     703             : /*
     704             :  * enqueue_abort: rollback.
     705             :  */
     706             : int
     707           0 : virtio_enqueue_abort(struct virtqueue *vq, int slot)
     708             : {
     709           0 :         struct vq_entry *qe = &vq->vq_entries[slot];
     710             :         struct vring_desc *vd;
     711             :         int s;
     712             : 
     713           0 :         if (qe->qe_next < 0) {
     714           0 :                 vq_free_entry(vq, qe);
     715           0 :                 return 0;
     716             :         }
     717             : 
     718             :         s = slot;
     719           0 :         vd = &vq->vq_desc[0];
     720           0 :         while (vd[s].flags & VRING_DESC_F_NEXT) {
     721           0 :                 s = vd[s].next;
     722           0 :                 vq_free_entry(vq, qe);
     723           0 :                 qe = &vq->vq_entries[s];
     724             :         }
     725           0 :         vq_free_entry(vq, qe);
     726           0 :         return 0;
     727           0 : }
     728             : 
     729             : /*
      730             :  * enqueue_trim: adjust the descriptor chain to the given number of
      731             :  * segments, a.k.a. descriptors.
     732             :  */
     733             : void
     734           0 : virtio_enqueue_trim(struct virtqueue *vq, int slot, int nsegs)
     735             : {
     736           0 :         struct vq_entry *qe1 = &vq->vq_entries[slot];
     737           0 :         struct vring_desc *vd = &vq->vq_desc[0];
     738             :         int i;
     739             : 
     740           0 :         if ((vd[slot].flags & VRING_DESC_F_INDIRECT) == 0) {
     741           0 :                 qe1->qe_next = qe1->qe_index;
     742             :                 /*
     743             :                  * N.B.: the vq_entries are ASSUMED to be a contiguous
     744             :                  *       block with slot being the index to the first one.
     745             :                  */
     746           0 :         } else {
     747           0 :                 qe1->qe_next = 0;
     748           0 :                 vd = &vq->vq_desc[qe1->qe_index];
     749           0 :                 vd->len = sizeof(struct vring_desc) * nsegs;
     750           0 :                 vd = qe1->qe_desc_base;
     751             :                 slot = 0;
     752             :         }
     753             : 
      754           0 :         for (i = 0; i < nsegs - 1; i++) {
     755           0 :                 vd[slot].flags = VRING_DESC_F_NEXT;
     756           0 :                 slot++;
     757             :         }
     758           0 :         vd[slot].flags = 0;
     759           0 : }
     760             : 
     761             : /*
     762             :  * Dequeue a request.
     763             :  */
     764             : /*
     765             :  * dequeue: dequeue a request from uring; dmamap_sync for uring is
     766             :  *          already done in the interrupt handler.
     767             :  */
     768             : int
     769           0 : virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
     770             :     int *slotp, int *lenp)
     771             : {
     772             :         uint16_t slot, usedidx;
     773             :         struct vq_entry *qe;
     774             : 
     775           0 :         if (vq->vq_used_idx == vq->vq_used->idx)
     776           0 :                 return ENOENT;
     777           0 :         usedidx = vq->vq_used_idx++;
     778           0 :         usedidx &= vq->vq_mask;
     779             : 
     780           0 :         virtio_membar_consumer();
     781           0 :         slot = vq->vq_used->ring[usedidx].id;
     782           0 :         qe = &vq->vq_entries[slot];
     783             : 
     784           0 :         if (qe->qe_indirect)
     785           0 :                 vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);
     786             : 
     787           0 :         if (slotp)
     788           0 :                 *slotp = slot;
     789           0 :         if (lenp)
     790           0 :                 *lenp = vq->vq_used->ring[usedidx].len;
     791             : 
     792           0 :         return 0;
     793           0 : }
     794             : 
     795             : /*
     796             :  * dequeue_commit: complete dequeue; the slot is recycled for future use.
 *                 If you forget to call this, the slot will be leaked.
     798             :  *
     799             :  *                 Don't call this if you use statically allocated slots
     800             :  *                 and virtio_dequeue_trim().
     801             :  */
     802             : int
     803           0 : virtio_dequeue_commit(struct virtqueue *vq, int slot)
     804             : {
     805           0 :         struct vq_entry *qe = &vq->vq_entries[slot];
     806           0 :         struct vring_desc *vd = &vq->vq_desc[0];
     807             :         int s = slot;
     808             : 
     809           0 :         while (vd[s].flags & VRING_DESC_F_NEXT) {
     810           0 :                 s = vd[s].next;
     811           0 :                 vq_free_entry(vq, qe);
     812           0 :                 qe = &vq->vq_entries[s];
     813             :         }
     814           0 :         vq_free_entry(vq, qe);
     815             : 
     816           0 :         return 0;
     817             : }
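
(Putting dequeue and dequeue_commit together: a vq_done callback, as
invoked from virtio_check_vqs(), might be structured like this sketch;
"vio_vq_done" and the completion step are hypothetical.)

        int
        vio_vq_done(struct virtqueue *vq)
        {
                struct virtio_softc *vsc = vq->vq_owner;
                int slot, len, r = 0;

                while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
                        /* <sync/unload the payload dmamap for slot and
                         *  complete the request; len is the byte count
                         *  written by the device> */
                        virtio_dequeue_commit(vq, slot);
                        r = 1;
                }
                return r;
        }
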
     818             : 
     819             : /*
     820             :  * Increase the event index in order to delay interrupts.
     821             :  * Returns 0 on success; returns 1 if the used ring has already advanced
      822             :  * too far, and the caller must process the queue again (otherwise, no
     823             :  * more interrupts will happen).
     824             :  */
     825             : int
     826           0 : virtio_postpone_intr(struct virtqueue *vq, uint16_t nslots)
     827             : {
     828             :         uint16_t        idx;
     829             : 
     830           0 :         idx = vq->vq_used_idx + nslots;
     831             : 
     832             :         /* set the new event index: avail_ring->used_event = idx */
     833           0 :         VQ_USED_EVENT(vq) = idx;
     834           0 :         virtio_membar_sync();
     835             : 
     836           0 :         vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
     837           0 :         vq->vq_queued++;
     838             : 
     839           0 :         if (nslots < virtio_nused(vq))
     840           0 :                 return 1;
     841             : 
     842           0 :         return 0;
     843           0 : }
     844             : 
     845             : /*
     846             :  * Postpone interrupt until 3/4 of the available descriptors have been
     847             :  * consumed.
     848             :  */
     849             : int
     850           0 : virtio_postpone_intr_smart(struct virtqueue *vq)
     851             : {
     852             :         uint16_t        nslots;
     853             : 
     854           0 :         nslots = (uint16_t)(vq->vq_avail->idx - vq->vq_used_idx) * 3 / 4;
     855             : 
     856           0 :         return virtio_postpone_intr(vq, nslots);
     857             : }
     858             : 
     859             : /*
     860             :  * Postpone interrupt until all of the available descriptors have been
     861             :  * consumed.
     862             :  */
     863             : int
     864           0 : virtio_postpone_intr_far(struct virtqueue *vq)
     865             : {
     866             :         uint16_t        nslots;
     867             : 
     868           0 :         nslots = (uint16_t)(vq->vq_avail->idx - vq->vq_used_idx);
     869             : 
     870           0 :         return virtio_postpone_intr(vq, nslots);
     871             : }
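
(These helpers are only meaningful when VIRTIO_F_RING_EVENT_IDX was
negotiated, since VQ_USED_EVENT() writes the ring's used_event field. A
completion path that wants fewer interrupts might end with something
along these lines, per the rule in the comment above:)

        /* after draining the used ring in the vq_done callback: */
        if (virtio_postpone_intr_smart(vq)) {
                /* the used ring already advanced past the new event
                 * index; drain it once more or no further interrupt
                 * will arrive for the entries in between */
        }
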
     872             : 
     873             : 
     874             : /*
     875             :  * Start/stop vq interrupt.  No guarantee.
     876             :  */
     877             : void
     878           0 : virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
     879             : {
     880           0 :         if ((sc->sc_features & VIRTIO_F_RING_EVENT_IDX)) {
     881             :                 /*
     882             :                  * No way to disable the interrupt completely with
     883             :                  * RingEventIdx. Instead advance used_event by half
     884             :                  * the possible value. This won't happen soon and
      885             :                  * the possible value. That index will not be reached
      886             :                  * soon and is far enough in the past not to trigger
      887             :                  * a spurious interrupt.
     888           0 :                 VQ_USED_EVENT(vq) = vq->vq_used_idx + 0x8000;
     889           0 :         } else {
     890           0 :                 vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
     891             :         }
     892           0 :         vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
     893           0 :         vq->vq_queued++;
     894           0 : }
     895             : 
     896             : int
     897           0 : virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
     898             : {
     899             :         /*
     900             :          * If event index feature is negotiated, enabling
     901             :          * interrupts is done through setting the latest
     902             :          * consumed index in the used_event field
     903             :          */
     904           0 :         if (sc->sc_features & VIRTIO_F_RING_EVENT_IDX)
     905           0 :                 VQ_USED_EVENT(vq) = vq->vq_used_idx;
     906             :         else
     907           0 :                 vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
     908             : 
     909           0 :         virtio_membar_sync();
     910             : 
     911           0 :         vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
     912           0 :         vq->vq_queued++;
     913             : 
     914           0 :         if (vq->vq_used_idx != vq->vq_used->idx)
     915           0 :                 return 1;
     916             : 
     917           0 :         return 0;
     918           0 : }
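
(The return value of virtio_start_vq_intr() closes the race between
draining the used ring and re-enabling interrupts; a polling loop built
on the two functions might look like this sketch, reusing the
hypothetical "vio_vq_done" handler from above.)

        virtio_stop_vq_intr(vsc, vq);
        for (;;) {
                vio_vq_done(vq);                /* drain the used ring */
                if (virtio_start_vq_intr(vsc, vq) == 0)
                        break;                  /* nothing raced in */
                virtio_stop_vq_intr(vsc, vq);   /* more work: poll again */
        }
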
     919             : 
     920             : /*
      921             :  * Returns the number of used-ring slots that the device has consumed
      922             :  * but the driver has not yet dequeued.
     923             :  */
     924             : int
     925           0 : virtio_nused(struct virtqueue *vq)
     926             : {
     927             :         uint16_t        n;
     928             : 
     929           0 :         n = (uint16_t)(vq->vq_used->idx - vq->vq_used_idx);
     930             :         VIRTIO_ASSERT(n <= vq->vq_num);
     931             : 
     932           0 :         return n;
     933             : }
     934             : 
     935             : #if VIRTIO_DEBUG
     936             : void
     937             : virtio_vq_dump(struct virtqueue *vq)
     938             : {
     939             :         /* Common fields */
     940             :         printf(" + vq num: %d\n", vq->vq_num);
     941             :         printf(" + vq mask: 0x%X\n", vq->vq_mask);
     942             :         printf(" + vq index: %d\n", vq->vq_index);
     943             :         printf(" + vq used idx: %d\n", vq->vq_used_idx);
     944             :         printf(" + vq avail idx: %d\n", vq->vq_avail_idx);
      945             :         printf(" + vq queued: %d\n", vq->vq_queued);
     946             :         /* Avail ring fields */
     947             :         printf(" + avail flags: 0x%X\n", vq->vq_avail->flags);
     948             :         printf(" + avail idx: %d\n", vq->vq_avail->idx);
     949             :         printf(" + avail event: %d\n", VQ_AVAIL_EVENT(vq));
     950             :         /* Used ring fields */
      951             :         printf(" + used flags: 0x%X\n", vq->vq_used->flags);
      952             :         printf(" + used idx: %d\n", vq->vq_used->idx);
     953             :         printf(" + used event: %d\n", VQ_USED_EVENT(vq));
     954             :         printf(" +++++++++++++++++++++++++++\n");
     955             : }
     956             : #endif

Generated by: LCOV version 1.13