/*	$OpenBSD: vioblk.c,v 1.9 2017/08/10 18:06:58 reyk Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * Copyright (c) 1998, 2001 Manuel Bouyer.
 * All rights reserved.
 *
 * This code is based in part on the NetBSD ld_virtio driver and the
 * OpenBSD vdsk driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/bus.h>

#include <sys/device.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/mutex.h>
#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pv/vioblkreg.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#define VIOBLK_DONE	-1

#define MAX_XFER	MAX(MAXPHYS, MAXBSIZE)
/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAX_XFER/PAGE_SIZE + 1)
/* In the virtqueue, we also need space for the request header and status byte */
#define ALLOC_SEGS	(SEG_MAX + 2)

struct virtio_feature_name vioblk_feature_names[] = {
	{ VIRTIO_BLK_F_BARRIER,		"Barrier" },
	{ VIRTIO_BLK_F_SIZE_MAX,	"SizeMax" },
	{ VIRTIO_BLK_F_SEG_MAX,		"SegMax" },
	{ VIRTIO_BLK_F_GEOMETRY,	"Geometry" },
	{ VIRTIO_BLK_F_RO,		"RO" },
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlkSize" },
	{ VIRTIO_BLK_F_SCSI,		"SCSI" },
	{ VIRTIO_BLK_F_FLUSH,		"Flush" },
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology" },
	{ 0,				NULL }
};

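/*
 * Per-request context.  vr_hdr and vr_status are accessed by the device
 * through the vr_cmdsts DMA map, which covers the structure up to
 * VR_DMA_END; everything after that point is driver-private bookkeeping.
 */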
struct virtio_blk_req {
	struct virtio_blk_req_hdr	 vr_hdr;
	uint8_t				 vr_status;
#define VR_DMA_END	offsetof(struct virtio_blk_req, vr_qe_index)
	int16_t				 vr_qe_index;
	int				 vr_len;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_cmdsts;
	bus_dmamap_t			 vr_payload;
	SLIST_ENTRY(virtio_blk_req)	 vr_list;
};

struct vioblk_softc {
	struct device		 sc_dev;
	struct virtio_softc	*sc_virtio;

	struct virtqueue	 sc_vq[1];
	struct virtio_blk_req	*sc_reqs;
	bus_dma_segment_t	 sc_reqs_segs[1];

	struct scsi_adapter	 sc_switch;
	struct scsi_link	 sc_link;
	struct scsi_iopool	 sc_iopool;
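	/* sc_vr_mtx protects the request free list below */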
	struct mutex		 sc_vr_mtx;
	SLIST_HEAD(, virtio_blk_req) sc_freelist;

	int			 sc_notify_on_empty;

	uint32_t		 sc_queued;

	uint64_t		 sc_capacity;
};

int	vioblk_match(struct device *, void *, void *);
void	vioblk_attach(struct device *, struct device *, void *);
int	vioblk_alloc_reqs(struct vioblk_softc *, int);
int	vioblk_vq_done(struct virtqueue *);
void	vioblk_vq_done1(struct vioblk_softc *, struct virtio_softc *,
	    struct virtqueue *, int);
void	vioblk_reset(struct vioblk_softc *);

void	vioblk_scsi_cmd(struct scsi_xfer *);
int	vioblk_dev_probe(struct scsi_link *);
void	vioblk_dev_free(struct scsi_link *);

void	*vioblk_req_get(void *);
void	vioblk_req_put(void *, void *);

void	vioblk_scsi_inq(struct scsi_xfer *);
void	vioblk_scsi_capacity(struct scsi_xfer *);
void	vioblk_scsi_capacity16(struct scsi_xfer *);
void	vioblk_scsi_done(struct scsi_xfer *, int);

struct cfattach vioblk_ca = {
	sizeof(struct vioblk_softc),
	vioblk_match,
	vioblk_attach,
	NULL
};

struct cfdriver vioblk_cd = {
	NULL, "vioblk", DV_DULL
};

int
vioblk_match(struct device *parent, void *match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;
	return 0;
}

#define DNPRINTF(n,x...) \
    do { if (VIRTIO_DEBUG >= n) printf(x); } while (0)

void
vioblk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vioblk_softc *sc = (struct vioblk_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct scsibus_attach_args saa;
	uint32_t features;
	int qsize;

	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = 0;
	if (vsc->sc_child)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	sc->sc_virtio = vsc;

	features = virtio_negotiate_features(vsc,
	    (VIRTIO_BLK_F_RO | VIRTIO_F_NOTIFY_ON_EMPTY |
	     VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
	     VIRTIO_BLK_F_FLUSH),
	    vioblk_feature_names);

	if (features & VIRTIO_BLK_F_SIZE_MAX) {
		uint32_t size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (size_max < PAGE_SIZE) {
			printf("\nMax segment size %u too low\n", size_max);
			goto err;
		}
	}

	if (features & VIRTIO_BLK_F_SEG_MAX) {
		uint32_t seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (seg_max < SEG_MAX) {
			printf("\nMax number of segments %u too small\n",
			    seg_max);
			goto err;
		}
	}

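	/* The device reports capacity in units of 512-byte sectors. */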
	sc->sc_capacity = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAX_XFER, ALLOC_SEGS,
	    "I/O request") != 0) {
		printf("\nCan't alloc virtqueue\n");
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = vioblk_vq_done;

	if (features & VIRTIO_F_NOTIFY_ON_EMPTY) {
		virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
		sc->sc_notify_on_empty = 1;
	} else {
		sc->sc_notify_on_empty = 0;
	}

	sc->sc_queued = 0;

	sc->sc_switch.scsi_cmd = vioblk_scsi_cmd;
	sc->sc_switch.scsi_minphys = scsi_minphys;
	sc->sc_switch.dev_probe = vioblk_dev_probe;
	sc->sc_switch.dev_free = vioblk_dev_free;

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);

	sc->sc_link.openings = vioblk_alloc_reqs(sc, qsize);
	if (sc->sc_link.openings == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}

	sc->sc_link.adapter = &sc->sc_switch;
	sc->sc_link.pool = &sc->sc_iopool;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1;
	sc->sc_link.adapter_target = 2;
	DNPRINTF(1, "%s: qsize: %d\n", __func__, qsize);
	if (features & VIRTIO_BLK_F_RO)
		sc->sc_link.flags |= SDEV_READONLY;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	printf("\n");
	config_found(self, &saa, scsiprint);

	return;
err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * vioblk_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioblk_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioblk_req_get(void *cookie)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	return vr;
}

void
vioblk_req_put(void *cookie, void *io)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = io;

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the virtio_blk_req
	 * and should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

int
vioblk_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
	struct vq_entry *qe;
	int slot;
	int ret = 0;

	if (!sc->sc_notify_on_empty)
		virtio_stop_vq_intr(vsc, vq);
	for (;;) {
		if (virtio_dequeue(vsc, vq, &slot, NULL) != 0) {
			if (sc->sc_notify_on_empty)
				break;
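			/*
			 * Re-enable the interrupt and poll once more, in
			 * case the device posted an entry between the
			 * failed dequeue and the interrupt being
			 * re-armed; otherwise that completion would be
			 * lost.
			 */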
			virtio_start_vq_intr(vsc, vq);
			if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
				break;
		}
		qe = &vq->vq_entries[slot];
		vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
		ret = 1;
	}
	return ret;
}

void
vioblk_vq_done1(struct vioblk_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct scsi_xfer *xs = vr->vr_xs;

	KASSERT(vr->vr_len != VIOBLK_DONE);

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE);
	if (vr->vr_hdr.type != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, vr->vr_len,
		    (vr->vr_hdr.type == VIRTIO_BLK_T_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		DNPRINTF(1, "%s: EIO\n", __func__);
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		xs->error = XS_NOERROR;
		xs->resid = xs->datalen - vr->vr_len;
	}
	vr->vr_len = VIOBLK_DONE;
	scsi_done(xs);
}

void
vioblk_reset(struct vioblk_softc *sc)
{
	int i;

	/* reset device to stop DMA */
	virtio_reset(sc->sc_virtio);

	/* finish requests that have been completed */
	vioblk_vq_done(&sc->sc_vq[0]);

	/* abort all remaining requests */
	for (i = 0; i < sc->sc_link.openings; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		struct scsi_xfer *xs = vr->vr_xs;

		if (vr->vr_len == VIOBLK_DONE)
			continue;

		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		scsi_done(xs);
	}
}

void
vioblk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_blk_req *vr;
	int len, s, timeout, isread, slot, ret, nsegs;
	int error = XS_DRIVER_STUFFUP;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba = 0;
	u_int32_t sector_count = 0;
	uint8_t operation;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VIRTIO_BLK_T_IN;
		isread = 1;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VIRTIO_BLK_T_OUT;
		isread = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if ((vsc->sc_features & VIRTIO_BLK_F_FLUSH) == 0) {
			vioblk_scsi_done(xs, XS_NOERROR);
			return;
		}
		operation = VIRTIO_BLK_T_FLUSH;
		break;

	case INQUIRY:
		vioblk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vioblk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vioblk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vioblk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s: unhandled cmd 0x%02x\n", __func__,
		    xs->cmd->opcode);
		/* FALLTHROUGH */
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands.  SYNCHRONIZE CACHE has the same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
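		/*
		 * In the 6-byte CDB the top bits of the first address
		 * byte carry the obsolete LUN field; mask them off so
		 * only the 21-bit LBA remains.
		 */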
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	s = splbio();
	vr = xs->io;
	slot = vr->vr_qe_index;
	if (operation != VIRTIO_BLK_T_FLUSH) {
		len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
		ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
		    xs->data, len, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		    BUS_DMA_NOWAIT));
		if (ret) {
			printf("%s: bus_dmamap_load: %d\n", __func__, ret);
			error = XS_DRIVER_STUFFUP;
			goto out_done;
		}
		nsegs = vr->vr_payload->dm_nsegs + 2;
	} else {
		len = 0;
		nsegs = 2;
	}

	/*
	 * Adjust the reservation to the number of segments actually
	 * needed, or virtio gets upset.  Note that it may trim UP if
	 * 'xs' is being recycled without getting a new reservation!
	 */
	virtio_enqueue_trim(vq, slot, nsegs);

	vr->vr_xs = xs;
	vr->vr_hdr.type = operation;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = lba;
	vr->vr_len = len;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	if (operation != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

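	/*
	 * Build the descriptor chain: the read-only request header,
	 * then the data payload (device-writable for reads), then the
	 * status byte the device writes back.
	 */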
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), 1);
	if (operation != VIRTIO_BLK_T_FLUSH)
		virtio_enqueue(vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_queued++;

	if (!ISSET(xs->flags, SCSI_POLL)) {
		/* check if some xfers are done: */
		if (sc->sc_queued > 1)
			vioblk_vq_done(vq);
		splx(s);
		return;
	}

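	/* Polling mode: spin for up to 15 seconds for the request. */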
	timeout = 15 * 1000;
	do {
		if (virtio_poll_intr(vsc) && vr->vr_len == VIOBLK_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (timeout <= 0) {
		uint32_t features;

		printf("%s: SCSI_POLL timed out\n", __func__);
		vioblk_reset(sc);
		virtio_reinit_start(vsc);
		features = virtio_negotiate_features(vsc, vsc->sc_features,
		    NULL);
		KASSERT(features == vsc->sc_features);
	}
	splx(s);
	return;

out_done:
	splx(s);
	vioblk_scsi_done(xs, error);
}

void
vioblk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;
	struct scsi_inquiry_data inqd;

	if (ISSET(inq->flags, SI_EVPD)) {
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inqd, sizeof(inqd));

	inqd.device = T_DIRECT;
	inqd.version = 0x05; /* SPC-3 */
	inqd.response_format = 2;
	inqd.additional_length = 32;
	inqd.flags |= SID_CmdQue;
	bcopy("VirtIO  ", inqd.vendor, sizeof(inqd.vendor));
	bcopy("Block Device    ", inqd.product, sizeof(inqd.product));

	bcopy(&inqd, xs->data, MIN(sizeof(inqd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

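	/*
	 * READ CAPACITY reports the address of the last block.  If it
	 * does not fit in 32 bits, return all-ones so the initiator
	 * retries with READ CAPACITY(16).
	 */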
	capacity = sc->sc_capacity - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_capacity - 1, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

int
vioblk_dev_probe(struct scsi_link *link)
{
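	/* A vioblk adapter provides a single disk at target 0, LUN 0. */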
	KASSERT(link->lun == 0);
	if (link->target == 0)
		return (0);
	return (ENODEV);
}

void
vioblk_dev_free(struct scsi_link *link)
{
	printf("%s\n", __func__);
}

int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	struct vring_desc *vd;
	int allocsize, nreqs, r, rsegs, slot, i;
	void *vaddr;

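	/*
	 * With indirect descriptors each request consumes only one
	 * slot in the ring; without them, every request permanently
	 * claims ALLOC_SEGS contiguous descriptors, so fewer requests
	 * fit in the queue.
	 */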
	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = sizeof(struct virtio_blk_req) * nreqs;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory allocation failed, size %d, error %d\n",
		    allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory map failed, error %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DNPRINTF(2, "%s: vd[%d].next = %d should be "
				    "%d\n", __func__, r, vd[r].next,
				    (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DNPRINTF(2, "%s: reserved slots are contiguous "
			    "(good!)\n", __func__);
		}

		vr->vr_qe_index = slot;
		vq->vq_entries[slot].qe_vr_index = i;
		vr->vr_len = VIOBLK_DONE;

		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
		    VR_DMA_END, 1, VR_DMA_END, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
		if (r != 0) {
			printf("cmd dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
		    &vr->vr_hdr, VR_DMA_END, NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("command dmamap load failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAX_XFER,
		    SEG_MAX, MAX_XFER, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			printf("payload dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}
	return nreqs;

err_reqs:
	for (i = 0; i < nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, (caddr_t)sc->sc_reqs,
	    allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return 0;
}