Line data Source code
1 : /* $OpenBSD: vioscsi.c,v 1.12 2017/09/08 05:36:52 deraadt Exp $ */
2 : /*
3 : * Copyright (c) 2013 Google Inc.
4 : *
5 : * Permission to use, copy, modify, and distribute this software for any
6 : * purpose with or without fee is hereby granted, provided that the above
7 : * copyright notice and this permission notice appear in all copies.
8 : *
9 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 : */
17 :
18 : #include <sys/param.h>
19 : #include <sys/systm.h>
20 : #include <sys/device.h>
21 : #include <sys/mutex.h>
22 :
23 : #include <machine/bus.h>
24 : #include <machine/intr.h>
25 :
26 : #include <dev/pv/vioscsireg.h>
27 : #include <dev/pv/virtiovar.h>
28 :
29 : #include <scsi/scsi_all.h>
30 : #include <scsi/scsiconf.h>
31 :
32 : enum { vioscsi_debug = 0 };
33 : #define DPRINTF(f...) do { if (vioscsi_debug) printf(f); } while (0)
34 :
35 : #define MAX_XFER MAX(MAXPHYS,MAXBSIZE)
36 : /* Number of DMA segments for buffers that the device must support */
37 : #define SEG_MAX (MAX_XFER/PAGE_SIZE + 1)
38 : /* In the virtqueue, we need space for header and footer, too */
39 : #define ALLOC_SEGS (SEG_MAX + 2)
40 :
/*
 * One outstanding (or idle) SCSI request.  The request/response headers
 * live inline so a single DMA map (vr_control) covers both; a block of
 * virtqueue descriptors is permanently reserved per request (vr_qe_index).
 */
struct vioscsi_req {
	struct virtio_scsi_req_hdr	vr_req;		/* device-readable request header */
	struct virtio_scsi_res_hdr	vr_res;		/* device-writable response header */
	struct scsi_xfer		*vr_xs;		/* in-flight xfer; NULL when idle/completed */
	bus_dmamap_t			vr_control;	/* map over vr_req + vr_res above */
	bus_dmamap_t			vr_data;	/* map for the xfer's data payload */
	SLIST_ENTRY(vioscsi_req)	vr_list;	/* linkage on sc_freelist */
	int				vr_qe_index;	/* ring slot permanently owned by this req */
};
50 :
/*
 * Per-instance driver state.  sc_dev is first so the generic
 * (struct device *) passed to attach can be cast to this softc.
 */
struct vioscsi_softc {
	struct device		sc_dev;		/* generic autoconf device glue */
	struct scsi_link	sc_link;	/* connection to the SCSI midlayer */
	struct scsibus		*sc_scsibus;	/* child scsibus from config_found() */
	struct scsi_iopool	sc_iopool;	/* hands vioscsi_reqs to the midlayer */
	struct mutex		sc_vr_mtx;	/* protects sc_freelist */

	struct virtqueue	sc_vqs[3];	/* control, event, request queues */
	struct vioscsi_req	*sc_reqs;	/* DMA-able array of requests */
	bus_dma_segment_t	sc_reqs_segs[1];	/* backing segment for sc_reqs */
	SLIST_HEAD(, vioscsi_req) sc_freelist;	/* idle requests, mutex-protected */
};
63 :
64 : int vioscsi_match(struct device *, void *, void *);
65 : void vioscsi_attach(struct device *, struct device *, void *);
66 :
67 : int vioscsi_alloc_reqs(struct vioscsi_softc *,
68 : struct virtio_softc *, int);
69 : void vioscsi_scsi_cmd(struct scsi_xfer *);
70 : int vioscsi_vq_done(struct virtqueue *);
71 : void vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
72 : struct vioscsi_req *);
73 : void *vioscsi_req_get(void *);
74 : void vioscsi_req_put(void *, void *);
75 :
/* Autoconf glue: softc size plus the match/attach entry points. */
struct cfattach vioscsi_ca = {
	sizeof(struct vioscsi_softc),
	vioscsi_match,
	vioscsi_attach,
};

/* Driver definition: device name "vioscsi", no special device class. */
struct cfdriver vioscsi_cd = {
	NULL,
	"vioscsi",
	DV_DULL,
};

/* SCSI midlayer adapter entry points for this HBA. */
struct scsi_adapter vioscsi_switch = {
	vioscsi_scsi_cmd,
	scsi_minphys,
};

/* Virtqueue names, indexed the same way as sc_vqs[] (2 = request). */
const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};
98 :
99 : int
100 0 : vioscsi_match(struct device *parent, void *self, void *aux)
101 : {
102 0 : struct virtio_softc *va = (struct virtio_softc *)aux;
103 :
104 0 : if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
105 0 : return (1);
106 0 : return (0);
107 0 : }
108 :
/*
 * Attach: claim the virtio parent, negotiate (no optional) features,
 * validate the device's segment limit, allocate the three virtqueues,
 * preallocate request structures, and register with the SCSI midlayer.
 * On any failure the child is marked VIRTIO_CHILD_ERROR.
 */
void
vioscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)self;
	struct scsibus_attach_args saa;
	int i, rv;

	if (vsc->sc_child != NULL) {
		printf(": parent already has a child\n");
		return;
	}
	vsc->sc_child = &sc->sc_dev;
	vsc->sc_ipl = IPL_BIO;

	// TODO(matthew): Negotiate hotplug.

	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = nitems(sc->sc_vqs);

	/* Request no optional features, then read the fixed config space. */
	virtio_negotiate_features(vsc, 0, NULL);
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);
	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	/* We build requests with up to SEG_MAX data segments; the device
	 * must accept at least that many. */
	if (seg_max < SEG_MAX) {
		printf("\nMax number of segments %d too small\n", seg_max);
		goto err;
	}

	/* Allocate all three queues: control, event, request. */
	for (i = 0; i < nitems(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAX_XFER,
		    ALLOC_SEGS, vioscsi_vq_names[i]);
		if (rv) {
			printf(": failed to allocate virtqueue %d\n", i);
			goto err;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[2].vq_num;	/* request-queue depth */
	printf(": qsize %d\n", qsize);

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioscsi_req_get, vioscsi_req_put);

	/*
	 * The number of preallocated requests bounds the number of
	 * concurrent commands (openings), further capped by the device's
	 * advertised cmd_per_lun.
	 */
	sc->sc_link.openings = vioscsi_alloc_reqs(sc, vsc, qsize);
	if (sc->sc_link.openings == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	} else if (sc->sc_link.openings > cmd_per_lun)
		sc->sc_link.openings = cmd_per_lun;

	sc->sc_link.adapter = &vioscsi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = max_target;
	sc->sc_link.adapter_buswidth = max_target;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus *)config_found(self, &saa, scsiprint);
	return;

err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}
182 :
/*
 * Midlayer command entry point: translate a scsi_xfer into a virtio-scsi
 * request and enqueue it on the request virtqueue.  For SCSI_POLL
 * commands, spin calling virtio_poll_intr() until completion or a ~1s
 * timeout.  Unsupportable commands are failed with XS_DRIVER_STUFFUP.
 */
void
vioscsi_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioscsi_softc *sc = xs->sc_link->adapter_softc;
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_dev.dv_parent;
	struct vioscsi_req *vr = xs->io;	/* reserved via sc_iopool */
	struct virtio_scsi_req_hdr *req = &vr->vr_req;
	struct virtqueue *vq = &sc->sc_vqs[2];	/* request queue */
	int slot = vr->vr_qe_index;	/* descriptor slot owned by vr */

	DPRINTF("vioscsi_scsi_cmd: enter\n");

	// TODO(matthew): Support bidirectional SCSI commands?
	if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
	    == (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		goto stuffup;
	}

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (xs->sc_link->target >= 256 || xs->sc_link->lun >= 16384)
		goto stuffup;
	req->lun[0] = 1;
	req->lun[1] = xs->sc_link->target;
	/* 0x40 | lun_hi encodes the single-level LUN structure. */
	req->lun[2] = 0x40 | (xs->sc_link->lun >> 8);
	req->lun[3] = xs->sc_link->lun;
	memset(req->lun + 4, 0, 4);

	/* The CDB field is fixed-size; zero-pad shorter commands. */
	if ((size_t)xs->cmdlen > sizeof(req->cdb))
		goto stuffup;
	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	int isread = !!(xs->flags & SCSI_DATA_IN);

	int nsegs = 2;	/* request header + response header */
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		if (bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
		    xs->data, xs->datalen, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		    BUS_DMA_NOWAIT)))
			goto stuffup;
		nsegs += vr->vr_data->dm_nsegs;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	int s = splbio();
	virtio_enqueue_trim(vq, slot, nsegs);
	splx(s);

	/* Flush both headers to the device; prepare the response area. */
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/*
	 * Enqueue order matters: device-readable segments (request header,
	 * then any write data) must precede device-writable ones (response
	 * header, then any read data).
	 */
	s = splbio();
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    1);
	if (xs->flags & SCSI_DATA_OUT)
		virtio_enqueue(vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    0);
	if (xs->flags & SCSI_DATA_IN)
		virtio_enqueue(vq, slot, vr->vr_data, 0);

	virtio_enqueue_commit(vsc, vq, slot, 1);

	if (ISSET(xs->flags, SCSI_POLL)) {
		DPRINTF("vioscsi_scsi_cmd: polling...\n");
		int timeout = 1000;
		do {
			/* vioscsi_req_done() clears vr_xs on completion. */
			virtio_poll_intr(vsc);
			if (vr->vr_xs != xs)
				break;
			delay(1000);
		} while (--timeout > 0);
		if (vr->vr_xs == xs) {
			// TODO(matthew): Abort the request.
			xs->error = XS_TIMEOUT;
			xs->resid = xs->datalen;
			DPRINTF("vioscsi_scsi_cmd: polling timeout\n");
			scsi_done(xs);
		}
		DPRINTF("vioscsi_scsi_cmd: done (timeout=%d)\n", timeout);
	}
	splx(s);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen;
	DPRINTF("vioscsi_scsi_cmd: stuffup\n");
	scsi_done(xs);
}
297 :
/*
 * Complete one request: perform the POST-side DMA syncs (mirroring the
 * PRE syncs done at enqueue time), unload the data map, translate the
 * virtio-scsi response into scsi_xfer error/status/sense, and hand the
 * xfer back to the midlayer via scsi_done().
 */
void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsi_xfer *xs = vr->vr_xs;
	DPRINTF("vioscsi_req_done: enter vr: %p xs: %p\n", vr, xs);

	int isread = !!(xs->flags & SCSI_DATA_IN);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
	}

	/* Transport-level failure: report a generic driver error. */
	if (vr->vr_res.response != VIRTIO_SCSI_S_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		DPRINTF("vioscsi_req_done: stuffup: %d\n", vr->vr_res.response);
		goto done;
	}

	/* Copy back sense data; its presence decides NOERROR vs SENSE. */
	size_t sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF("vioscsi_req_done: done %d, %d, %zd\n",
	    xs->error, xs->status, xs->resid);

done:
	/* Clearing vr_xs is what the SCSI_POLL loop watches for. */
	vr->vr_xs = NULL;
	scsi_done(xs);
}
341 :
342 : int
343 0 : vioscsi_vq_done(struct virtqueue *vq)
344 : {
345 0 : struct virtio_softc *vsc = vq->vq_owner;
346 0 : struct vioscsi_softc *sc = (struct vioscsi_softc *)vsc->sc_child;
347 : struct vq_entry *qe;
348 : struct vioscsi_req *vr;
349 : int ret = 0;
350 :
351 : DPRINTF("vioscsi_vq_done: enter\n");
352 :
353 0 : for (;;) {
354 0 : int r, s, slot;
355 0 : s = splbio();
356 0 : r = virtio_dequeue(vsc, vq, &slot, NULL);
357 0 : splx(s);
358 0 : if (r != 0)
359 0 : break;
360 :
361 : DPRINTF("vioscsi_vq_done: slot=%d\n", slot);
362 0 : qe = &vq->vq_entries[slot];
363 0 : vr = &sc->sc_reqs[qe->qe_vr_index];
364 0 : vioscsi_req_done(sc, vsc, vr);
365 : ret = 1;
366 0 : }
367 :
368 : DPRINTF("vioscsi_vq_done: exit %d\n", ret);
369 :
370 0 : return (ret);
371 : }
372 :
/*
 * vioscsi_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioscsi_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
384 : void *
385 0 : vioscsi_req_get(void *cookie)
386 : {
387 0 : struct vioscsi_softc *sc = cookie;
388 : struct vioscsi_req *vr = NULL;
389 :
390 0 : mtx_enter(&sc->sc_vr_mtx);
391 0 : vr = SLIST_FIRST(&sc->sc_freelist);
392 0 : if (vr != NULL)
393 0 : SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
394 0 : mtx_leave(&sc->sc_vr_mtx);
395 :
396 : DPRINTF("vioscsi_req_get: %p\n", vr);
397 :
398 0 : return (vr);
399 : }
400 :
401 : void
402 0 : vioscsi_req_put(void *cookie, void *io)
403 : {
404 0 : struct vioscsi_softc *sc = cookie;
405 0 : struct vioscsi_req *vr = io;
406 :
407 : DPRINTF("vioscsi_req_put: %p\n", vr);
408 :
409 0 : mtx_enter(&sc->sc_vr_mtx);
410 : /*
411 : * Do *NOT* call virtio_dequeue_commit()!
412 : *
413 : * Descriptors are permanently associated with the vioscsi_req and
414 : * should not be placed on the free list!
415 : */
416 0 : SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
417 0 : mtx_leave(&sc->sc_vr_mtx);
418 0 : }
419 :
/*
 * Allocate DMA-safe memory for the request array and permanently reserve
 * a block of ALLOC_SEGS virtqueue descriptors plus two DMA maps for each
 * request.  Returns the number of requests fully set up (used as the
 * openings count by attach); 0 means attach must fail.  On a partial
 * failure the requests already on the free list remain usable.
 */
int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	struct vring_desc *vd;
	size_t allocsize;
	int i, r, nreqs, rsegs, slot;
	void *vaddr;

	/*
	 * With indirect descriptors each request consumes one ring slot;
	 * otherwise each needs ALLOC_SEGS slots directly in the ring.
	 */
	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	/* One contiguous DMA-able chunk backs every vioscsi_req. */
	allocsize = nreqs * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_alloc, size %zd, error %d\n",
		    allocsize, r);
		return 0;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_map failed, error %d\n", r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 0;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);

	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DPRINTF("vd[%d].next = %d should be %d\n",
				    r, vd[r].next, (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			/* The final reserved descriptor must link to 0. */
			if (r == (ALLOC_SEGS -1) && vd[r].next != 0)
				return i;
			DPRINTF("Reserved slots are contiguous as required!\n");
		}

		/* Bind the request to its ring slot, and vice versa. */
		vr->vr_qe_index = slot;
		vr->vr_req.id = slot;
		vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
		vq->vq_entries[slot].qe_vr_index = i;

		/*
		 * vr_control covers the leading headers of vioscsi_req
		 * (vr_req + vr_res, i.e. everything before vr_xs) as a
		 * single segment.
		 */
		r = bus_dmamap_create(vsc->sc_dmat,
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			printf("bus_dmamap_create vr_control failed, error %d\n", r);
			return i;
		}
		/* vr_data carries the payload: up to SEG_MAX segments. */
		r = bus_dmamap_create(vsc->sc_dmat, MAX_XFER, SEG_MAX,
		    MAX_XFER, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			printf("bus_dmamap_create vr_data failed, error %d\n", r );
			return i;
		}
		/* Load the header map once; it stays loaded for the
		 * lifetime of the request. */
		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("bus_dmamap_load vr_control failed, error %d\n", r );
			return i;
		}

		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}

	return nreqs;
}
|