Line data Source code
1 : /* $OpenBSD: xbf.c,v 1.32 2017/07/17 10:30:03 mikeb Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2016, 2017 Mike Belopuhov
5 : * Copyright (c) 2009, 2011 Mark Kettenis
6 : *
7 : * Permission to use, copy, modify, and distribute this software for any
8 : * purpose with or without fee is hereby granted, provided that the above
9 : * copyright notice and this permission notice appear in all copies.
10 : *
11 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 : */
19 :
20 : #include "bio.h"
21 :
22 : #include <sys/param.h>
23 : #include <sys/systm.h>
24 : #include <sys/atomic.h>
25 : #include <sys/device.h>
26 : #include <sys/kernel.h>
27 : #include <sys/buf.h>
28 : #include <sys/malloc.h>
29 : #include <sys/task.h>
30 :
31 : #include <machine/bus.h>
32 :
33 : #include <dev/pv/xenreg.h>
34 : #include <dev/pv/xenvar.h>
35 :
36 : #include <scsi/scsi_all.h>
37 : #include <scsi/cd.h>
38 : #include <scsi/scsi_disk.h>
39 : #include <scsi/scsiconf.h>
40 :
41 : /* #define XBF_DEBUG */
42 :
43 : #ifdef XBF_DEBUG
44 : #define DPRINTF(x...) printf(x)
45 : #else
46 : #define DPRINTF(x...)
47 : #endif
48 :
49 : #define XBF_OP_READ 0
50 : #define XBF_OP_WRITE 1
51 : #define XBF_OP_BARRIER 2 /* feature-barrier */
52 : #define XBF_OP_FLUSH 3 /* feature-flush-cache */
53 : #define XBF_OP_DISCARD 5 /* feature-discard */
54 : #define XBF_OP_INDIRECT 6 /* feature-max-indirect-segments */
55 :
56 : #define XBF_MAX_SGE 11
57 : #define XBF_MAX_ISGE 8
58 :
59 : #define XBF_SEC_SHIFT 9
60 :
61 : #define XBF_CDROM 1
62 : #define XBF_REMOVABLE 2
63 : #define XBF_READONLY 4
64 :
65 : #define XBF_OK 0
66 : #define XBF_EIO -1 /* generic failure */
67 : #define XBF_EOPNOTSUPP -2 /* only for XBF_OP_BARRIER */
68 :
/*
 * Scatter-gather element: one page reference covering a range of
 * 512-byte sectors within that page (sector indices 0..7, enforced
 * by the KASSERTs in xbf_load_cmd()/xbf_bounce_cmd()).
 */
struct xbf_sge {
	uint32_t	 sge_ref;	/* page ref from bus_dma (ds_addr) */
	uint8_t		 sge_first;	/* first sector within the page */
	uint8_t		 sge_last;	/* last sector within the page */
	uint16_t	 sge_pad;	/* unused; keeps 8-byte element size */
} __packed;
75 :
/*
 * Generic I/O request (shared-ring wire format).  The amd64-only
 * padding keeps req_id 8-byte aligned so the layout matches the
 * backend's view of the ring ABI.
 */
struct xbf_req {
	uint8_t		 req_op;	/* XBF_OP_* operation code */
	uint8_t		 req_nsegs;	/* valid entries in req_sgl[] */
	uint16_t	 req_unit;	/* virtual device unit (sc_unit) */
#ifdef __amd64__
	uint32_t	 req_pad;
#endif
	uint64_t	 req_id;	/* echoed back by backend in rsp_id */
	uint64_t	 req_sector;	/* first sector, 512-byte units */
	struct xbf_sge	 req_sgl[XBF_MAX_SGE];
} __packed;
88 :
/*
 * Indirect I/O request (XBF_OP_INDIRECT wire format).  Declared for
 * layout completeness; the visible code paths never build one.
 */
struct xbf_ireq {
	uint8_t		 req_op;	/* XBF_OP_INDIRECT */
	uint8_t		 req_iop;	/* the actual operation to perform */
	uint16_t	 req_nsegs;
#ifdef __amd64__
	uint32_t	 req_pad;
#endif
	uint64_t	 req_id;
	uint64_t	 req_sector;
	uint16_t	 req_unit;
	uint32_t	 req_gref[XBF_MAX_ISGE]; /* refs of SGE pages */
#ifdef __i386__
	uint64_t	 req_pad;	/* pad request to descriptor size */
#endif
} __packed;
105 :
/*
 * Response descriptor written by the backend into the same ring slot
 * space as requests (see union xbf_ring_desc).
 */
struct xbf_rsp {
	uint64_t	 rsp_id;	/* req_id of the completed request */
	uint8_t		 rsp_op;	/* operation that completed */
	uint8_t		 rsp_pad1;
	int16_t		 rsp_status;	/* XBF_OK, XBF_EIO or XBF_EOPNOTSUPP */
#ifdef __amd64__
	uint32_t	 rsp_pad2;
#endif
} __packed;
115 :
/*
 * Ring slots are shared between the three message formats: the
 * frontend writes requests and the backend overwrites the same slot
 * with the response.
 */
union xbf_ring_desc {
	struct xbf_req	 xrd_req;
	struct xbf_ireq	 xrd_ireq;
	struct xbf_rsp	 xrd_rsp;
} __packed;
121 :
#define XBF_MIN_RING_SIZE	1	/* ring size limits, in pages */
#define XBF_MAX_RING_SIZE	8
#define XBF_MAX_REQS		256	/* must be a power of 2 */

/*
 * Shared ring header followed by the descriptor array.  Producer and
 * consumer indices are free-running; slots are selected by masking
 * with (number of descriptors - 1), see xbf_intr()/xbf_submit_cmd().
 */
struct xbf_ring {
	volatile uint32_t	 xr_prod;	/* request producer (us) */
	volatile uint32_t	 xr_prod_event;
	volatile uint32_t	 xr_cons;	/* response producer (backend) */
	volatile uint32_t	 xr_cons_event;
	uint32_t		 xr_reserved[12];
	union xbf_ring_desc	 xr_desc[0];	/* sized at ring creation */
} __packed;
134 :
/*
 * Bookkeeping for a chunk of DMA-able memory: the tag, the map, the
 * backing segments and the kernel mapping, kept together so that
 * xbf_dma_free() can undo xbf_dma_alloc() completely.
 */
struct xbf_dma_mem {
	bus_size_t		 dma_size;
	bus_dma_tag_t		 dma_tag;
	bus_dmamap_t		 dma_map;
	bus_dma_segment_t	*dma_seg;
	int			 dma_nsegs;	/* total amount */
	int			 dma_rsegs;	/* used amount */
	caddr_t			 dma_vaddr;
};
144 :
/*
 * Per-transfer command control block.  A single SCSI transfer may
 * span several ring descriptors (ccb_first..ccb_last); ccb_want and
 * ccb_seen are bitmasks used by xbf_complete_cmd() to decide when
 * every chunk of the transfer has completed.
 */
struct xbf_ccb {
	struct scsi_xfer	*ccb_xfer;  /* associated transfer */
	bus_dmamap_t		 ccb_dmap;  /* transfer map */
	struct xbf_dma_mem	 ccb_bbuf;  /* bounce buffer */
	uint32_t		 ccb_first; /* first descriptor */
	uint32_t		 ccb_last;  /* last descriptor */
	uint16_t		 ccb_want;  /* expected chunks */
	uint16_t		 ccb_seen;  /* completed chunks */
	TAILQ_ENTRY(xbf_ccb)	 ccb_link;
};
TAILQ_HEAD(xbf_ccb_queue, xbf_ccb);
156 :
/*
 * Per-device software state for one Xen virtual block device.
 */
struct xbf_softc {
	struct device		 sc_dev;
	struct device		*sc_parent;	/* the xen(4) controller */
	char			 sc_node[XEN_MAX_NODE_LEN];
	char			 sc_backend[XEN_MAX_BACKEND_LEN];
	bus_dma_tag_t		 sc_dmat;
	int			 sc_domid;	/* backend domain id */

	xen_intr_handle_t	 sc_xih;	/* event channel handle */

	int			 sc_state;
#define  XBF_CONNECTED		4
#define  XBF_CLOSING		5

	int			 sc_caps;	/* backend features */
#define  XBF_CAP_BARRIER	0x0001
#define  XBF_CAP_FLUSH		0x0002

	uint32_t		 sc_type;	/* XBF_CDROM or 0 */
	uint32_t		 sc_unit;	/* xenstore "virtual-device" */
	char			 sc_dtype[16];	/* xenstore "device-type" */
	char			 sc_prod[16];	/* product string for INQUIRY */

	uint64_t		 sc_disk_size;	/* in sectors */
	uint32_t		 sc_block_size;	/* in bytes */

	/* Ring */
	struct xbf_ring		*sc_xr;
	uint32_t		 sc_xr_cons;	/* shadow consumer index */
	uint32_t		 sc_xr_prod;	/* shadow producer index */
	uint32_t		 sc_xr_size;	/* in pages */
	struct xbf_dma_mem	 sc_xr_dma;
	uint32_t		 sc_xr_ref[XBF_MAX_RING_SIZE];
	int			 sc_xr_ndesc;	/* descriptors, power of 2 */

	/* Maximum number of blocks that one descriptor may refer to */
	int			 sc_xrd_nblk;

	/* CCBs */
	int			 sc_nccb;
	struct xbf_ccb		*sc_ccbs;
	struct xbf_ccb_queue	 sc_ccb_fq; /* free queue */
	struct xbf_ccb_queue	 sc_ccb_sq; /* pending requests */
	struct mutex		 sc_ccb_fqlck;
	struct mutex		 sc_ccb_sqlck;

	struct scsi_iopool	 sc_iopool;
	struct scsi_adapter	 sc_switch;
	struct scsi_link	 sc_link;
	struct device		*sc_scsibus;
};
208 :
209 : int xbf_match(struct device *, void *, void *);
210 : void xbf_attach(struct device *, struct device *, void *);
211 : int xbf_detach(struct device *, int);
212 :
/* Autoconf glue: driver name and match/attach/detach entry points. */
struct cfdriver xbf_cd = {
	NULL, "xbf", DV_DULL
};

const struct cfattach xbf_ca = {
	sizeof(struct xbf_softc), xbf_match, xbf_attach, xbf_detach
};
220 :
221 : void xbf_intr(void *);
222 :
223 : int xbf_load_cmd(struct scsi_xfer *);
224 : int xbf_bounce_cmd(struct scsi_xfer *);
225 : void xbf_reclaim_cmd(struct scsi_xfer *);
226 :
227 : void xbf_scsi_cmd(struct scsi_xfer *);
228 : int xbf_submit_cmd(struct scsi_xfer *);
229 : int xbf_poll_cmd(struct scsi_xfer *);
230 : void xbf_complete_cmd(struct xbf_softc *, struct xbf_ccb_queue *, int);
231 : int xbf_dev_probe(struct scsi_link *);
232 :
233 : void xbf_scsi_inq(struct scsi_xfer *);
234 : void xbf_scsi_inquiry(struct scsi_xfer *);
235 : void xbf_scsi_capacity(struct scsi_xfer *);
236 : void xbf_scsi_capacity16(struct scsi_xfer *);
237 : void xbf_scsi_done(struct scsi_xfer *, int);
238 :
239 : int xbf_dma_alloc(struct xbf_softc *, struct xbf_dma_mem *,
240 : bus_size_t, int, int);
241 : void xbf_dma_free(struct xbf_softc *, struct xbf_dma_mem *);
242 :
243 : int xbf_get_type(struct xbf_softc *);
244 : int xbf_init(struct xbf_softc *);
245 : int xbf_ring_create(struct xbf_softc *);
246 : void xbf_ring_destroy(struct xbf_softc *);
247 : void xbf_stop(struct xbf_softc *);
248 :
249 : int xbf_alloc_ccbs(struct xbf_softc *);
250 : void xbf_free_ccbs(struct xbf_softc *);
251 : void *xbf_get_ccb(void *);
252 : void xbf_put_ccb(void *, void *);
253 :
254 : int
255 0 : xbf_match(struct device *parent, void *match, void *aux)
256 : {
257 0 : struct xen_attach_args *xa = aux;
258 :
259 0 : if (strcmp("vbd", xa->xa_name))
260 0 : return (0);
261 :
262 0 : return (1);
263 0 : }
264 :
265 : void
266 0 : xbf_attach(struct device *parent, struct device *self, void *aux)
267 : {
268 0 : struct xen_attach_args *xa = aux;
269 0 : struct xbf_softc *sc = (struct xbf_softc *)self;
270 0 : struct scsibus_attach_args saa;
271 :
272 0 : sc->sc_parent = parent;
273 0 : sc->sc_dmat = xa->xa_dmat;
274 0 : sc->sc_domid = xa->xa_domid;
275 :
276 0 : memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
277 0 : memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);
278 :
279 0 : if (xbf_get_type(sc))
280 0 : return;
281 :
282 0 : if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xbf_intr, sc,
283 0 : sc->sc_dev.dv_xname)) {
284 0 : printf(": failed to establish an interrupt\n");
285 0 : return;
286 : }
287 0 : xen_intr_mask(sc->sc_xih);
288 :
289 0 : printf(" backend %d channel %u: %s\n", sc->sc_domid, sc->sc_xih,
290 0 : sc->sc_dtype);
291 :
292 0 : if (xbf_init(sc))
293 : goto error;
294 :
295 0 : if (xen_intr_unmask(sc->sc_xih)) {
296 0 : printf("%s: failed to enable interrupts\n",
297 : sc->sc_dev.dv_xname);
298 0 : goto error;
299 : }
300 :
301 0 : sc->sc_switch.scsi_cmd = xbf_scsi_cmd;
302 0 : sc->sc_switch.scsi_minphys = scsi_minphys;
303 0 : sc->sc_switch.dev_probe = xbf_dev_probe;
304 :
305 0 : sc->sc_link.adapter = &sc->sc_switch;
306 0 : sc->sc_link.adapter_softc = self;
307 0 : sc->sc_link.adapter_buswidth = 2;
308 0 : sc->sc_link.luns = 1;
309 0 : sc->sc_link.adapter_target = 2;
310 0 : sc->sc_link.openings = sc->sc_nccb;
311 0 : sc->sc_link.pool = &sc->sc_iopool;
312 :
313 0 : bzero(&saa, sizeof(saa));
314 0 : saa.saa_sc_link = &sc->sc_link;
315 0 : sc->sc_scsibus = config_found(self, &saa, scsiprint);
316 :
317 0 : xen_unplug_emulated(parent, XEN_UNPLUG_IDE | XEN_UNPLUG_IDESEC);
318 :
319 0 : return;
320 :
321 : error:
322 0 : xen_intr_disestablish(sc->sc_xih);
323 0 : }
324 :
325 : int
326 0 : xbf_detach(struct device *self, int flags)
327 : {
328 0 : struct xbf_softc *sc = (struct xbf_softc *)self;
329 0 : int ostate = sc->sc_state;
330 :
331 0 : sc->sc_state = XBF_CLOSING;
332 :
333 0 : xen_intr_mask(sc->sc_xih);
334 0 : xen_intr_barrier(sc->sc_xih);
335 :
336 0 : if (ostate == XBF_CONNECTED) {
337 0 : xen_intr_disestablish(sc->sc_xih);
338 0 : xbf_stop(sc);
339 0 : }
340 :
341 0 : if (sc->sc_scsibus)
342 0 : return (config_detach(sc->sc_scsibus, flags | DETACH_FORCE));
343 :
344 0 : return (0);
345 0 : }
346 :
347 : void
348 0 : xbf_intr(void *xsc)
349 : {
350 0 : struct xbf_softc *sc = xsc;
351 0 : struct xbf_ring *xr = sc->sc_xr;
352 0 : struct xbf_dma_mem *dma = &sc->sc_xr_dma;
353 0 : struct xbf_ccb_queue cq;
354 : struct xbf_ccb *ccb, *nccb;
355 : uint32_t cons;
356 : int desc, s;
357 :
358 0 : TAILQ_INIT(&cq);
359 :
360 0 : for (;;) {
361 0 : bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
362 : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
363 :
364 0 : for (cons = sc->sc_xr_cons; cons != xr->xr_cons; cons++) {
365 0 : desc = cons & (sc->sc_xr_ndesc - 1);
366 0 : xbf_complete_cmd(sc, &cq, desc);
367 : }
368 :
369 0 : sc->sc_xr_cons = cons;
370 :
371 0 : if (TAILQ_EMPTY(&cq))
372 : break;
373 :
374 0 : s = splbio();
375 0 : KERNEL_LOCK();
376 0 : TAILQ_FOREACH_SAFE(ccb, &cq, ccb_link, nccb) {
377 0 : TAILQ_REMOVE(&cq, ccb, ccb_link);
378 0 : xbf_reclaim_cmd(ccb->ccb_xfer);
379 0 : scsi_done(ccb->ccb_xfer);
380 : }
381 0 : KERNEL_UNLOCK();
382 0 : splx(s);
383 : }
384 0 : }
385 :
386 : void
387 0 : xbf_scsi_cmd(struct scsi_xfer *xs)
388 : {
389 0 : struct xbf_softc *sc = xs->sc_link->adapter_softc;
390 :
391 0 : switch (xs->cmd->opcode) {
392 : case READ_BIG:
393 : case READ_COMMAND:
394 : case READ_12:
395 : case READ_16:
396 : case WRITE_BIG:
397 : case WRITE_COMMAND:
398 : case WRITE_12:
399 : case WRITE_16:
400 0 : if (sc->sc_state != XBF_CONNECTED) {
401 0 : xbf_scsi_done(xs, XS_SELTIMEOUT);
402 0 : return;
403 : }
404 : break;
405 : case SYNCHRONIZE_CACHE:
406 0 : if (!(sc->sc_caps & (XBF_CAP_BARRIER|XBF_CAP_FLUSH))) {
407 0 : xbf_scsi_done(xs, XS_NOERROR);
408 0 : return;
409 : }
410 : break;
411 : case INQUIRY:
412 0 : xbf_scsi_inq(xs);
413 0 : return;
414 : case READ_CAPACITY:
415 0 : xbf_scsi_capacity(xs);
416 0 : return;
417 : case READ_CAPACITY_16:
418 0 : xbf_scsi_capacity16(xs);
419 0 : return;
420 : case TEST_UNIT_READY:
421 : case START_STOP:
422 : case PREVENT_ALLOW:
423 0 : xbf_scsi_done(xs, XS_NOERROR);
424 0 : return;
425 : default:
426 0 : printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
427 : case MODE_SENSE:
428 : case MODE_SENSE_BIG:
429 : case REPORT_LUNS:
430 : case READ_TOC:
431 0 : xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
432 0 : return;
433 : }
434 :
435 0 : if (xbf_submit_cmd(xs)) {
436 0 : xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
437 0 : return;
438 : }
439 :
440 0 : if (ISSET(xs->flags, SCSI_POLL) && xbf_poll_cmd(xs)) {
441 0 : printf("%s: op %#x timed out\n", sc->sc_dev.dv_xname,
442 0 : xs->cmd->opcode);
443 0 : if (sc->sc_state == XBF_CONNECTED) {
444 0 : xbf_reclaim_cmd(xs);
445 0 : xbf_scsi_done(xs, XS_TIMEOUT);
446 0 : }
447 0 : return;
448 : }
449 0 : }
450 :
451 : int
452 0 : xbf_load_cmd(struct scsi_xfer *xs)
453 : {
454 0 : struct xbf_softc *sc = xs->sc_link->adapter_softc;
455 0 : struct xbf_ccb *ccb = xs->io;
456 : struct xbf_sge *sge;
457 : union xbf_ring_desc *xrd;
458 : bus_dmamap_t map;
459 : int error, mapflags, nsg, seg;
460 : int desc, ndesc = 0;
461 :
462 0 : map = ccb->ccb_dmap;
463 :
464 0 : mapflags = (sc->sc_domid << 16);
465 0 : if (ISSET(xs->flags, SCSI_NOSLEEP))
466 0 : mapflags |= BUS_DMA_NOWAIT;
467 : else
468 : mapflags |= BUS_DMA_WAITOK;
469 0 : if (ISSET(xs->flags, SCSI_DATA_IN))
470 0 : mapflags |= BUS_DMA_READ;
471 : else
472 0 : mapflags |= BUS_DMA_WRITE;
473 :
474 0 : error = bus_dmamap_load(sc->sc_dmat, map, xs->data, xs->datalen,
475 : NULL, mapflags);
476 0 : if (error) {
477 0 : printf("%s: failed to load %d bytes of data\n",
478 0 : sc->sc_dev.dv_xname, xs->datalen);
479 0 : return (error);
480 : }
481 :
482 0 : xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
483 : /* seg is the segment map iterator, nsg is the s-g list iterator */
484 0 : for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
485 0 : if (nsg == XBF_MAX_SGE) {
486 : /* Number of segments so far */
487 0 : xrd->xrd_req.req_nsegs = nsg;
488 : /* Pick next descriptor */
489 0 : ndesc++;
490 0 : desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
491 0 : xrd = &sc->sc_xr->xr_desc[desc];
492 : nsg = 0;
493 0 : }
494 0 : sge = &xrd->xrd_req.req_sgl[nsg];
495 0 : sge->sge_ref = map->dm_segs[seg].ds_addr;
496 0 : sge->sge_first = nsg > 0 ? 0 :
497 0 : (((vaddr_t)xs->data + ndesc * sc->sc_xrd_nblk *
498 0 : (1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
499 0 : sge->sge_last = sge->sge_first +
500 0 : (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;
501 :
502 : DPRINTF("%s: seg %d/%d ref %lu len %lu first %u last %u\n",
503 : sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
504 : map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
505 : sge->sge_first, sge->sge_last);
506 :
507 0 : KASSERT(sge->sge_last <= 7);
508 : }
509 :
510 0 : xrd->xrd_req.req_nsegs = nsg;
511 :
512 0 : return (0);
513 0 : }
514 :
515 : int
516 0 : xbf_bounce_cmd(struct scsi_xfer *xs)
517 : {
518 0 : struct xbf_softc *sc = xs->sc_link->adapter_softc;
519 0 : struct xbf_ccb *ccb = xs->io;
520 : struct xbf_sge *sge;
521 : struct xbf_dma_mem *dma;
522 : union xbf_ring_desc *xrd;
523 : bus_dmamap_t map;
524 : bus_size_t size;
525 : int error, mapflags, nsg, seg;
526 : int desc, ndesc = 0;
527 :
528 0 : size = roundup(xs->datalen, PAGE_SIZE);
529 0 : if (size > MAXPHYS)
530 0 : return (EFBIG);
531 :
532 0 : mapflags = (sc->sc_domid << 16);
533 0 : if (ISSET(xs->flags, SCSI_NOSLEEP))
534 0 : mapflags |= BUS_DMA_NOWAIT;
535 : else
536 : mapflags |= BUS_DMA_WAITOK;
537 0 : if (ISSET(xs->flags, SCSI_DATA_IN))
538 0 : mapflags |= BUS_DMA_READ;
539 : else
540 0 : mapflags |= BUS_DMA_WRITE;
541 :
542 0 : dma = &ccb->ccb_bbuf;
543 0 : error = xbf_dma_alloc(sc, dma, size, size / PAGE_SIZE, mapflags);
544 0 : if (error) {
545 : DPRINTF("%s: failed to allocate a %lu byte bounce buffer\n",
546 : sc->sc_dev.dv_xname, size);
547 0 : return (error);
548 : }
549 :
550 0 : map = dma->dma_map;
551 :
552 : DPRINTF("%s: bouncing %d bytes via %lu size map with %d segments\n",
553 : sc->sc_dev.dv_xname, xs->datalen, size, map->dm_nsegs);
554 :
555 0 : if (ISSET(xs->flags, SCSI_DATA_OUT))
556 0 : memcpy(dma->dma_vaddr, xs->data, xs->datalen);
557 :
558 0 : xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
559 : /* seg is the map segment iterator, nsg is the s-g element iterator */
560 0 : for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
561 0 : if (nsg == XBF_MAX_SGE) {
562 : /* Number of segments so far */
563 0 : xrd->xrd_req.req_nsegs = nsg;
564 : /* Pick next descriptor */
565 0 : ndesc++;
566 0 : desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
567 0 : xrd = &sc->sc_xr->xr_desc[desc];
568 : nsg = 0;
569 0 : }
570 0 : sge = &xrd->xrd_req.req_sgl[nsg];
571 0 : sge->sge_ref = map->dm_segs[seg].ds_addr;
572 0 : sge->sge_first = nsg > 0 ? 0 :
573 0 : (((vaddr_t)dma->dma_vaddr + ndesc * sc->sc_xrd_nblk *
574 0 : (1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
575 0 : sge->sge_last = sge->sge_first +
576 0 : (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;
577 :
578 : DPRINTF("%s: seg %d/%d ref %lu len %lu first %u last %u\n",
579 : sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
580 : map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
581 : sge->sge_first, sge->sge_last);
582 :
583 0 : KASSERT(sge->sge_last <= 7);
584 : }
585 :
586 0 : xrd->xrd_req.req_nsegs = nsg;
587 :
588 0 : return (0);
589 0 : }
590 :
591 : void
592 0 : xbf_reclaim_cmd(struct scsi_xfer *xs)
593 : {
594 0 : struct xbf_softc *sc = xs->sc_link->adapter_softc;
595 0 : struct xbf_ccb *ccb = xs->io;
596 0 : struct xbf_dma_mem *dma = &ccb->ccb_bbuf;
597 :
598 0 : if (dma->dma_size == 0)
599 0 : return;
600 :
601 0 : if (ISSET(xs->flags, SCSI_DATA_IN))
602 0 : memcpy(xs->data, (caddr_t)dma->dma_vaddr, xs->datalen);
603 :
604 0 : xbf_dma_free(sc, &ccb->ccb_bbuf);
605 0 : }
606 :
607 : int
608 0 : xbf_submit_cmd(struct scsi_xfer *xs)
609 : {
610 0 : struct xbf_softc *sc = xs->sc_link->adapter_softc;
611 0 : struct xbf_ccb *ccb = xs->io;
612 : union xbf_ring_desc *xrd;
613 : struct scsi_rw *rw;
614 : struct scsi_rw_big *rwb;
615 : struct scsi_rw_12 *rw12;
616 : struct scsi_rw_16 *rw16;
617 : uint64_t lba = 0;
618 : uint32_t nblk = 0;
619 : uint8_t operation = 0;
620 : unsigned int ndesc = 0;
621 : int desc, error;
622 :
623 0 : switch (xs->cmd->opcode) {
624 : case READ_BIG:
625 : case READ_COMMAND:
626 : case READ_12:
627 : case READ_16:
628 : operation = XBF_OP_READ;
629 0 : break;
630 :
631 : case WRITE_BIG:
632 : case WRITE_COMMAND:
633 : case WRITE_12:
634 : case WRITE_16:
635 : operation = XBF_OP_WRITE;
636 0 : break;
637 :
638 : case SYNCHRONIZE_CACHE:
639 0 : if (sc->sc_caps & XBF_CAP_FLUSH)
640 0 : operation = XBF_OP_FLUSH;
641 0 : else if (sc->sc_caps & XBF_CAP_BARRIER)
642 0 : operation = XBF_OP_BARRIER;
643 : break;
644 : }
645 :
646 : /*
647 : * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE
648 : * has the same layout as 10-byte READ/WRITE commands.
649 : */
650 0 : if (xs->cmdlen == 6) {
651 0 : rw = (struct scsi_rw *)xs->cmd;
652 0 : lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
653 0 : nblk = rw->length ? rw->length : 0x100;
654 0 : } else if (xs->cmdlen == 10) {
655 0 : rwb = (struct scsi_rw_big *)xs->cmd;
656 0 : lba = _4btol(rwb->addr);
657 0 : nblk = _2btol(rwb->length);
658 0 : } else if (xs->cmdlen == 12) {
659 0 : rw12 = (struct scsi_rw_12 *)xs->cmd;
660 0 : lba = _4btol(rw12->addr);
661 0 : nblk = _4btol(rw12->length);
662 0 : } else if (xs->cmdlen == 16) {
663 0 : rw16 = (struct scsi_rw_16 *)xs->cmd;
664 0 : lba = _8btol(rw16->addr);
665 0 : nblk = _4btol(rw16->length);
666 0 : }
667 :
668 0 : ccb->ccb_want = ccb->ccb_seen = 0;
669 :
670 0 : do {
671 0 : desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
672 0 : if (ndesc == 0)
673 0 : ccb->ccb_first = desc;
674 :
675 0 : xrd = &sc->sc_xr->xr_desc[desc];
676 0 : xrd->xrd_req.req_op = operation;
677 0 : xrd->xrd_req.req_unit = (uint16_t)sc->sc_unit;
678 0 : xrd->xrd_req.req_sector = lba + ndesc * sc->sc_xrd_nblk;
679 :
680 0 : ccb->ccb_want |= 1 << ndesc;
681 0 : ndesc++;
682 0 : } while (ndesc * sc->sc_xrd_nblk < nblk);
683 :
684 0 : ccb->ccb_last = desc;
685 :
686 0 : if (operation == XBF_OP_READ || operation == XBF_OP_WRITE) {
687 : DPRINTF("%s: desc %u,%u %s%s lba %llu nsec %u "
688 : "len %d\n", sc->sc_dev.dv_xname, ccb->ccb_first,
689 : ccb->ccb_last, operation == XBF_OP_READ ? "read" :
690 : "write", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
691 : lba, nblk, xs->datalen);
692 :
693 0 : if (((vaddr_t)xs->data & ((1 << XBF_SEC_SHIFT) - 1)) == 0)
694 0 : error = xbf_load_cmd(xs);
695 : else
696 0 : error = xbf_bounce_cmd(xs);
697 0 : if (error)
698 0 : return (-1);
699 : } else {
700 : DPRINTF("%s: desc %u %s%s lba %llu\n", sc->sc_dev.dv_xname,
701 : ccb->ccb_first, operation == XBF_OP_FLUSH ? "flush" :
702 : "barrier", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
703 : lba);
704 0 : xrd->xrd_req.req_nsegs = 0;
705 : }
706 :
707 0 : ccb->ccb_xfer = xs;
708 :
709 0 : bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0,
710 : ccb->ccb_dmap->dm_mapsize, BUS_DMASYNC_PREREAD |
711 : BUS_DMASYNC_PREWRITE);
712 :
713 0 : mtx_enter(&sc->sc_ccb_sqlck);
714 0 : TAILQ_INSERT_TAIL(&sc->sc_ccb_sq, ccb, ccb_link);
715 0 : mtx_leave(&sc->sc_ccb_sqlck);
716 :
717 0 : sc->sc_xr_prod += ndesc;
718 0 : sc->sc_xr->xr_prod = sc->sc_xr_prod;
719 0 : sc->sc_xr->xr_cons_event = sc->sc_xr_prod;
720 :
721 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
722 : sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD |
723 : BUS_DMASYNC_PREWRITE);
724 :
725 0 : xen_intr_signal(sc->sc_xih);
726 :
727 0 : return (0);
728 0 : }
729 :
730 : int
731 0 : xbf_poll_cmd(struct scsi_xfer *xs)
732 : {
733 : int timo = 1000;
734 :
735 0 : do {
736 0 : if (ISSET(xs->flags, ITSDONE))
737 : break;
738 0 : if (ISSET(xs->flags, SCSI_NOSLEEP))
739 0 : delay(10);
740 : else
741 0 : tsleep(xs, PRIBIO, "xbfpoll", 1);
742 0 : xbf_intr(xs->sc_link->adapter_softc);
743 0 : } while(--timo > 0);
744 :
745 0 : return (0);
746 : }
747 :
748 : void
749 0 : xbf_complete_cmd(struct xbf_softc *sc, struct xbf_ccb_queue *cq, int desc)
750 : {
751 : struct xbf_ccb *ccb;
752 : union xbf_ring_desc *xrd;
753 : bus_dmamap_t map;
754 : uint32_t id, chunk;
755 : int error;
756 :
757 0 : xrd = &sc->sc_xr->xr_desc[desc];
758 0 : error = xrd->xrd_rsp.rsp_status == XBF_OK ? XS_NOERROR :
759 : XS_DRIVER_STUFFUP;
760 :
761 0 : mtx_enter(&sc->sc_ccb_sqlck);
762 :
763 : /*
764 : * To find a CCB for id equal to x within an interval [a, b] we must
765 : * locate a CCB such that (x - a) mod N <= (b - a) mod N, where a is
766 : * the first descriptor, b is the last one and N is the ring size.
767 : */
768 0 : id = (uint32_t)xrd->xrd_rsp.rsp_id;
769 0 : TAILQ_FOREACH(ccb, &sc->sc_ccb_sq, ccb_link) {
770 0 : if (((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)) <=
771 0 : ((ccb->ccb_last - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)))
772 : break;
773 : }
774 0 : KASSERT(ccb != NULL);
775 :
776 : /* Assert that this chunk belongs to this CCB */
777 0 : chunk = 1 << ((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1));
778 0 : KASSERT((ccb->ccb_want & chunk) != 0);
779 0 : KASSERT((ccb->ccb_seen & chunk) == 0);
780 :
781 : /* When all chunks are collected remove the CCB from the queue */
782 0 : ccb->ccb_seen |= chunk;
783 0 : if (ccb->ccb_seen == ccb->ccb_want)
784 0 : TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);
785 :
786 0 : mtx_leave(&sc->sc_ccb_sqlck);
787 :
788 : DPRINTF("%s: completing desc %d(%llu) op %u with error %d\n",
789 : sc->sc_dev.dv_xname, desc, xrd->xrd_rsp.rsp_id,
790 : xrd->xrd_rsp.rsp_op, xrd->xrd_rsp.rsp_status);
791 :
792 0 : memset(xrd, 0, sizeof(*xrd));
793 0 : xrd->xrd_req.req_id = desc;
794 :
795 0 : if (ccb->ccb_seen != ccb->ccb_want)
796 0 : return;
797 :
798 0 : if (ccb->ccb_bbuf.dma_size > 0)
799 0 : map = ccb->ccb_bbuf.dma_map;
800 : else
801 0 : map = ccb->ccb_dmap;
802 :
803 0 : bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
804 : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
805 0 : bus_dmamap_unload(sc->sc_dmat, map);
806 :
807 0 : ccb->ccb_xfer->resid = 0;
808 0 : ccb->ccb_xfer->error = error;
809 0 : TAILQ_INSERT_TAIL(cq, ccb, ccb_link);
810 0 : }
811 :
812 : void
813 0 : xbf_scsi_inq(struct scsi_xfer *xs)
814 : {
815 0 : struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;
816 :
817 0 : if (ISSET(inq->flags, SI_EVPD))
818 0 : xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
819 : else
820 0 : xbf_scsi_inquiry(xs);
821 0 : }
822 :
823 : void
824 0 : xbf_scsi_inquiry(struct scsi_xfer *xs)
825 : {
826 0 : struct xbf_softc *sc = xs->sc_link->adapter_softc;
827 0 : struct scsi_inquiry_data inq;
828 : /* char buf[5]; */
829 :
830 0 : bzero(&inq, sizeof(inq));
831 :
832 0 : switch (sc->sc_type) {
833 : case XBF_CDROM:
834 0 : inq.device = T_CDROM;
835 0 : break;
836 : default:
837 0 : inq.device = T_DIRECT;
838 0 : break;
839 : }
840 :
841 0 : inq.version = 0x05; /* SPC-3 */
842 0 : inq.response_format = 2;
843 0 : inq.additional_length = 32;
844 0 : inq.flags |= SID_CmdQue;
845 0 : bcopy("Xen ", inq.vendor, sizeof(inq.vendor));
846 0 : bcopy(sc->sc_prod, inq.product, sizeof(inq.product));
847 0 : bcopy("0000", inq.revision, sizeof(inq.revision));
848 :
849 0 : bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
850 :
851 0 : xbf_scsi_done(xs, XS_NOERROR);
852 0 : }
853 :
854 : void
855 0 : xbf_scsi_capacity(struct scsi_xfer *xs)
856 : {
857 0 : struct xbf_softc *sc = xs->sc_link->adapter_softc;
858 0 : struct scsi_read_cap_data rcd;
859 : uint64_t capacity;
860 :
861 0 : bzero(&rcd, sizeof(rcd));
862 :
863 0 : capacity = sc->sc_disk_size - 1;
864 0 : if (capacity > 0xffffffff)
865 : capacity = 0xffffffff;
866 :
867 0 : _lto4b(capacity, rcd.addr);
868 0 : _lto4b(sc->sc_block_size, rcd.length);
869 :
870 0 : bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
871 :
872 0 : xbf_scsi_done(xs, XS_NOERROR);
873 0 : }
874 :
875 : void
876 0 : xbf_scsi_capacity16(struct scsi_xfer *xs)
877 : {
878 0 : struct xbf_softc *sc = xs->sc_link->adapter_softc;
879 0 : struct scsi_read_cap_data_16 rcd;
880 :
881 0 : bzero(&rcd, sizeof(rcd));
882 :
883 0 : _lto8b(sc->sc_disk_size - 1, rcd.addr);
884 0 : _lto4b(sc->sc_block_size, rcd.length);
885 :
886 0 : bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
887 :
888 0 : xbf_scsi_done(xs, XS_NOERROR);
889 0 : }
890 :
891 : void
892 0 : xbf_scsi_done(struct scsi_xfer *xs, int error)
893 : {
894 : int s;
895 :
896 0 : xs->error = error;
897 :
898 0 : s = splbio();
899 0 : scsi_done(xs);
900 0 : splx(s);
901 0 : }
902 :
903 : int
904 0 : xbf_dev_probe(struct scsi_link *link)
905 : {
906 0 : if (link->target == 0)
907 0 : return (0);
908 :
909 0 : return (ENODEV);
910 0 : }
911 :
912 : int
913 0 : xbf_get_type(struct xbf_softc *sc)
914 : {
915 0 : unsigned long long res;
916 : const char *prop;
917 0 : char val[32];
918 : int error;
919 :
920 : prop = "type";
921 0 : if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
922 0 : sizeof(val))) != 0)
923 : goto errout;
924 0 : snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s", val);
925 :
926 : prop = "dev";
927 0 : if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
928 0 : sizeof(val))) != 0)
929 : goto errout;
930 0 : snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %s", sc->sc_prod, val);
931 :
932 : prop = "virtual-device";
933 0 : if ((error = xs_getnum(sc->sc_parent, sc->sc_node, prop, &res)) != 0)
934 : goto errout;
935 0 : sc->sc_unit = (uint32_t)res;
936 0 : snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %llu", sc->sc_prod, res);
937 :
938 : prop = "device-type";
939 0 : if ((error = xs_getprop(sc->sc_parent, sc->sc_node, prop,
940 0 : sc->sc_dtype, sizeof(sc->sc_dtype))) != 0)
941 : goto errout;
942 0 : if (!strcmp(sc->sc_dtype, "cdrom"))
943 0 : sc->sc_type = XBF_CDROM;
944 :
945 0 : return (0);
946 :
947 : errout:
948 0 : printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
949 : prop);
950 0 : return (-1);
951 0 : }
952 :
953 : int
954 0 : xbf_init(struct xbf_softc *sc)
955 : {
956 0 : unsigned long long res;
957 : const char *action, *prop;
958 0 : char pbuf[sizeof("ring-refXX")];
959 : unsigned int i;
960 : int error;
961 :
962 : prop = "max-ring-page-order";
963 0 : error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
964 0 : if (error == 0)
965 0 : sc->sc_xr_size = 1 << res;
966 0 : if (error == ENOENT) {
967 : prop = "max-ring-pages";
968 0 : error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
969 0 : if (error == 0)
970 0 : sc->sc_xr_size = res;
971 : }
972 : /* Fallback to the known minimum */
973 0 : if (error)
974 0 : sc->sc_xr_size = XBF_MIN_RING_SIZE;
975 :
976 0 : if (sc->sc_xr_size < XBF_MIN_RING_SIZE)
977 0 : sc->sc_xr_size = XBF_MIN_RING_SIZE;
978 0 : if (sc->sc_xr_size > XBF_MAX_RING_SIZE)
979 0 : sc->sc_xr_size = XBF_MAX_RING_SIZE;
980 0 : if (!powerof2(sc->sc_xr_size))
981 0 : sc->sc_xr_size = 1 << (fls(sc->sc_xr_size) - 1);
982 :
983 0 : sc->sc_xr_ndesc = ((sc->sc_xr_size * PAGE_SIZE) -
984 0 : sizeof(struct xbf_ring)) / sizeof(union xbf_ring_desc);
985 0 : if (!powerof2(sc->sc_xr_ndesc))
986 0 : sc->sc_xr_ndesc = 1 << (fls(sc->sc_xr_ndesc) - 1);
987 0 : if (sc->sc_xr_ndesc > XBF_MAX_REQS)
988 0 : sc->sc_xr_ndesc = XBF_MAX_REQS;
989 :
990 : DPRINTF("%s: %u ring pages, %d requests\n",
991 : sc->sc_dev.dv_xname, sc->sc_xr_size, sc->sc_xr_ndesc);
992 :
993 0 : if (xbf_ring_create(sc))
994 0 : return (-1);
995 :
996 : action = "set";
997 :
998 0 : for (i = 0; i < sc->sc_xr_size; i++) {
999 0 : if (i == 0 && sc->sc_xr_size == 1)
1000 0 : snprintf(pbuf, sizeof(pbuf), "ring-ref");
1001 : else
1002 0 : snprintf(pbuf, sizeof(pbuf), "ring-ref%d", i);
1003 0 : prop = pbuf;
1004 0 : if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1005 0 : sc->sc_xr_ref[i]))
1006 : goto errout;
1007 : }
1008 :
1009 0 : if (sc->sc_xr_size > 1) {
1010 : prop = "num-ring-pages";
1011 0 : if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1012 0 : sc->sc_xr_size))
1013 : goto errout;
1014 : prop = "ring-page-order";
1015 0 : if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1016 0 : fls(sc->sc_xr_size) - 1))
1017 : goto errout;
1018 : }
1019 :
1020 : prop = "event-channel";
1021 0 : if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
1022 : goto errout;
1023 :
1024 : prop = "protocol";
1025 : #ifdef __amd64__
1026 0 : if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_64-abi",
1027 0 : strlen("x86_64-abi")))
1028 : goto errout;
1029 : #else
1030 : if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_32-abi",
1031 : strlen("x86_32-abi")))
1032 : goto errout;
1033 : #endif
1034 :
1035 0 : if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
1036 0 : XEN_STATE_INITIALIZED, strlen(XEN_STATE_INITIALIZED))) {
1037 0 : printf("%s: failed to set state to INITIALIZED\n",
1038 0 : sc->sc_dev.dv_xname);
1039 0 : xbf_ring_destroy(sc);
1040 0 : return (-1);
1041 : }
1042 :
1043 0 : if (xs_await_transition(sc->sc_parent, sc->sc_backend, "state",
1044 : XEN_STATE_CONNECTED, 10000)) {
1045 0 : printf("%s: timed out waiting for backend to connect\n",
1046 0 : sc->sc_dev.dv_xname);
1047 0 : xbf_ring_destroy(sc);
1048 0 : return (-1);
1049 : }
1050 :
1051 : action = "read";
1052 :
1053 : prop = "sectors";
1054 0 : if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
1055 : goto errout;
1056 0 : sc->sc_disk_size = res;
1057 :
1058 : prop = "sector-size";
1059 0 : if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
1060 : goto errout;
1061 0 : sc->sc_block_size = res;
1062 :
1063 : prop = "feature-barrier";
1064 0 : if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1065 0 : && error != ENOENT)
1066 : goto errout;
1067 0 : if (error == 0 && res == 1)
1068 0 : sc->sc_caps |= XBF_CAP_BARRIER;
1069 :
1070 : prop = "feature-flush-cache";
1071 0 : if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1072 0 : && error != ENOENT)
1073 : goto errout;
1074 0 : if (error == 0 && res == 1)
1075 0 : sc->sc_caps |= XBF_CAP_FLUSH;
1076 :
1077 : #ifdef XBF_DEBUG
1078 : if (sc->sc_caps) {
1079 : printf("%s: features:", sc->sc_dev.dv_xname);
1080 : if (sc->sc_caps & XBF_CAP_BARRIER)
1081 : printf(" BARRIER");
1082 : if (sc->sc_caps & XBF_CAP_FLUSH)
1083 : printf(" FLUSH");
1084 : printf("\n");
1085 : }
1086 : #endif
1087 :
1088 0 : if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
1089 0 : XEN_STATE_CONNECTED, strlen(XEN_STATE_CONNECTED))) {
1090 0 : printf("%s: failed to set state to CONNECTED\n",
1091 0 : sc->sc_dev.dv_xname);
1092 0 : return (-1);
1093 : }
1094 :
1095 0 : sc->sc_state = XBF_CONNECTED;
1096 :
1097 0 : return (0);
1098 :
1099 : errout:
1100 0 : printf("%s: failed to %s \"%s\" property (%d)\n", sc->sc_dev.dv_xname,
1101 : action, prop, error);
1102 0 : xbf_ring_destroy(sc);
1103 0 : return (-1);
1104 0 : }
1105 :
/*
 * Allocate "size" bytes of DMA-able memory in up to "nsegs" segments
 * (each at most PAGE_SIZE bytes, page-aligned, zero-filled), map it
 * into kernel virtual memory and load it into a freshly created DMA
 * map.  "mapflags" is merged into the bus_dmamap_load() flags; the
 * ring setup path passes the Xen domain id here (sc_domid << 16).
 * Returns 0 on success or a bus_dma/errno value on failure, in which
 * case everything acquired so far has been unwound and dma->dma_map
 * and dma->dma_tag are reset to NULL so a later xbf_dma_free() is a
 * no-op.
 */
int
xbf_dma_alloc(struct xbf_softc *sc, struct xbf_dma_mem *dma,
    bus_size_t size, int nsegs, int mapflags)
{
	int error;

	dma->dma_tag = sc->sc_dmat;

	/* One bus_dma_segment_t per requested segment, zero-initialized. */
	dma->dma_seg = mallocarray(nsegs, sizeof(bus_dma_segment_t), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (dma->dma_seg == NULL) {
		printf("%s: failed to allocate a segment array\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Maximum segment size PAGE_SIZE, no boundary constraint. */
	error = bus_dmamap_create(dma->dma_tag, size, nsegs, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (error) {
		printf("%s: failed to create a memory map (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto errout;
	}

	/* Page-aligned, zero-filled backing memory. */
	error = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0,
	    dma->dma_seg, nsegs, &dma->dma_rsegs, BUS_DMA_ZERO |
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to allocate DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto destroy;
	}

	error = bus_dmamem_map(dma->dma_tag, dma->dma_seg, dma->dma_rsegs,
	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to map DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto free;
	}

	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size, NULL, mapflags | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to load DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto unmap;
	}

	dma->dma_size = size;
	dma->dma_nsegs = nsegs;
	return (0);

	/* Error unwind: release in the exact reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
free:
	bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
errout:
	free(dma->dma_seg, M_DEVBUF, nsegs * sizeof(bus_dma_segment_t));
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (error);
}
1171 :
/*
 * Release everything acquired by a successful xbf_dma_alloc():
 * sync, unload, unmap, free, destroy, then release the segment
 * array.  Bails out early if the region was never set up (or was
 * already torn down), so repeated calls are harmless.
 */
void
xbf_dma_free(struct xbf_softc *sc, struct xbf_dma_mem *dma)
{
	if (dma->dma_tag == NULL || dma->dma_map == NULL)
		return;
	bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	free(dma->dma_seg, M_DEVBUF, dma->dma_nsegs * sizeof(bus_dma_segment_t));
	/* Clear the handles so a second call takes the early return above. */
	dma->dma_seg = NULL;
	dma->dma_map = NULL;
	dma->dma_size = 0;
}
1188 :
1189 : int
1190 0 : xbf_ring_create(struct xbf_softc *sc)
1191 : {
1192 : int i;
1193 :
1194 0 : if (xbf_dma_alloc(sc, &sc->sc_xr_dma, sc->sc_xr_size * PAGE_SIZE,
1195 0 : sc->sc_xr_size, sc->sc_domid << 16))
1196 0 : return (-1);
1197 0 : for (i = 0; i < sc->sc_xr_dma.dma_map->dm_nsegs; i++)
1198 0 : sc->sc_xr_ref[i] = sc->sc_xr_dma.dma_map->dm_segs[i].ds_addr;
1199 :
1200 0 : sc->sc_xr = (struct xbf_ring *)sc->sc_xr_dma.dma_vaddr;
1201 :
1202 0 : sc->sc_xr->xr_prod_event = sc->sc_xr->xr_cons_event = 1;
1203 :
1204 0 : for (i = 0; i < sc->sc_xr_ndesc; i++)
1205 0 : sc->sc_xr->xr_desc[i].xrd_req.req_id = i;
1206 :
1207 : /* The number of contiguous blocks addressable by one descriptor */
1208 0 : sc->sc_xrd_nblk = (PAGE_SIZE * XBF_MAX_SGE) / (1 << XBF_SEC_SHIFT);
1209 :
1210 0 : if (xbf_alloc_ccbs(sc)) {
1211 0 : xbf_ring_destroy(sc);
1212 0 : return (-1);
1213 : }
1214 :
1215 0 : return (0);
1216 0 : }
1217 :
/*
 * Tear down the I/O ring: release the CCBs first, then the ring's
 * DMA memory; sc_xr is cleared so stale pointers cannot be reused.
 */
void
xbf_ring_destroy(struct xbf_softc *sc)
{
	xbf_free_ccbs(sc);
	xbf_dma_free(sc, &sc->sc_xr_dma);
	sc->sc_xr = NULL;
}
1225 :
/*
 * Abort all commands still on the submission queue: unload each CCB's
 * data mapping, reclaim the transfer and complete it with
 * XS_SELTIMEOUT, then destroy the ring itself.
 */
void
xbf_stop(struct xbf_softc *sc)
{
	struct xbf_ccb *ccb, *nccb;
	bus_dmamap_t map;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
	    sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	TAILQ_FOREACH_SAFE(ccb, &sc->sc_ccb_sq, ccb_link, nccb) {
		TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);

		/*
		 * A non-zero ccb_bbuf.dma_size means the data was staged
		 * through the CCB's own buffer (presumably a bounce
		 * buffer -- confirm against the submission path); in
		 * that case that map, not ccb_dmap, holds the mapping.
		 */
		if (ccb->ccb_bbuf.dma_size > 0)
			map = ccb->ccb_bbuf.dma_map;
		else
			map = ccb->ccb_dmap;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		xbf_reclaim_cmd(ccb->ccb_xfer);
		xbf_scsi_done(ccb->ccb_xfer, XS_SELTIMEOUT);
	}

	xbf_ring_destroy(sc);
}
1253 :
/*
 * Allocate the command control blocks: one CCB per two ring
 * descriptors, each with a DMA map able to describe a MAXPHYS-sized
 * transfer spread over 2 * XBF_MAX_SGE page-sized segments.  Every
 * CCB is pushed onto the free queue and the scsi iopool is wired to
 * xbf_get_ccb()/xbf_put_ccb().
 * Returns 0 on success, -1 after releasing whatever was allocated.
 */
int
xbf_alloc_ccbs(struct xbf_softc *sc)
{
	int i, error;

	TAILQ_INIT(&sc->sc_ccb_fq);
	TAILQ_INIT(&sc->sc_ccb_sq);
	mtx_init(&sc->sc_ccb_fqlck, IPL_BIO);
	mtx_init(&sc->sc_ccb_sqlck, IPL_BIO);

	sc->sc_nccb = sc->sc_xr_ndesc / 2;

	/* M_ZERO matters: xbf_free_ccbs() skips CCBs with a NULL ccb_dmap. */
	sc->sc_ccbs = mallocarray(sc->sc_nccb, sizeof(struct xbf_ccb),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_ccbs == NULL) {
		printf("%s: failed to allocate CCBs\n", sc->sc_dev.dv_xname);
		return (-1);
	}

	for (i = 0; i < sc->sc_nccb; i++) {
		/*
		 * Each CCB is set up to use up to 2 descriptors and
		 * each descriptor can transfer XBF_MAX_SGE number of
		 * pages.
		 */
		error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 2 *
		    XBF_MAX_SGE, PAGE_SIZE, PAGE_SIZE, BUS_DMA_NOWAIT,
		    &sc->sc_ccbs[i].ccb_dmap);
		if (error) {
			printf("%s: failed to create a memory map for "
			    "the xfer %d (%d)\n", sc->sc_dev.dv_xname, i,
			    error);
			goto errout;
		}

		/* Put the freshly initialized CCB on the free queue. */
		xbf_put_ccb(sc, &sc->sc_ccbs[i]);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, xbf_get_ccb, xbf_put_ccb);

	return (0);

errout:
	xbf_free_ccbs(sc);
	return (-1);
}
1300 :
/*
 * Destroy the per-CCB DMA maps and release the CCB array.  CCBs
 * whose map was never created (ccb_dmap == NULL, possible when
 * xbf_alloc_ccbs() failed half-way through its loop) are skipped.
 */
void
xbf_free_ccbs(struct xbf_softc *sc)
{
	struct xbf_ccb *ccb;
	int i;

	for (i = 0; i < sc->sc_nccb; i++) {
		ccb = &sc->sc_ccbs[i];
		if (ccb->ccb_dmap == NULL)
			continue;
		/*
		 * NOTE(review): length 0 is passed to bus_dmamap_sync()
		 * here -- verify this syncs what was intended.
		 */
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmap);
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmap);
	}

	free(sc->sc_ccbs, M_DEVBUF, sc->sc_nccb * sizeof(struct xbf_ccb));
	sc->sc_ccbs = NULL;
	sc->sc_nccb = 0;
}
1321 :
1322 : void *
1323 0 : xbf_get_ccb(void *xsc)
1324 : {
1325 0 : struct xbf_softc *sc = xsc;
1326 : struct xbf_ccb *ccb;
1327 :
1328 0 : if (sc->sc_state != XBF_CONNECTED &&
1329 0 : sc->sc_state != XBF_CLOSING)
1330 0 : return (NULL);
1331 :
1332 0 : mtx_enter(&sc->sc_ccb_fqlck);
1333 0 : ccb = TAILQ_FIRST(&sc->sc_ccb_fq);
1334 0 : if (ccb != NULL)
1335 0 : TAILQ_REMOVE(&sc->sc_ccb_fq, ccb, ccb_link);
1336 0 : mtx_leave(&sc->sc_ccb_fqlck);
1337 :
1338 0 : return (ccb);
1339 0 : }
1340 :
1341 : void
1342 0 : xbf_put_ccb(void *xsc, void *io)
1343 : {
1344 0 : struct xbf_softc *sc = xsc;
1345 0 : struct xbf_ccb *ccb = io;
1346 :
1347 0 : ccb->ccb_xfer = NULL;
1348 :
1349 0 : mtx_enter(&sc->sc_ccb_fqlck);
1350 0 : TAILQ_INSERT_HEAD(&sc->sc_ccb_fq, ccb, ccb_link);
1351 0 : mtx_leave(&sc->sc_ccb_fqlck);
1352 0 : }
|