Line data Source code
1 : /* $OpenBSD: xhci.c,v 1.89 2018/09/06 15:39:48 mpi Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2014-2015 Martin Pieuchot
5 : *
6 : * Permission to use, copy, modify, and distribute this software for any
7 : * purpose with or without fee is hereby granted, provided that the above
8 : * copyright notice and this permission notice appear in all copies.
9 : *
10 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 : */
18 :
19 : #include <sys/param.h>
20 : #include <sys/systm.h>
21 : #include <sys/kernel.h>
22 : #include <sys/malloc.h>
23 : #include <sys/device.h>
24 : #include <sys/queue.h>
25 : #include <sys/timeout.h>
26 : #include <sys/pool.h>
27 : #include <sys/endian.h>
28 : #include <sys/rwlock.h>
29 :
30 : #include <machine/bus.h>
31 :
32 : #include <dev/usb/usb.h>
33 : #include <dev/usb/usbdi.h>
34 : #include <dev/usb/usbdivar.h>
35 : #include <dev/usb/usb_mem.h>
36 :
37 : #include <dev/usb/xhcireg.h>
38 : #include <dev/usb/xhcivar.h>
39 :
/* Autoconf glue: device class entry for xhci(4). */
struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL
};
43 :
/* Debug printf wrappers; compiled out unless XHCI_DEBUG is defined. */
#ifdef XHCI_DEBUG
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

/* Byte offset of a TRB within its ring's TRB array. */
#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
/* Physical (bus) address of a ring's current dequeue/enqueue index. */
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))

/* xfer pool shared by all xhci(4) instances, created by the first xhci_init(). */
struct pool *xhcixfer;
59 :
/*
 * Software state of an open pipe: the endpoint's transfer ring plus
 * the bookkeeping needed to match Transfer Events back to xfers.
 */
struct xhci_pipe {
	struct usbd_pipe pipe;

	uint8_t dci;		/* Device Context Index of the endpoint */
	uint8_t slot;		/* Device slot ID */
	struct xhci_ring ring;	/* Transfer ring for this endpoint */

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer *pending_xfers[XHCI_MAX_XFER];
	struct usbd_xfer *aborted_xfer;	/* xfer waiting for endpoint reset */
	int halted;		/* usbd_status to report once recovered, or 0 */
	size_t free_trbs;	/* TRBs still available in the ring */
};
76 :
77 : int xhci_reset(struct xhci_softc *);
78 : int xhci_intr1(struct xhci_softc *);
79 : void xhci_event_dequeue(struct xhci_softc *);
80 : void xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
81 : void xhci_event_command(struct xhci_softc *, uint64_t);
82 : void xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
83 : int xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
84 : int xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
85 : int xhci_scratchpad_alloc(struct xhci_softc *, int);
86 : void xhci_scratchpad_free(struct xhci_softc *);
87 : int xhci_softdev_alloc(struct xhci_softc *, uint8_t);
88 : void xhci_softdev_free(struct xhci_softc *, uint8_t);
89 : int xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
90 : size_t);
91 : void xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
92 : void xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
93 : struct xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
94 : struct xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);
95 :
96 : struct xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
97 : uint8_t *, int);
98 : void xhci_xfer_done(struct usbd_xfer *xfer);
99 : /* xHCI command helpers. */
100 : int xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
101 : int xhci_command_abort(struct xhci_softc *);
102 :
103 : void xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
104 : void xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
105 : int xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
106 : int xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
107 : int xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
108 : int xhci_cmd_set_address(struct xhci_softc *, uint8_t, uint64_t, uint32_t);
109 : int xhci_cmd_evaluate_ctx(struct xhci_softc *, uint8_t, uint64_t);
110 : #ifdef XHCI_DEBUG
111 : int xhci_cmd_noop(struct xhci_softc *);
112 : #endif
113 :
114 : /* XXX should be part of the Bus interface. */
115 : void xhci_abort_xfer(struct usbd_xfer *, usbd_status);
116 : void xhci_pipe_close(struct usbd_pipe *);
117 : void xhci_noop(struct usbd_xfer *);
118 :
119 : void xhci_timeout(void *);
120 : void xhci_timeout_task(void *);
121 :
122 : /* USBD Bus Interface. */
123 : usbd_status xhci_pipe_open(struct usbd_pipe *);
124 : int xhci_setaddr(struct usbd_device *, int);
125 : void xhci_softintr(void *);
126 : void xhci_poll(struct usbd_bus *);
127 : struct usbd_xfer *xhci_allocx(struct usbd_bus *);
128 : void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
129 :
130 : usbd_status xhci_root_ctrl_transfer(struct usbd_xfer *);
131 : usbd_status xhci_root_ctrl_start(struct usbd_xfer *);
132 :
133 : usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
134 : usbd_status xhci_root_intr_start(struct usbd_xfer *);
135 : void xhci_root_intr_abort(struct usbd_xfer *);
136 : void xhci_root_intr_done(struct usbd_xfer *);
137 :
138 : usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
139 : usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
140 : void xhci_device_ctrl_abort(struct usbd_xfer *);
141 :
142 : usbd_status xhci_device_generic_transfer(struct usbd_xfer *);
143 : usbd_status xhci_device_generic_start(struct usbd_xfer *);
144 : void xhci_device_generic_abort(struct usbd_xfer *);
145 : void xhci_device_generic_done(struct usbd_xfer *);
146 :
147 : usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
148 : usbd_status xhci_device_isoc_start(struct usbd_xfer *);
149 :
/* Endpoint number of the root hub's status-change interrupt pipe. */
#define XHCI_INTR_ENDPT 1

/* Host-controller entry points used by the generic USB stack. */
struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

/* Root hub control pipe (requests emulated in software). */
struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

/* Root hub interrupt pipe (port status change reports). */
struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

/* Default (control) pipe of attached devices. */
struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

/* Interrupt and bulk pipes share the generic transfer path. */
struct usbd_pipe_methods xhci_device_intr_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

/* Isochronous pipes (not wired up yet, see xhci_pipe_open()). */
struct usbd_pipe_methods xhci_device_isoc_methods = {
	.transfer = xhci_device_isoc_transfer,
	.start = xhci_device_isoc_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};
208 :
#ifdef XHCI_DEBUG
/* Pretty-print a TRB: its address field, status and decoded flag bits. */
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
#endif
218 :
219 : int usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
220 : void **, bus_size_t, bus_size_t, bus_size_t);
221 : void usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);
222 :
/*
 * Allocate, map and load a physically contiguous chunk of DMA memory
 * of `size' bytes honoring the given alignment/boundary constraints.
 * On success the bus address is left in dma->paddr and, if `kvap' is
 * not NULL, the kernel virtual address is returned through it.
 * Returns 0 or a bus_dma error code; on failure every intermediate
 * resource is released via the goto-based unwind below.
 */
int
usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
{
	int error;

	dma->tag = bus->dmatag;
	dma->size = size;

	/* One segment only: the memory must be physically contiguous. */
	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
	    BUS_DMA_NOWAIT, &dma->map);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0)
		goto destroy;

	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		goto free;

	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto unmap;

	/* Flush the zero-filled memory so the device sees it. */
	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return (0);

unmap:
	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
free:
	bus_dmamem_free(dma->tag, &dma->seg, 1);
destroy:
	bus_dmamap_destroy(dma->tag, dma->map);
	return (error);
}
269 :
/*
 * Release memory obtained with usbd_dma_contig_alloc().  Safe to call
 * on a structure whose map was never created (dma->map == NULL).
 */
void
usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
{
	if (dma->map != NULL) {
		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(bus->dmatag, dma->map);
		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
		bus_dmamap_destroy(bus->dmatag, dma->map);
		dma->map = NULL;	/* Guards against double free. */
	}
}
283 :
/*
 * One-time initialization: read the capability registers, reset the
 * controller and allocate the DCBAA, command ring, event ring with its
 * segment table (ERST) and any scratchpad pages the controller asks
 * for.  Registers are not programmed here; xhci_config() does that.
 * Returns 0 or an errno-style error with everything released.
 */
int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	/* Offsets of the operational, doorbell and runtime register sets. */
	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
	printf(", xHCI %u.%u\n", sc->sc_version >> 8, sc->sc_version & 0xff);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	/* The xfer pool is shared by all xhci(4) instances. */
	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
		    0, "xhcixfer", NULL);
	}

	/* Device context size: 32 or 64 bytes, per HCCPARAMS.CSZ. */
	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/* Setup Device Context Base Address Array (one entry per slot). */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Setup command ring. */
	rw_init(&sc->sc_cmd_lock, "xhcicmd");
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
	    XHCI_CMDS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
	    XHCI_EVTS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
	    XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}


	return (0);
}
404 :
/*
 * Program the operational and runtime registers with the structures
 * allocated by xhci_init() (DCBAA, command ring, ERST, event ring),
 * enable interrupts and start the controller.
 */
void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address (64-bit, two writes). */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address, with the initial cycle state. */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}
465 :
/*
 * Detach hook: detach children, quiesce and reset the controller,
 * clear every register programmed in xhci_config() and release the
 * DMA resources allocated by xhci_init().
 */
int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}
513 :
/*
 * Autoconf activation hook: on resume, reset the controller and its
 * rings, then reprogram the registers; on powerdown, just reset.
 */
int
xhci_activate(struct device *self, int act)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		sc->sc_bus.use_polling++;

		/* Rings keep their memory but must restart from index 0. */
		xhci_reset(sc);
		xhci_ring_reset(sc, &sc->sc_cmd_ring);
		xhci_ring_reset(sc, &sc->sc_evt_ring);

		/* Renesas controllers, at least, need more time to resume. */
		usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);

		xhci_config(sc);

		sc->sc_bus.use_polling--;
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
		xhci_reset(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}
547 :
/*
 * Halt the controller, then issue a Host Controller Reset and poll
 * (up to 100ms for each step) until the halt takes effect and the
 * HCRST/CNR bits clear.  Returns EIO on reset timeout.
 */
int
xhci_reset(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr)
		printf("%s: halt timeout\n", DEVNAME(sc));

	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		/* Done once both HCRST and Controller Not Ready are clear. */
		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
		if (!hcr)
			break;
	}

	if (hcr) {
		printf("%s: reset timeout\n", DEVNAME(sc));
		return (EIO);
	}

	return (0);
}
581 :
582 :
583 : int
584 0 : xhci_intr(void *v)
585 : {
586 0 : struct xhci_softc *sc = v;
587 :
588 0 : if (sc == NULL || sc->sc_bus.dying)
589 0 : return (0);
590 :
591 : /* If we get an interrupt while polling, then just ignore it. */
592 0 : if (sc->sc_bus.use_polling) {
593 : DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
594 0 : return (0);
595 : }
596 :
597 0 : return (xhci_intr1(sc));
598 0 : }
599 :
/*
 * Real interrupt work: inspect USBSTS, acknowledge the interrupt and
 * schedule the soft interrupt that drains the event ring.  Returns
 * non-zero when the interrupt belonged to this controller.
 */
int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	/* All-ones read means the device is gone (e.g. hot-unplugged). */
	if (intrs == 0xffffffff) {
		sc->sc_bus.dying = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		return (1);
	}

	XOWRITE4(sc, XHCI_USBSTS, intrs); /* Acknowledge */
	usb_schedsoftintr(&sc->sc_bus);

	/* Acknowledge PCI interrupt */
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	return (1);
}
631 :
632 : void
633 0 : xhci_poll(struct usbd_bus *bus)
634 : {
635 0 : struct xhci_softc *sc = (struct xhci_softc *)bus;
636 :
637 0 : if (XOREAD4(sc, XHCI_USBSTS))
638 0 : xhci_intr1(sc);
639 0 : }
640 :
/*
 * Soft interrupt handler: drain the event ring in IPL_SOFTUSB context.
 */
void
xhci_softintr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_bus.dying)
		return;

	/* Flag interrupt context for the completion paths we call. */
	sc->sc_bus.intr_context++;
	xhci_event_dequeue(sc);
	sc->sc_bus.intr_context--;
}
653 :
/*
 * Drain the event ring: dispatch every pending event TRB to its
 * type-specific handler, then write the new dequeue pointer back to
 * the ERDP register to acknowledge them.
 */
void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			/* Keep a copy for the synchronous command waiter. */
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		case XHCI_EVT_HOST_CTRL:
			/* TODO */
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}

	}

	/* Advance the hardware dequeue pointer past all consumed events. */
	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}
694 :
/*
 * Handle a Transfer Event TRB: map it back to the pipe and pending
 * xfer it refers to, translate the completion code into a usbd_status
 * and complete the xfer -- or kick off asynchronous endpoint recovery
 * for STALL/BABBLE conditions.
 */
void
xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
    uint32_t flags)
{
	struct xhci_pipe *xp;
	struct usbd_xfer *xfer;
	struct xhci_xfer *xx;
	uint8_t dci, slot, code;
	uint32_t remain;
	int trb_idx;

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);
	if (slot > sc->sc_noslot) {
		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
		return;
	}

	/*
	 * NOTE(review): a dci of 0 would index pipes[-1]; assumes the HC
	 * never reports endpoint 0 in a Transfer Event -- TODO confirm.
	 */
	xp = sc->sc_sdevs[slot].pipes[dci - 1];
	if (xp == NULL) {
		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
		return;
	}

	code = XHCI_TRB_GET_CODE(status);
	remain = XHCI_TRB_REMAIN(status);

	/* Ring-level conditions carry no TRB pointer; nothing to complete. */
	switch (code) {
	case XHCI_CODE_RING_UNDERRUN:
		DPRINTF(("%s: slot %u underrun wih %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		return;
	case XHCI_CODE_RING_OVERRUN:
		DPRINTF(("%s: slot %u overrun wih %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		return;
	default:
		break;
	};

	/* Recover the TRB index from the reported physical address. */
	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, xp->ring.ntrb - 1);
		return;
	}

	xfer = xp->pending_xfers[trb_idx];
	if (xfer == NULL) {
		printf("%s: NULL xfer pointer\n", DEVNAME(sc));
		return;
	}

	/* Clamp: the HC should never report more remaining than requested. */
	if (remain > xfer->length)
		remain = xfer->length;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		/*
		 * This might be the last TRB of a TD that ended up
		 * with a Short Transfer condition, see below.
		 */
		if (xfer->actlen == 0)
			xfer->actlen = xfer->length - remain;

		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		xfer->actlen = xfer->length - remain;

		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		xx = (struct xhci_xfer *)xfer;
		if (xx->index != trb_idx) {
			DPRINTF(("%s: short xfer %p for %u\n", DEVNAME(sc),
			    xfer, xx->index));
			return;
		}

		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
		/* Prevent any timeout to kick in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return;
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
			return;
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	xhci_xfer_done(xfer);
}
823 :
/*
 * Handle a Command Completion Event TRB.  Asynchronous commands
 * (Reset Endpoint, Set TR Dequeue) continue the endpoint recovery
 * sequence here; synchronous commands simply wake up the submitter
 * sleeping on sc_cmd_trb.
 */
void
xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
{
	struct xhci_trb *trb;
	struct xhci_pipe *xp;
	uint32_t flags;
	uint8_t dci, slot;
	int trb_idx, status;

	/* Recover the command TRB index from its physical address. */
	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
		return;
	}

	trb = &sc->sc_cmd_ring.trbs[trb_idx];

	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	flags = letoh32(trb->trb_flags);

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);

	switch (flags & XHCI_TRB_TYPE_MASK) {
	case XHCI_CMD_RESET_EP:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Update the dequeue pointer past the last TRB. */
		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
		    DEQPTR(xp->ring) | xp->ring.toggle);
		break;
	case XHCI_CMD_SET_TR_DEQ:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/*
		 * Endpoint recovered: complete the aborted xfer with the
		 * status recorded when the pipe was marked halted.
		 */
		status = xp->halted;
		xp->halted = 0;
		if (xp->aborted_xfer != NULL) {
			xp->aborted_xfer->status = status;
			xhci_xfer_done(xp->aborted_xfer);
			wakeup(xp);
		}
		break;
	case XHCI_CMD_CONFIG_EP:
	case XHCI_CMD_STOP_EP:
	case XHCI_CMD_DISABLE_SLOT:
	case XHCI_CMD_ENABLE_SLOT:
	case XHCI_CMD_ADDRESS_DEVICE:
	case XHCI_CMD_EVAL_CTX:
	case XHCI_CMD_NOOP:
		/* All these commands are synchronous. */
		KASSERT(sc->sc_cmd_trb == trb);
		sc->sc_cmd_trb = NULL;
		wakeup(&sc->sc_cmd_trb);
		break;
	default:
		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
	}
}
890 :
/*
 * Handle a Port Status Change Event: set the corresponding bit in the
 * root hub interrupt pipe's bitmap and complete the pending intr xfer.
 */
void
xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
{
	struct usbd_xfer *xfer = sc->sc_intrxfer;
	uint32_t port = XHCI_TRB_PORTID(paddr);
	uint8_t *p;

	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
		return;
	}

	/* No root hub interrupt transfer pending: drop the event. */
	if (xfer == NULL)
		return;

	p = KERNADDR(&xfer->dmabuf, 0);
	memset(p, 0, xfer->length);

	/* One bit per port; port numbers start at 1. */
	p[port/8] |= 1 << (port%8);
	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
}
917 :
/*
 * Release the TRBs used by a finished xfer, cancel its timeout and
 * abort task, and hand the xfer back to the USB stack.  Must run in
 * soft interrupt context (IPL_SOFTUSB).
 */
void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

	/*
	 * Walk backwards from the xfer's last TRB, clearing its slots.
	 * NOTE(review): wrapping from index 0 lands on ntrb - 2 after the
	 * decrement, skipping the ring's final entry -- presumably the
	 * link TRB, which never carries an xfer; confirm against
	 * xhci_ring_produce().
	 */
	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xx->index = -1;
	xx->ntrb = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}
950 :
951 : /*
952 : * Calculate the Device Context Index (DCI) for endpoints as stated
953 : * in section 4.5.1 of xHCI specification r1.1.
954 : */
955 : static inline uint8_t
956 0 : xhci_ed2dci(usb_endpoint_descriptor_t *ed)
957 : {
958 : uint8_t dir;
959 :
960 0 : if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
961 0 : return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);
962 :
963 0 : if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
964 0 : dir = 1;
965 : else
966 : dir = 0;
967 :
968 0 : return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
969 0 : }
970 :
/*
 * Open a pipe: bind the proper set of transfer methods and, for the
 * default (control) pipe of a new device, allocate an xHCI device slot.
 * Returns a usbd_status code.
 */
usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	int error;

	KASSERT(xp->slot == 0);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub: emulated in software, no slot or ring needed. */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/*
		 * Get a slot and init the device's contexts.
		 *
		 * Since the control endpoint, represented as the default
		 * pipe, is always opened first we are dealing with a
		 * new device. Put a new slot in the ENABLED state.
		 *
		 */
		error = xhci_cmd_slot_control(sc, &slot, 1);
		if (error || slot == 0 || slot > sc->sc_noslot)
			return (USBD_INVAL);

		if (xhci_softdev_alloc(sc, slot)) {
			/* Undo the slot enable on allocation failure. */
			xhci_cmd_slot_control(sc, &slot, 0);
			return (USBD_NOMEM);
		}

		break;
	case UE_ISOCHRONOUS:
#if notyet
		pipe->methods = &xhci_device_isoc_methods;
		break;
#else
		DPRINTF(("%s: isochronous xfer not supported \n", __func__));
		return (USBD_INVAL);
#endif
	case UE_BULK:
		pipe->methods = &xhci_device_bulk_methods;
		break;
	case UE_INTERRUPT:
		pipe->methods = &xhci_device_intr_methods;
		break;
	default:
		return (USBD_INVAL);
	}

	/*
	 * Our USBD Bus Interface is pipe-oriented but for most of the
	 * operations we need to access a device context, so keep track
	 * of the slot ID in every pipe.
	 */
	if (slot == 0)
		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;

	xp->slot = slot;
	xp->dci = xhci_ed2dci(ed);

	if (xhci_pipe_init(sc, pipe)) {
		/* Release the slot; for non-default pipes this is shared. */
		xhci_cmd_slot_control(sc, &slot, 0);
		return (USBD_IOERROR);
	}

	return (USBD_NORMAL_COMPLETION);
}
1064 :
/*
 * Set the maximum Endpoint Service Interface Time (ESIT) payload and
 * the average TRB buffer length for an endpoint, packed into the
 * endpoint context's txinfo field.
 */
static inline uint32_t
xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);

	switch (ed->bmAttributes & UE_XFERTYPE) {
	case UE_CONTROL:
		/* No ESIT payload; average TRB is a SETUP packet (8 bytes). */
		mep = 0;
		atl = 8;
		break;
	case UE_INTERRUPT:
	case UE_ISOCHRONOUS:
		if (pipe->device->speed == USB_SPEED_SUPER) {
			/* XXX Read the companion descriptor */
		}

		/*
		 * Payload per service interval, from the transaction
		 * count in wMaxPacketSize bits 12:11.
		 * NOTE(review): `| 0x1` maps 2 transactions to a
		 * multiplier of 3 but 1 to 1 — looks like it was meant
		 * to be (trans + 1); confirm against xHCI r1.1 §6.2.3.
		 */
		mep = (UE_GET_TRANS(mps) | 0x1) * UE_GET_SIZE(mps);
		atl = min(sc->sc_pagesize, mep);
		break;
	case UE_BULK:
	default:
		mep = 0;
		atl = 0;
	}

	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
}
1097 :
1098 : static inline uint32_t
1099 0 : xhci_linear_interval(usb_endpoint_descriptor_t *ed)
1100 : {
1101 0 : uint32_t ival = min(max(1, ed->bInterval), 255);
1102 :
1103 0 : return (fls(ival) - 1);
1104 : }
1105 :
1106 : static inline uint32_t
1107 0 : xhci_exponential_interval(usb_endpoint_descriptor_t *ed)
1108 : {
1109 0 : uint32_t ival = min(max(1, ed->bInterval), 16);
1110 :
1111 0 : return (ival - 1);
1112 : }
/*
 * Return interval for endpoint expressed in 2^(ival) * 125us.
 *
 * The encoding of bInterval depends on both the transfer type and the
 * device speed. See section 6.2.3.6 of xHCI r1.1 Specification for
 * more details.
 */
uint32_t
xhci_pipe_interval(struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t speed = pipe->device->speed;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint32_t ival;

	if (xfertype == UE_CONTROL || xfertype == UE_BULK) {
		/* Control and Bulk endpoints never NAK. */
		ival = 0;
	} else {
		switch (speed) {
		case USB_SPEED_FULL:
			if (xfertype == UE_ISOCHRONOUS) {
				/* Convert 1-2^(15)ms into 3-18 */
				ival = xhci_exponential_interval(ed) + 3;
				break;
			}
			/* FALLTHROUGH: FS interrupt uses the LS encoding. */
		case USB_SPEED_LOW:
			/* Convert 1-255ms into 3-10 */
			ival = xhci_linear_interval(ed) + 3;
			break;
		case USB_SPEED_HIGH:
		case USB_SPEED_SUPER:
		default:
			/* Convert 1-2^(15) * 125us into 0-15 */
			ival = xhci_exponential_interval(ed);
			break;
		}
	}

	KASSERT(ival <= 15);
	return (XHCI_EPCTX_SET_IVAL(ival));
}
1154 :
1155 : uint32_t
1156 0 : xhci_pipe_maxburst(struct usbd_pipe *pipe)
1157 : {
1158 0 : usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1159 0 : uint32_t mps = UGETW(ed->wMaxPacketSize);
1160 0 : uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1161 : uint32_t maxb = 0;
1162 :
1163 0 : switch (pipe->device->speed) {
1164 : case USB_SPEED_HIGH:
1165 0 : if (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)
1166 0 : maxb = UE_GET_TRANS(mps);
1167 : break;
1168 : case USB_SPEED_SUPER:
1169 : /* XXX Read the companion descriptor */
1170 : default:
1171 : break;
1172 : }
1173 :
1174 0 : return (maxb);
1175 : }
1176 :
/*
 * Fill in the input context (slot context + one endpoint context) for
 * this pipe and sync it for the hardware.  The caller is expected to
 * follow up with an Address Device, Configure Endpoint or Evaluate
 * Context command that consumes the input context.
 */
int
xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t speed, cerr = 0;
	uint32_t route = 0, rhport = 0;
	struct usbd_device *hub;

	/*
	 * Calculate the Route String. Assume that there is no hub with
	 * more than 15 ports and that they all have a depth < 6. See
	 * section 8.9 of USB 3.1 Specification for more details.
	 */
	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
		uint32_t port = hub->powersrc->portno;
		uint32_t depth = hub->myhub->depth;

		route |= port << (4 * (depth - 1));
	}

	/* Get Root Hub port (loop above stopped at the root hub). */
	rhport = hub->powersrc->portno;

	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		speed = XHCI_SPEED_LOW;
		break;
	case USB_SPEED_FULL:
		speed = XHCI_SPEED_FULL;
		break;
	case USB_SPEED_HIGH:
		speed = XHCI_SPEED_HIGH;
		break;
	case USB_SPEED_SUPER:
		speed = XHCI_SPEED_SUPER;
		break;
	default:
		return (USBD_INVAL);
	}

	/* Setup the endpoint context */
	/* All non-isochronous endpoints get the maximum error count. */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;

	/* xHCI EP types: IN and control endpoints are USB type + 4. */
	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |
	    XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |
	    XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
	/* Dequeue pointer carries the producer cycle state in bit 0. */
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DEQPTR(xp->ring) | xp->ring.toggle
	);

	/* Unmask the new endpoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(
	    XHCI_SCTX_DCI(xp->dci) | XHCI_SCTX_SPEED(speed) |
	    XHCI_SCTX_ROUTE(route)
	);
	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

/* XXX */
#define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
	/*
	 * If we are opening the interrupt pipe of a hub, update its
	 * context before putting it in the CONFIGURED state.
	 */
	if (pipe->device->hub != NULL) {
		int nports = pipe->device->hub->nports;

		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));

		if (UHUB_IS_MTT(pipe->device))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
		);
	}

	/*
	 * If this is a Low or Full Speed device below an external High
	 * Speed hub, it needs some TT love.
	 */
	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
		struct usbd_device *hshub = pipe->device->myhsport->parent;
		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;

		if (UHUB_IS_MTT(hshub))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_HUB_SID(slot) |
		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
		);
	}
#undef UHUB_IS_MTT

	/* Unmask the slot context */
	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1298 :
/*
 * Allocate the transfer ring for a pipe, set up its contexts and tell
 * the controller about them.  Returns 0 or an errno-style value; on
 * error the caller is responsible for releasing the slot.
 */
int
xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

#ifdef XHCI_DEBUG
	struct usbd_device *dev = pipe->device;
	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
	    pipe->endpoint->edesc->bEndpointAddress);
#endif

	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
		return (ENOMEM);

	xp->free_trbs = xp->ring.ntrb;
	xp->halted = 0;

	sdev->pipes[xp->dci - 1] = xp;

	error = xhci_context_setup(sc, pipe);
	if (error)
		return (error);

	if (xp->dci == 1) {
		/*
		 * If we are opening the default pipe, the Slot should
		 * be in the ENABLED state. Issue an "Address Device"
		 * with BSR=1 to put the device in the DEFAULT state.
		 * We cannot jump directly to the ADDRESSED state with
		 * BSR=0 because some Low/Full speed devices won't accept
		 * a SET_ADDRESS command before we've read their device
		 * descriptor.
		 */
		error = xhci_cmd_set_address(sc, xp->slot,
		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
	} else {
		/* Any other endpoint is activated via Configure Endpoint. */
		error = xhci_cmd_configure_ep(sc, xp->slot,
		    sdev->ictx_dma.paddr);
	}

	if (error) {
		xhci_ring_free(sc, &xp->ring);
		return (EIO);
	}

	return (0);
}
1350 :
/*
 * Close a pipe: drop its endpoint from the device context, free its
 * transfer ring and, for the default pipe, disable the whole slot.
 */
void
xhci_pipe_close(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *lxp, *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int i;

	/* Root Hub */
	if (pipe->device->depth == 0)
		return;

	/* Mask the endpoint */
	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
	sdev->input_ctx->add_flags = 0;

	/*
	 * Update last valid Endpoint Context: scan downward for the
	 * highest DCI still open other than the one being closed.  The
	 * loop always leaves lxp set since it runs down to index 0.
	 */
	for (i = 30; i >= 0; i--) {
		lxp = sdev->pipes[i];
		if (lxp != NULL && lxp != xp)
			break;
	}
	sdev->slot_ctx->info_lo = htole32(XHCI_SCTX_DCI(lxp->dci));

	/* Clear the Endpoint Context */
	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));

	xhci_ring_free(sc, &xp->ring);
	sdev->pipes[xp->dci - 1] = NULL;

	/*
	 * If we are closing the default pipe, the device is probably
	 * gone, so put its slot in the DISABLED state.
	 */
	if (xp->dci == 1) {
		xhci_cmd_slot_control(sc, &xp->slot, 0);
		xhci_softdev_free(sc, xp->slot);
	}
}
1396 :
/*
 * Transition a device from DEFAULT to ADDRESSED Slot state, this hook
 * is needed for Low/Full speed devices.
 *
 * The controller assigns the address itself, so the "addr" chosen by
 * the stack is not used here.
 *
 * See section 4.5.3 of USB 3.1 Specification for more details.
 */
int
xhci_setaddr(struct usbd_device *dev, int addr)
{
	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

	/* Root Hub */
	if (dev->depth == 0)
		return (0);

	/* Only the default (control) pipe may address the device. */
	KASSERT(xp->dci == 1);

	error = xhci_context_setup(sc, dev->default_pipe);
	if (error)
		return (error);

	/* BSR=0: the controller issues SET_ADDRESS on the wire. */
	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);

#ifdef XHCI_DEBUG
	if (error == 0) {
		struct xhci_sctx *sctx;
		uint8_t addr;

		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);

		/* Get output slot context. */
		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
		error = (addr == 0);

		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
	}
#endif

	return (error);
}
1442 :
1443 : struct usbd_xfer *
1444 0 : xhci_allocx(struct usbd_bus *bus)
1445 : {
1446 0 : return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
1447 : }
1448 :
/* Return a transfer structure to the shared xhci xfer pool. */
void
xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	pool_put(xhcixfer, xfer);
}
1454 :
/*
 * Allocate the scratchpad buffers the controller requested: a table of
 * `npage' page pointers plus the pages themselves, and publish the
 * table through entry 0 of the DCBAA.  Returns 0 or ENOMEM.
 */
int
xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
{
	uint64_t *pte;
	int error, i;

	/* Allocate the required entry for the table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
	    sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Allocate pages. XXX does not need to be contiguous. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
		return (ENOMEM);
	}

	/* Fill the table with the physical address of each page. */
	for (i = 0; i < npage; i++) {
		pte[i] = htole64(
		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
		);
	}

	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
	    npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Entry 0 points to the table of scratchpad pointers. */
	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_spad.npage = npage;

	return (0);
}
1495 :
/*
 * Release the scratchpad buffers: clear DCBAA entry 0 first so the
 * controller no longer sees the table, then free the DMA memory.
 */
void
xhci_scratchpad_free(struct xhci_softc *sc)
{
	sc->sc_dcbaa.segs[0] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
}
1506 :
1507 : int
1508 0 : xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
1509 : size_t alignment)
1510 : {
1511 : size_t size;
1512 : int error;
1513 :
1514 0 : size = ntrb * sizeof(struct xhci_trb);
1515 :
1516 0 : error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
1517 0 : (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
1518 0 : if (error)
1519 0 : return (error);
1520 :
1521 0 : ring->ntrb = ntrb;
1522 :
1523 0 : xhci_ring_reset(sc, ring);
1524 :
1525 0 : return (0);
1526 0 : }
1527 :
/* Free the DMA memory backing a TRB ring. */
void
xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
{
	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
}
1533 :
/*
 * Reset a ring to its initial state: zero all TRBs, rewind the index
 * and set the producer cycle state.  Non-event rings get a link TRB in
 * their last slot so the single segment wraps back onto itself.
 */
void
xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
{
	size_t size;

	size = ring->ntrb * sizeof(struct xhci_trb);

	memset(ring->trbs, 0, size);

	ring->index = 0;
	ring->toggle = XHCI_TRB_CYCLE;

	/*
	 * Since all our rings use only one segment, at least for
	 * the moment, link their tail to their head.
	 */
	if (ring != &sc->sc_evt_ring) {
		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];

		trb->trb_paddr = htole64(ring->dma.paddr);
		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG);
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREWRITE);
	} else
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1561 :
/*
 * Consume the next TRB from a ring the hardware produces into (e.g.
 * the event ring).  Returns NULL when the TRB at the current index has
 * not been written by the controller yet (cycle bit mismatch).
 */
struct xhci_trb*
xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	/* Make sure this TRB can be consumed. */
	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
		return (NULL);

	ring->index++;

	/* Wrap around and flip our view of the cycle bit. */
	if (ring->index == ring->ntrb) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1585 :
/*
 * Hand out the next free TRB slot of a ring we produce into (command
 * or transfer rings).  When the index reaches the link TRB, toggle its
 * cycle bit and wrap to the start of the segment.
 */
struct xhci_trb*
xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	ring->index++;

	/* Toggle cycle state of the link TRB and skip it. */
	if (ring->index == (ring->ntrb - 1)) {
		struct xhci_trb *lnk = &ring->trbs[ring->index];

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1618 :
/*
 * Reserve one TRB on the pipe's ring for `xfer' and return it.  The
 * current cycle bit is stored in *togglep so the caller can set it on
 * the TRB last (making it visible to the controller atomically).
 * `last' marks the TRB whose completion ends the transfer.
 */
struct xhci_trb *
xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
    uint8_t *togglep, int last)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	KASSERT(xp->free_trbs >= 1);

	/* Associate this TRB to our xfer. */
	xp->pending_xfers[xp->ring.index] = xfer;
	xp->free_trbs--;

	/* Only the last TRB's index is recorded; -2 means "not last". */
	xx->index = (last) ? xp->ring.index : -2;
	xx->ntrb += 1;

	*togglep = xp->ring.toggle;
	return (xhci_ring_produce(sc, &xp->ring));
}
1638 :
/*
 * Queue a command TRB on the command ring and ring doorbell 0.  With a
 * non-zero `timeout' (ms) the caller must hold sc_cmd_lock; we sleep
 * until the interrupt handler posts the completion event, and copy the
 * result back into *trb0.  With timeout == 0 the command is fired
 * asynchronously.  Returns 0, EAGAIN (ring full), EIO (command failed)
 * or the tsleep() error.
 */
int
xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
{
	struct xhci_trb *trb;
	int s, error = 0;

	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);

	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);

	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
	if (trb == NULL)
		return (EAGAIN);
	/* Write pointer/status first, then flags: the cycle bit in the
	 * flags word is what hands the TRB over to the controller. */
	trb->trb_paddr = trb0->trb_paddr;
	trb->trb_status = trb0->trb_status;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	trb->trb_flags = trb0->trb_flags;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	if (timeout == 0) {
		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
		return (0);
	}

	rw_assert_wrlock(&sc->sc_cmd_lock);

	s = splusb();
	sc->sc_cmd_trb = trb;
	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
	error = tsleep(&sc->sc_cmd_trb, PZERO, "xhcicmd",
	    (timeout*hz+999)/ 1000 + 1);
	if (error) {
#ifdef XHCI_DEBUG
		printf("%s: tsleep() = %d\n", __func__, error);
		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
		xhci_dump_trb(trb);
#endif
		KASSERT(sc->sc_cmd_trb == trb);
		sc->sc_cmd_trb = NULL;
		splx(s);
		return (error);
	}
	splx(s);

	/* The interrupt handler stashed the completion event here. */
	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));

	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
		return (0);

#ifdef XHCI_DEBUG
	printf("%s: event error code=%d, result=%d \n", DEVNAME(sc),
	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
	xhci_dump_trb(trb0);
#endif
	return (EIO);
}
1701 :
/*
 * Abort the command the controller is currently executing by setting
 * the Command Abort bit, then poll until the Command Ring Running bit
 * clears.  Returns 0 on success (or if no command was running) and 1
 * on timeout.
 */
int
xhci_command_abort(struct xhci_softc *sc)
{
	uint32_t reg;
	int i;

	reg = XOREAD4(sc, XHCI_CRCR_LO);
	if ((reg & XHCI_CRCR_LO_CRR) == 0)
		return (0);

	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	/* Give the controller up to 250ms to stop the ring. */
	for (i = 0; i < 250; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
		if (!reg)
			break;
	}

	if (reg) {
		printf("%s: command ring abort timeout\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}
1729 :
1730 : int
1731 0 : xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1732 : {
1733 0 : struct xhci_trb trb;
1734 : int error;
1735 :
1736 : DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
1737 :
1738 0 : trb.trb_paddr = htole64(addr);
1739 0 : trb.trb_status = 0;
1740 0 : trb.trb_flags = htole32(
1741 : XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
1742 : );
1743 :
1744 0 : rw_enter_write(&sc->sc_cmd_lock);
1745 0 : error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
1746 0 : rw_exit_write(&sc->sc_cmd_lock);
1747 0 : return (error);
1748 0 : }
1749 :
1750 : int
1751 0 : xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1752 : {
1753 0 : struct xhci_trb trb;
1754 : int error;
1755 :
1756 : DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1757 :
1758 0 : trb.trb_paddr = 0;
1759 0 : trb.trb_status = 0;
1760 0 : trb.trb_flags = htole32(
1761 : XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
1762 : );
1763 :
1764 0 : rw_enter_write(&sc->sc_cmd_lock);
1765 0 : error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
1766 0 : rw_exit_write(&sc->sc_cmd_lock);
1767 0 : return (error);
1768 0 : }
1769 :
1770 : void
1771 0 : xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1772 : {
1773 0 : struct xhci_trb trb;
1774 :
1775 : DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1776 :
1777 0 : trb.trb_paddr = 0;
1778 0 : trb.trb_status = 0;
1779 0 : trb.trb_flags = htole32(
1780 : XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
1781 : );
1782 :
1783 0 : xhci_command_submit(sc, &trb, 0);
1784 0 : }
1785 :
1786 : void
1787 0 : xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
1788 : uint64_t addr)
1789 : {
1790 0 : struct xhci_trb trb;
1791 :
1792 : DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1793 :
1794 0 : trb.trb_paddr = htole64(addr);
1795 0 : trb.trb_status = 0;
1796 0 : trb.trb_flags = htole32(
1797 : XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
1798 : );
1799 :
1800 0 : xhci_command_submit(sc, &trb, 0);
1801 0 : }
1802 :
1803 : int
1804 0 : xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
1805 : {
1806 0 : struct xhci_trb trb;
1807 : int error;
1808 :
1809 : DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1810 :
1811 0 : trb.trb_paddr = 0;
1812 0 : trb.trb_status = 0;
1813 0 : if (enable)
1814 0 : trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
1815 : else
1816 0 : trb.trb_flags = htole32(
1817 : XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
1818 : );
1819 :
1820 0 : rw_enter_write(&sc->sc_cmd_lock);
1821 0 : error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
1822 0 : rw_exit_write(&sc->sc_cmd_lock);
1823 0 : if (error != 0)
1824 0 : return (EIO);
1825 :
1826 0 : if (enable)
1827 0 : *slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));
1828 :
1829 0 : return (0);
1830 0 : }
1831 :
1832 : int
1833 0 : xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
1834 : uint32_t bsr)
1835 : {
1836 0 : struct xhci_trb trb;
1837 : int error;
1838 :
1839 : DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));
1840 :
1841 0 : trb.trb_paddr = htole64(addr);
1842 0 : trb.trb_status = 0;
1843 0 : trb.trb_flags = htole32(
1844 : XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
1845 : );
1846 :
1847 0 : rw_enter_write(&sc->sc_cmd_lock);
1848 0 : error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
1849 0 : rw_exit_write(&sc->sc_cmd_lock);
1850 0 : return (error);
1851 0 : }
1852 :
1853 : int
1854 0 : xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1855 : {
1856 0 : struct xhci_trb trb;
1857 : int error;
1858 :
1859 : DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
1860 :
1861 0 : trb.trb_paddr = htole64(addr);
1862 0 : trb.trb_status = 0;
1863 0 : trb.trb_flags = htole32(
1864 : XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_EVAL_CTX
1865 : );
1866 :
1867 0 : rw_enter_write(&sc->sc_cmd_lock);
1868 0 : error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
1869 0 : rw_exit_write(&sc->sc_cmd_lock);
1870 0 : return (error);
1871 0 : }
1872 :
#ifdef XHCI_DEBUG
/*
 * Issue a synchronous No-Op command, used only to verify that the
 * command ring is working (debug builds).
 */
int
xhci_cmd_noop(struct xhci_softc *sc)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(XHCI_CMD_NOOP);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}
#endif
1892 :
/*
 * Allocate the per-slot software state: one page holding the input
 * context (input control + slot + 31 endpoint contexts, each
 * sc_ctxsize bytes apart) and one page for the output context, whose
 * physical address is published in the DCBAA.  Returns 0 or ENOMEM.
 */
int
xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
	int i, error;
	uint8_t *kva;

	/*
	 * Setup input context. Even with 64 byte context size, it
	 * fits into the smallest supported page size, so use that.
	 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Carve the page: input control ctx, slot ctx, then EP contexts. */
	sdev->input_ctx = (struct xhci_inctx *)kva;
	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
	for (i = 0; i < 31; i++)
		sdev->ep_ctx[i] =
		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);

	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
	    slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));

	/* Setup output context */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
		return (ENOMEM);
	}

	memset(&sdev->pipes, 0, sizeof(sdev->pipes));

	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
	    slot, (long long)sdev->octx_dma.paddr));

	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
1938 :
/*
 * Release a slot's software state: clear its DCBAA entry first so the
 * controller no longer references the contexts, then free them.
 */
void
xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];

	sc->sc_dcbaa.segs[slot] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);

	memset(sdev, 0, sizeof(struct xhci_soft_dev));
}
1954 :
1955 : /* Root hub descriptors. */
1956 : usb_device_descriptor_t xhci_devd = {
1957 : USB_DEVICE_DESCRIPTOR_SIZE,
1958 : UDESC_DEVICE, /* type */
1959 : {0x00, 0x03}, /* USB version */
1960 : UDCLASS_HUB, /* class */
1961 : UDSUBCLASS_HUB, /* subclass */
1962 : UDPROTO_HSHUBSTT, /* protocol */
1963 : 9, /* max packet */
1964 : {0},{0},{0x00,0x01}, /* device id */
1965 : 1,2,0, /* string indexes */
1966 : 1 /* # of configurations */
1967 : };
1968 :
1969 : const usb_config_descriptor_t xhci_confd = {
1970 : USB_CONFIG_DESCRIPTOR_SIZE,
1971 : UDESC_CONFIG,
1972 : {USB_CONFIG_DESCRIPTOR_SIZE +
1973 : USB_INTERFACE_DESCRIPTOR_SIZE +
1974 : USB_ENDPOINT_DESCRIPTOR_SIZE},
1975 : 1,
1976 : 1,
1977 : 0,
1978 : UC_SELF_POWERED,
1979 : 0 /* max power */
1980 : };
1981 :
1982 : const usb_interface_descriptor_t xhci_ifcd = {
1983 : USB_INTERFACE_DESCRIPTOR_SIZE,
1984 : UDESC_INTERFACE,
1985 : 0,
1986 : 0,
1987 : 1,
1988 : UICLASS_HUB,
1989 : UISUBCLASS_HUB,
1990 : UIPROTO_HSHUBSTT,
1991 : 0
1992 : };
1993 :
1994 : const usb_endpoint_descriptor_t xhci_endpd = {
1995 : USB_ENDPOINT_DESCRIPTOR_SIZE,
1996 : UDESC_ENDPOINT,
1997 : UE_DIR_IN | XHCI_INTR_ENDPT,
1998 : UE_INTERRUPT,
1999 : {2, 0}, /* max 15 ports */
2000 : 255
2001 : };
2002 :
/* SuperSpeed endpoint companion descriptor for the status change endpoint. */
const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT_SS_COMP,
	0,		/* max burst */
	0,		/* attributes */
	{0, 0}		/* bytes per interval */
};
2010 :
/*
 * SuperSpeed hub descriptor template.  bNbrPorts, wHubCharacteristics
 * and bPwrOn2PwrGood are filled in at run time by xhci_root_ctrl_start().
 */
const usb_hub_descriptor_t xhci_hubd = {
	USB_HUB_DESCRIPTOR_SIZE,
	UDESC_SS_HUB,
	0,		/* number of ports, patched at run time */
	{0,0},		/* hub characteristics, patched at run time */
	0,		/* power-on to power-good, patched at run time */
	0,
	{0},
};
2020 :
/*
 * Abort a transfer: stop the endpoint, move its dequeue pointer past
 * the transfer's TRBs and complete the xfer with `status'.  Runs at
 * softusb level and may sleep waiting for the Set TR Dequeue Pointer
 * command; the event path clears xp->aborted_xfer when the transfer
 * completes concurrently.
 */
void
xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	int error;

	splsoftassert(IPL_SOFTUSB);

	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));

	/* XXX The stack should not call abort() in this case. */
	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
		xfer->status = status;
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);
		usb_transfer_complete(xfer);
		return;
	}

	/* Transfer is already done. */
	if (xfer->status != USBD_IN_PROGRESS) {
		DPRINTF(("%s: already done \n", __func__));
		return;
	}

	/* Prevent any timeout to kick in. */
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);

	/* Indicate that we are aborting this transfer. */
	xp->halted = status;
	xp->aborted_xfer = xfer;

	/* Stop the endpoint and wait until the hardware says so. */
	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) {
		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));
		/* Assume the device is gone. */
		xfer->status = status;
		usb_transfer_complete(xfer);
		return;
	}

	/*
	 * The transfer was already completed when we stopped the
	 * endpoint, no need to move the dequeue pointer past its
	 * TRBs.
	 */
	if (xp->aborted_xfer == NULL) {
		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
		xp->halted = 0;
		return;
	}

	/*
	 * At this stage the endpoint has been stopped, so update its
	 * dequeue pointer past the last TRB of the transfer.
	 *
	 * Note: This assumes that only one transfer per endpoint has
	 * pending TRBs on the ring.
	 */
	xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
	    DEQPTR(xp->ring) | xp->ring.toggle);
	/* Sleep until woken (presumably by the command completion path);
	 * the timeout converts XHCI_CMD_TIMEOUT (ms) to ticks, rounded up. */
	error = tsleep(xp, PZERO, "xhciab", (XHCI_CMD_TIMEOUT*hz+999)/1000 + 1);
	if (error)
		printf("%s: timeout aborting transfer\n", DEVNAME(sc));
}
2090 :
2091 : void
2092 0 : xhci_timeout(void *addr)
2093 : {
2094 0 : struct usbd_xfer *xfer = addr;
2095 0 : struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2096 :
2097 0 : if (sc->sc_bus.dying) {
2098 0 : xhci_timeout_task(addr);
2099 0 : return;
2100 : }
2101 :
2102 0 : usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
2103 : USB_TASK_TYPE_ABORT);
2104 0 : usb_add_task(xfer->device, &xfer->abort_task);
2105 0 : }
2106 :
2107 : void
2108 0 : xhci_timeout_task(void *addr)
2109 : {
2110 0 : struct usbd_xfer *xfer = addr;
2111 : int s;
2112 :
2113 0 : s = splusb();
2114 0 : xhci_abort_xfer(xfer, USBD_TIMEOUT);
2115 0 : splx(s);
2116 0 : }
2117 :
2118 : usbd_status
2119 0 : xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
2120 : {
2121 : usbd_status err;
2122 :
2123 0 : err = usb_insert_transfer(xfer);
2124 0 : if (err)
2125 0 : return (err);
2126 :
2127 0 : return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2128 0 : }
2129 :
/*
 * Emulate the root hub: decode a standard or hub-class control request
 * and either answer it from the static descriptors above or translate
 * it into PORTSC register accesses.  Always completes the xfer before
 * returning.
 */
usbd_status
xhci_root_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	usb_port_status_t ps;
	usb_device_request_t *req;
	void *buf = NULL;
	usb_hub_descriptor_t hubd;
	usbd_status err;
	int s, len, value, index;
	int l, totlen = 0;
	int port, i;
	uint32_t v;

	KASSERT(xfer->rqflags & URQ_REQUEST);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	req = &xfer->request;

	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
	    req->bmRequestType, req->bRequest));

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

	if (len != 0)
		buf = KERNADDR(&xfer->dmabuf, 0);

	/* Dispatch on (bRequest, bmRequestType) packed into one value. */
#define C(x,y) ((x) | ((y) << 8))
	switch(C(req->bRequest, req->bmRequestType)) {
	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
		/*
		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
		 * for the integrated root hub.
		 */
		break;
	case C(UR_GET_CONFIG, UT_READ_DEVICE):
		if (len > 0) {
			*(uint8_t *)buf = sc->sc_conf;
			totlen = 1;
		}
		break;
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
		switch(value >> 8) {
		case UDESC_DEVICE:
			if ((value & 0xff) != 0) {
				err = USBD_IOERROR;
				goto ret;
			}
			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
			/* Patch the host controller's vendor id in. */
			USETW(xhci_devd.idVendor, sc->sc_id_vendor);
			memcpy(buf, &xhci_devd, l);
			break;
		/*
		 * We can't really operate at another speed, but the spec says
		 * we need this descriptor.
		 */
		case UDESC_OTHER_SPEED_CONFIGURATION:
		case UDESC_CONFIG:
			if ((value & 0xff) != 0) {
				err = USBD_IOERROR;
				goto ret;
			}
			/* Config, interface and endpoint descriptors back
			 * to back, each clamped to the remaining length. */
			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
			memcpy(buf, &xhci_confd, l);
			((usb_config_descriptor_t *)buf)->bDescriptorType =
			    value >> 8;
			buf = (char *)buf + l;
			len -= l;
			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
			totlen += l;
			memcpy(buf, &xhci_ifcd, l);
			buf = (char *)buf + l;
			len -= l;
			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
			totlen += l;
			memcpy(buf, &xhci_endpd, l);
			break;
		case UDESC_STRING:
			if (len == 0)
				break;
			*(u_int8_t *)buf = 0;
			totlen = 1;
			switch (value & 0xff) {
			case 0: /* Language table */
				totlen = usbd_str(buf, len, "\001");
				break;
			case 1: /* Vendor */
				totlen = usbd_str(buf, len, sc->sc_vendor);
				break;
			case 2: /* Product */
				totlen = usbd_str(buf, len, "xHCI root hub");
				break;
			}
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
		if (len > 0) {
			*(uint8_t *)buf = 0;
			totlen = 1;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_DEVICE):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
			totlen = 2;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus, 0);
			totlen = 2;
		}
		break;
	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
		if (value >= USB_MAX_DEVICES) {
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
		if (value != 0 && value != 1) {
			err = USBD_IOERROR;
			goto ret;
		}
		sc->sc_conf = value;
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
		err = USBD_IOERROR;
		goto ret;
	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
		break;
	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
		break;
	/* Hub requests */
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
		    "port=%d feature=%d\n", index, value));
		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		port = XHCI_PORTSC(index);
		/* Mask write-1-to-clear bits so they aren't cleared by
		 * accident when writing the register back. */
		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
		switch (value) {
		case UHF_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			/* TODO */
			break;
		case UHF_PORT_POWER:
			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
			break;
		case UHF_PORT_INDICATOR:
			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
			break;
		case UHF_C_PORT_CONNECTION:
			XOWRITE4(sc, port, v | XHCI_PS_CSC);
			break;
		case UHF_C_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PEC);
			break;
		case UHF_C_PORT_SUSPEND:
		case UHF_C_PORT_LINK_STATE:
			XOWRITE4(sc, port, v | XHCI_PS_PLC);
			break;
		case UHF_C_PORT_OVER_CURRENT:
			XOWRITE4(sc, port, v | XHCI_PS_OCC);
			break;
		case UHF_C_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_C_BH_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_WRC);
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;

	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
		if (len == 0)
			break;
		if ((value & 0xff) != 0) {
			err = USBD_IOERROR;
			goto ret;
		}
		/* Build the hub descriptor from the template plus the
		 * controller's capability and per-port registers. */
		v = XREAD4(sc, XHCI_HCCPARAMS);
		hubd = xhci_hubd;
		hubd.bNbrPorts = sc->sc_noport;
		USETW(hubd.wHubCharacteristics,
		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
		for (i = 1; i <= sc->sc_noport; i++) {
			v = XOREAD4(sc, XHCI_PORTSC(i));
			if (v & XHCI_PS_DR)
				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
		}
		/* i == sc_noport + 1 after the loop. */
		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
		l = min(len, hubd.bDescLength);
		totlen = l;
		memcpy(buf, &hubd, l);
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
		if (len != 16) {
			err = USBD_IOERROR;
			goto ret;
		}
		memset(buf, 0, len);
		totlen = len;
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
		    index));
		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		if (len != 4) {
			err = USBD_IOERROR;
			goto ret;
		}
		/* Translate PORTSC bits into USB port status/change bits. */
		v = XOREAD4(sc, XHCI_PORTSC(index));
		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
		i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v));
		switch (XHCI_PS_SPEED(v)) {
		case XHCI_SPEED_FULL:
			i |= UPS_FULL_SPEED;
			break;
		case XHCI_SPEED_LOW:
			i |= UPS_LOW_SPEED;
			break;
		case XHCI_SPEED_HIGH:
			i |= UPS_HIGH_SPEED;
			break;
		case XHCI_SPEED_SUPER:
		default:
			break;
		}
		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PR)	i |= UPS_RESET;
		if (v & XHCI_PS_PP) {
			if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL &&
			    XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH)
				i |= UPS_PORT_POWER;
			else
				i |= UPS_PORT_POWER_SS;
		}
		USETW(ps.wPortStatus, i);
		i = 0;
		if (v & XHCI_PS_CSC)	i |= UPS_C_CONNECT_STATUS;
		if (v & XHCI_PS_PEC)	i |= UPS_C_PORT_ENABLED;
		if (v & XHCI_PS_OCC)	i |= UPS_C_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
		USETW(ps.wPortChange, i);
		l = min(len, sizeof ps);
		memcpy(buf, &ps, l);
		totlen = l;
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
		err = USBD_IOERROR;
		goto ret;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
		/* High byte of wIndex selects LPM for PORT_SUSPEND. */
		i = index >> 8;
		index &= 0x00ff;

		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		port = XHCI_PORTSC(index);
		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;

		switch (value) {
		case UHF_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
				err = USBD_IOERROR;
				goto ret;
			}
			XOWRITE4(sc, port, v |
			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
			break;
		case UHF_PORT_RESET:
			DPRINTFN(6, ("reset port %d\n", index));
			XOWRITE4(sc, port, v | XHCI_PS_PR);
			break;
		case UHF_PORT_POWER:
			DPRINTFN(3, ("set port power %d\n", index));
			XOWRITE4(sc, port, v | XHCI_PS_PP);
			break;
		case UHF_PORT_INDICATOR:
			DPRINTFN(3, ("set port indicator %d\n", index));

			v &= ~XHCI_PS_SET_PIC(3);
			v |= XHCI_PS_SET_PIC(1);

			XOWRITE4(sc, port, v);
			break;
		case UHF_C_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_C_BH_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_WRC);
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
		break;
	default:
		err = USBD_IOERROR;
		goto ret;
	}
	xfer->actlen = totlen;
	err = USBD_NORMAL_COMPLETION;
ret:
	/* Root hub requests complete synchronously. */
	xfer->status = err;
	s = splusb();
	usb_transfer_complete(xfer);
	splx(s);
	return (err);
}
2489 :
2490 :
/* Pipe method stub for operations that require no work on xHCI. */
void
xhci_noop(struct usbd_xfer *xfer)
{
}
2495 :
2496 :
2497 : usbd_status
2498 0 : xhci_root_intr_transfer(struct usbd_xfer *xfer)
2499 : {
2500 : usbd_status err;
2501 :
2502 0 : err = usb_insert_transfer(xfer);
2503 0 : if (err)
2504 0 : return (err);
2505 :
2506 0 : return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2507 0 : }
2508 :
2509 : usbd_status
2510 0 : xhci_root_intr_start(struct usbd_xfer *xfer)
2511 : {
2512 0 : struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2513 :
2514 0 : if (sc->sc_bus.dying)
2515 0 : return (USBD_IOERROR);
2516 :
2517 0 : sc->sc_intrxfer = xfer;
2518 :
2519 0 : return (USBD_IN_PROGRESS);
2520 0 : }
2521 :
2522 : void
2523 0 : xhci_root_intr_abort(struct usbd_xfer *xfer)
2524 : {
2525 0 : struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2526 : int s;
2527 :
2528 0 : sc->sc_intrxfer = NULL;
2529 :
2530 0 : xfer->status = USBD_CANCELLED;
2531 0 : s = splusb();
2532 0 : usb_transfer_complete(xfer);
2533 0 : splx(s);
2534 0 : }
2535 :
/* Nothing to clean up after a root hub interrupt transfer completes. */
void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
}
2540 :
2541 : /*
2542 : * Number of packets remaining in the TD after the corresponding TRB.
2543 : *
2544 : * Section 4.11.2.4 of xHCI specification r1.1.
2545 : */
2546 : static inline uint32_t
2547 0 : xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len)
2548 : {
2549 0 : uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2550 :
2551 0 : if (len == 0)
2552 0 : return XHCI_TRB_TDREM(0);
2553 :
2554 0 : npkt = howmany(remain - len, UE_GET_SIZE(mps));
2555 0 : if (npkt > 31)
2556 : npkt = 31;
2557 :
2558 0 : return XHCI_TRB_TDREM(npkt);
2559 0 : }
2560 :
2561 : /*
2562 : * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC).
2563 : *
2564 : * Section 4.11.2.3 of xHCI specification r1.1.
2565 : */
2566 : static inline uint32_t
2567 0 : xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc)
2568 : {
2569 0 : uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2570 : uint32_t maxb, tdpc, residue, tbc;
2571 :
2572 : /* Transfer Descriptor Packet Count, section 4.14.1. */
2573 0 : tdpc = howmany(len, UE_GET_SIZE(mps));
2574 0 : if (tdpc == 0)
2575 : tdpc = 1;
2576 :
2577 : /* Transfer Burst Count */
2578 0 : maxb = xhci_pipe_maxburst(xfer->pipe);
2579 0 : tbc = howmany(tdpc, maxb + 1) - 1;
2580 :
2581 : /* Transfer Last Burst Packet Count */
2582 0 : if (xfer->device->speed == USB_SPEED_SUPER) {
2583 0 : residue = tdpc % (maxb + 1);
2584 0 : if (residue == 0)
2585 0 : *tlbpc = maxb;
2586 : else
2587 0 : *tlbpc = residue - 1;
2588 : } else {
2589 0 : *tlbpc = tdpc - 1;
2590 : }
2591 :
2592 0 : return (tbc);
2593 : }
2594 :
2595 : usbd_status
2596 0 : xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2597 : {
2598 : usbd_status err;
2599 :
2600 0 : err = usb_insert_transfer(xfer);
2601 0 : if (err)
2602 0 : return (err);
2603 :
2604 0 : return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2605 0 : }
2606 :
/*
 * Enqueue a control transfer as a Setup/(Data)/Status TD on the
 * endpoint's transfer ring and ring the doorbell.  The Setup TRB's
 * cycle bit is written last so the controller cannot start the TD
 * before the later stages are on the ring.
 */
usbd_status
xhci_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t flags, len = UGETW(xfer->request.wLength);
	uint8_t toggle0, toggle;
	int s;

	KASSERT(xfer->rqflags & URQ_REQUEST);

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* Worst case: setup + data + status TRBs. */
	if (xp->free_trbs < 3)
		return (USBD_NOMEM);

	/* We'll do the setup TRB once we're finished with the other stages. */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle0, 0);

	/* Data TRB */
	if (len != 0) {
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);

		flags = XHCI_TRB_TYPE_DATA | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP;

		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, len, len)
		);
		trb->trb_flags = htole32(flags);

		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);
	}

	/* Status TRB */
	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);

	/* Status stage direction is the opposite of the data stage's. */
	flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle;
	if (len == 0 || !usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_DIR_IN;

	trb->trb_paddr = 0;
	trb->trb_status = htole32(XHCI_TRB_INTR(0));
	trb->trb_flags = htole32(flags);

	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Setup TRB */
	flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | toggle0;
	if (len != 0) {
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_TRT_IN;
		else
			flags |= XHCI_TRB_TRT_OUT;
	}

	/* IDT: the 8-byte request is stored inline in the TRB itself. */
	memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr));
	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Write the flags (cycle bit) last, with its own sync. */
	trb0->trb_flags = htole32(flags);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;
	if (xfer->timeout && !sc->sc_bus.use_polling) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
2696 :
/* Cancel a pending control transfer from the host side. */
void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2702 :
2703 : usbd_status
2704 0 : xhci_device_generic_transfer(struct usbd_xfer *xfer)
2705 : {
2706 : usbd_status err;
2707 :
2708 0 : err = usb_insert_transfer(xfer);
2709 0 : if (err)
2710 0 : return (err);
2711 :
2712 0 : return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2713 0 : }
2714 :
/*
 * Enqueue a bulk/interrupt transfer as a chain of Normal TRBs and ring
 * the doorbell.  The first TRB's cycle bit is written last so the
 * controller cannot start the TD before the whole chain is in place.
 */
usbd_status
xhci_device_generic_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t len, remain, flags;
	uint32_t len0, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
	uint8_t toggle0, toggle;
	int s, i, ntrb;

	KASSERT(!(xfer->rqflags & URQ_REQUEST));

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* How many TRBs do we need for this transfer? */
	ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE);

	/* If the buffer crosses a 64k boundary, we need one more. */
	len0 = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
	if (len0 < xfer->length)
		ntrb++;
	else
		len0 = xfer->length;

	/* If we need to append a zero length packet, we need one more. */
	if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) &&
	    (xfer->length % UE_GET_SIZE(mps) == 0))
		ntrb++;

	if (xp->free_trbs < ntrb)
		return (USBD_NOMEM);

	/* We'll do the first TRB once we're finished with the chain. */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle0, (ntrb == 1));

	remain = xfer->length - len0;
	paddr += len0;

	/* Chain more TRBs if needed. */
	for (i = ntrb - 1; i > 0; i--) {
		len = min(remain, XHCI_TRB_MAXSIZE);

		/* Next (or Last) TRB. */
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
		flags = XHCI_TRB_TYPE_NORMAL | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_ISP;
		/* Only the last TRB interrupts; others chain forward. */
		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

		trb->trb_paddr = htole64(paddr);
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, remain, len)
		);
		trb->trb_flags = htole32(flags);

		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);

		remain -= len;
		paddr += len;
	}

	/* First TRB. */
	flags = XHCI_TRB_TYPE_NORMAL | toggle0;
	if (usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_ISP;
	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
	trb0->trb_status = htole32(
	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len0) |
	    xhci_xfer_tdsize(xfer, xfer->length, len0)
	);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Write the flags (cycle bit) last, with its own sync. */
	trb0->trb_flags = htole32(flags);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;
	if (xfer->timeout && !sc->sc_bus.use_polling) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
2815 :
2816 : void
2817 0 : xhci_device_generic_done(struct usbd_xfer *xfer)
2818 : {
2819 : /* Only happens with interrupt transfers. */
2820 0 : if (xfer->pipe->repeat) {
2821 0 : xfer->actlen = 0;
2822 0 : xhci_device_generic_start(xfer);
2823 0 : }
2824 0 : }
2825 :
/* Cancel a pending bulk/interrupt transfer from the host side. */
void
xhci_device_generic_abort(struct usbd_xfer *xfer)
{
	/* A repeating pipe may only abort its own repeating xfer. */
	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);

	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2833 :
2834 : usbd_status
2835 0 : xhci_device_isoc_transfer(struct usbd_xfer *xfer)
2836 : {
2837 : usbd_status err;
2838 :
2839 0 : err = usb_insert_transfer(xfer);
2840 0 : if (err && err != USBD_IN_PROGRESS)
2841 0 : return (err);
2842 :
2843 0 : return (xhci_device_isoc_start(xfer));
2844 0 : }
2845 :
/*
 * Enqueue an isochronous transfer: one Isoch TRB (with TBC/TLBPC)
 * followed by chained Normal TRBs, one per frame.  The first TRB's
 * cycle bit is written last so the controller cannot start the TD
 * before the whole chain is on the ring.
 */
usbd_status
xhci_device_isoc_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	struct xhci_trb *trb0, *trb;
	uint32_t len, remain, flags;
	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
	uint32_t len0, tbc, tlbpc;
	uint8_t toggle0, toggle;
	int s, i, ntrb = xfer->nframes;

	KASSERT(!(xfer->rqflags & URQ_REQUEST));

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* Why would you do that anyway? */
	if (sc->sc_bus.use_polling)
		return (USBD_INVAL);

	/*
	 * To allow continuous transfers, above we start all transfers
	 * immediately. However, we're still going to get usbd_start_next call
	 * this when another xfer completes. So, check if this is already
	 * in progress or not
	 */
	if (xx->ntrb > 0)
		return (USBD_IN_PROGRESS);

	if (xp->free_trbs < ntrb)
		return (USBD_NOMEM);

	len0 = xfer->frlengths[0];

	/* We'll do the first TRB once we're finished with the chain. */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle0, (ntrb == 1));

	remain = xfer->length - len0;
	paddr += len0;

	/* Chain more TRBs if needed. */
	for (i = ntrb - 1; i > 0; i--) {
		/* One TRB per frame, sized by its frame length. */
		len = xfer->frlengths[ntrb - i];

		/* Next (or Last) TRB. */
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
		flags = XHCI_TRB_TYPE_NORMAL | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_ISP;
		/* Only the last TRB interrupts; others chain forward. */
		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

		trb->trb_paddr = htole64(paddr);
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, remain, len)
		);
		trb->trb_flags = htole32(flags);

		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);

		remain -= len;
		paddr += len;
	}

	/* First TRB. */
	flags = XHCI_TRB_TYPE_ISOCH | XHCI_TRB_SIA | toggle0;
	if (usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_ISP;
	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

	/* Burst count fields, computed from the first frame's length. */
	tbc = xhci_xfer_tbc(xfer, len0, &tlbpc);
	flags |= XHCI_TRB_ISOC_TBC(tbc) | XHCI_TRB_ISOC_TLBPC(tlbpc);

	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
	trb0->trb_status = htole32(
	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len0) |
	    xhci_xfer_tdsize(xfer, xfer->length, len0)
	);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Write the flags (cycle bit) last, with its own sync. */
	trb0->trb_flags = htole32(flags);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;

	if (xfer->timeout) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
|