1// Copyright 2016 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <ddk/debug.h>
6#include <hw/arch_ops.h>
7#include <hw/reg.h>
8#include <zircon/types.h>
9#include <zircon/syscalls.h>
10#include <zircon/process.h>
11#include <limits.h>
12#include <stdio.h>
13#include <stdlib.h>
14#include <string.h>
15#include <threads.h>
16#include <unistd.h>
17
18#include "xdc.h"
19#include "xhci.h"
20#include "xhci-device-manager.h"
21#include "xhci-root-hub.h"
22#include "xhci-transfer.h"
23#include "xhci-util.h"
24
// Round x up to the next multiple of "multiple" (must be a power of two).
// All macro arguments are fully parenthesized so that compound expressions
// (e.g. a ternary or bitwise-or) passed as arguments expand correctly.
#define ROUNDUP_TO(x, multiple) (((x) + (multiple) - 1) & ~((multiple) - 1))
#define PAGE_ROUNDUP(x) ROUNDUP_TO((x), PAGE_SIZE)

// The Interrupter Moderation Interval prevents the controller from sending interrupts too often.
// According to XHCI Rev 1.1 4.17.2, the default is 4000 (= 1 ms). We set it to 1000 (= 250 us) to
// get better latency on completions for bulk transfers; setting it too low seems to destabilize the
// system.
#define XHCI_IMODI_VAL      1000
33
34uint8_t xhci_endpoint_index(uint8_t ep_address) {
35    if (ep_address == 0) return 0;
36    uint32_t index = 2 * (ep_address & ~USB_ENDPOINT_DIR_MASK);
37    if ((ep_address & USB_ENDPOINT_DIR_MASK) == USB_ENDPOINT_OUT)
38        index--;
39    return index;
40}
41
42// returns index into xhci->root_hubs[], or -1 if not a root hub
43int xhci_get_root_hub_index(xhci_t* xhci, uint32_t device_id) {
44    // regular devices have IDs 1 through xhci->max_slots
45    // root hub IDs start at xhci->max_slots + 1
46    int index = device_id - (xhci->max_slots + 1);
47    if (index < 0 || index >= XHCI_RH_COUNT) return -1;
48    return index;
49}
50
51static void xhci_read_extended_caps(xhci_t* xhci) {
52    uint32_t* cap_ptr = NULL;
53    while ((cap_ptr = xhci_get_next_ext_cap(xhci->mmio, cap_ptr, NULL))) {
54        uint32_t cap_id = XHCI_GET_BITS32(cap_ptr, EXT_CAP_CAPABILITY_ID_START,
55                                          EXT_CAP_CAPABILITY_ID_BITS);
56
57        if (cap_id == EXT_CAP_SUPPORTED_PROTOCOL) {
58            uint32_t rev_major = XHCI_GET_BITS32(cap_ptr, EXT_CAP_SP_REV_MAJOR_START,
59                                                 EXT_CAP_SP_REV_MAJOR_BITS);
60            uint32_t rev_minor = XHCI_GET_BITS32(cap_ptr, EXT_CAP_SP_REV_MINOR_START,
61                                                 EXT_CAP_SP_REV_MINOR_BITS);
62            zxlogf(TRACE, "EXT_CAP_SUPPORTED_PROTOCOL %d.%d\n", rev_major, rev_minor);
63
64            uint32_t psic = XHCI_GET_BITS32(&cap_ptr[2], EXT_CAP_SP_PSIC_START,
65                                            EXT_CAP_SP_PSIC_BITS);
66            // psic = count of PSI registers
67            uint32_t compat_port_offset = XHCI_GET_BITS32(&cap_ptr[2],
68                                                          EXT_CAP_SP_COMPAT_PORT_OFFSET_START,
69                                                          EXT_CAP_SP_COMPAT_PORT_OFFSET_BITS);
70            uint32_t compat_port_count = XHCI_GET_BITS32(&cap_ptr[2],
71                                                         EXT_CAP_SP_COMPAT_PORT_COUNT_START,
72                                                         EXT_CAP_SP_COMPAT_PORT_COUNT_BITS);
73
74            zxlogf(TRACE, "compat_port_offset: %d compat_port_count: %d psic: %d\n",
75                    compat_port_offset, compat_port_count, psic);
76
77            int rh_index;
78            if (rev_major == 3) {
79                rh_index = XHCI_RH_USB_3;
80            } else if (rev_major == 2) {
81                rh_index = XHCI_RH_USB_2;
82            } else {
83                zxlogf(ERROR, "unsupported rev_major in XHCI extended capabilities\n");
84                rh_index = -1;
85            }
86            for (off_t i = 0; i < compat_port_count; i++) {
87                off_t index = compat_port_offset + i - 1;
88                if (index >= xhci->rh_num_ports) {
89                    zxlogf(ERROR, "port index out of range in xhci_read_extended_caps\n");
90                    break;
91                }
92                xhci->rh_map[index] = rh_index;
93            }
94
95            uint32_t* psi = &cap_ptr[4];
96            for (uint32_t i = 0; i < psic; i++, psi++) {
97                uint32_t psiv = XHCI_GET_BITS32(psi, EXT_CAP_SP_PSIV_START, EXT_CAP_SP_PSIV_BITS);
98                uint32_t psie = XHCI_GET_BITS32(psi, EXT_CAP_SP_PSIE_START, EXT_CAP_SP_PSIE_BITS);
99                uint32_t plt = XHCI_GET_BITS32(psi, EXT_CAP_SP_PLT_START, EXT_CAP_SP_PLT_BITS);
100                uint32_t psim = XHCI_GET_BITS32(psi, EXT_CAP_SP_PSIM_START, EXT_CAP_SP_PSIM_BITS);
101                zxlogf(TRACE, "PSI[%d] psiv: %d psie: %d plt: %d psim: %d\n", i, psiv, psie, plt, psim);
102            }
103        } else if (cap_id == EXT_CAP_USB_LEGACY_SUPPORT) {
104            xhci->usb_legacy_support_cap = (xhci_usb_legacy_support_cap_t*)cap_ptr;
105        }
106    }
107}
108
// Performs the XHCI BIOS/OS handoff: sets the OS-owned semaphore bit and
// polls (10 ms intervals, up to one second) for the BIOS to drop its
// semaphore. Returns ZX_OK when ownership was acquired or no legacy support
// capability exists; ZX_ERR_TIMED_OUT if the BIOS never released the
// controller (in which case the OS semaphore is cleared again).
static zx_status_t xhci_claim_ownership(xhci_t* xhci) {
    xhci_usb_legacy_support_cap_t* cap = xhci->usb_legacy_support_cap;
    if (cap == NULL) {
        return ZX_OK;
    }

    // The XHCI spec defines this handoff protocol.  We need to wait at most one
    // second for the BIOS to respond.
    //
    // Note that bios_owned_sem and os_owned_sem are adjacent 1-byte fields, so
    // must be written to as single bytes to prevent the OS from modifying the
    // BIOS semaphore.  Additionally, all bits besides bit 0 in the OS semaphore
    // are RsvdP, so we need to preserve them on modification.
    cap->os_owned_sem |= 1;
    zx_time_t now = zx_clock_get_monotonic();
    zx_time_t deadline = now + ZX_SEC(1);
    while ((cap->bios_owned_sem & 1) && now < deadline) {
        zx_nanosleep(zx_deadline_after(ZX_MSEC(10)));
        now = zx_clock_get_monotonic();
    }

    // Timed out: back out our claim so the BIOS still sees a consistent state.
    if (cap->bios_owned_sem & 1) {
        cap->os_owned_sem &= ~1;
        return ZX_ERR_TIMED_OUT;
    }
    return ZX_OK;
}
136
137static void xhci_vmo_release(zx_handle_t handle, zx_vaddr_t virt) {
138    uint64_t size;
139    zx_vmo_get_size(handle, &size);
140    zx_vmar_unmap(zx_vmar_root_self(), virt, size);
141    zx_handle_close(handle);
142}
143
144zx_status_t xhci_init(xhci_t* xhci, xhci_mode_t mode, uint32_t num_interrupts) {
145    zx_status_t result = ZX_OK;
146
147    list_initialize(&xhci->command_queue);
148    mtx_init(&xhci->command_ring_lock, mtx_plain);
149    mtx_init(&xhci->command_queue_mutex, mtx_plain);
150    mtx_init(&xhci->mfindex_mutex, mtx_plain);
151    mtx_init(&xhci->input_context_lock, mtx_plain);
152    sync_completion_reset(&xhci->command_queue_completion);
153
154    usb_request_pool_init(&xhci->free_reqs);
155
156    xhci->cap_regs = (xhci_cap_regs_t*)xhci->mmio;
157    xhci->op_regs = (xhci_op_regs_t*)((uint8_t*)xhci->cap_regs + xhci->cap_regs->length);
158    xhci->doorbells = (uint32_t*)((uint8_t*)xhci->cap_regs + xhci->cap_regs->dboff);
159    xhci->runtime_regs = (xhci_runtime_regs_t*)((uint8_t*)xhci->cap_regs + xhci->cap_regs->rtsoff);
160    volatile uint32_t* hcsparams1 = &xhci->cap_regs->hcsparams1;
161    volatile uint32_t* hcsparams2 = &xhci->cap_regs->hcsparams2;
162    volatile uint32_t* hccparams1 = &xhci->cap_regs->hccparams1;
163    volatile uint32_t* hccparams2 = &xhci->cap_regs->hccparams2;
164
165    xhci->mode = mode;
166    xhci->num_interrupts = num_interrupts;
167
168    xhci->max_slots = XHCI_GET_BITS32(hcsparams1, HCSPARAMS1_MAX_SLOTS_START,
169                                      HCSPARAMS1_MAX_SLOTS_BITS);
170    xhci->rh_num_ports = XHCI_GET_BITS32(hcsparams1, HCSPARAMS1_MAX_PORTS_START,
171                                         HCSPARAMS1_MAX_PORTS_BITS);
172    xhci->context_size = (XHCI_READ32(hccparams1) & HCCPARAMS1_CSZ ? 64 : 32);
173    xhci->large_esit = !!(XHCI_READ32(hccparams2) & HCCPARAMS2_LEC);
174
175    uint32_t scratch_pad_bufs = XHCI_GET_BITS32(hcsparams2, HCSPARAMS2_MAX_SBBUF_HI_START,
176                                                HCSPARAMS2_MAX_SBBUF_HI_BITS);
177    scratch_pad_bufs <<= HCSPARAMS2_MAX_SBBUF_LO_BITS;
178    scratch_pad_bufs |= XHCI_GET_BITS32(hcsparams2, HCSPARAMS2_MAX_SBBUF_LO_START,
179                                        HCSPARAMS2_MAX_SBBUF_LO_BITS);
180    xhci->page_size = XHCI_READ32(&xhci->op_regs->pagesize) << 12;
181
182    // allocate array to hold our slots
183    // add 1 to allow 1-based indexing of slots
184    xhci->slots = (xhci_slot_t*)calloc(xhci->max_slots + 1, sizeof(xhci_slot_t));
185    if (!xhci->slots) {
186        result = ZX_ERR_NO_MEMORY;
187        goto fail;
188    }
189
190    xhci->rh_map = (uint8_t *)calloc(xhci->rh_num_ports, sizeof(uint8_t));
191    if (!xhci->rh_map) {
192        result = ZX_ERR_NO_MEMORY;
193        goto fail;
194    }
195    xhci->rh_port_map = (uint8_t *)calloc(xhci->rh_num_ports, sizeof(uint8_t));
196    if (!xhci->rh_port_map) {
197        result = ZX_ERR_NO_MEMORY;
198        goto fail;
199    }
200    xhci_read_extended_caps(xhci);
201
202    // We need to claim before we write to any other registers on the
203    // controller, but after we've read the extended capabilities.
204    result = xhci_claim_ownership(xhci);
205    if (result != ZX_OK) {
206        zxlogf(ERROR, "xhci_claim_ownership failed\n");
207        goto fail;
208    }
209
210    // Allocate DMA memory for various things
211    result = io_buffer_init(&xhci->dcbaa_erst_buffer, xhci->bti_handle, PAGE_SIZE,
212                            IO_BUFFER_RW | IO_BUFFER_CONTIG | XHCI_IO_BUFFER_UNCACHED);
213    if (result != ZX_OK) {
214        zxlogf(ERROR, "io_buffer_init failed for xhci->dcbaa_erst_buffer\n");
215        goto fail;
216    }
217    result = io_buffer_init(&xhci->input_context_buffer, xhci->bti_handle, PAGE_SIZE,
218                            IO_BUFFER_RW | IO_BUFFER_CONTIG | XHCI_IO_BUFFER_UNCACHED);
219    if (result != ZX_OK) {
220        zxlogf(ERROR, "io_buffer_init failed for xhci->input_context_buffer\n");
221        goto fail;
222    }
223
224    bool scratch_pad_is_contig = false;
225    if (scratch_pad_bufs > 0) {
226        // map scratchpad buffers read-only
227        uint32_t flags = IO_BUFFER_RO;
228        if (xhci->page_size > PAGE_SIZE) {
229            flags |= IO_BUFFER_CONTIG;
230            scratch_pad_is_contig = true;
231        }
232        size_t scratch_pad_pages_size = scratch_pad_bufs * xhci->page_size;
233        result = io_buffer_init(&xhci->scratch_pad_pages_buffer, xhci->bti_handle,
234                                scratch_pad_pages_size, flags);
235        if (result != ZX_OK) {
236            zxlogf(ERROR, "io_buffer_init failed for xhci->scratch_pad_pages_buffer\n");
237            goto fail;
238        }
239        if (!scratch_pad_is_contig) {
240            result = io_buffer_physmap(&xhci->scratch_pad_pages_buffer);
241            if (result != ZX_OK) {
242                zxlogf(ERROR, "io_buffer_physmap failed for xhci->scratch_pad_pages_buffer\n");
243                goto fail;
244            }
245        }
246        size_t scratch_pad_index_size = PAGE_ROUNDUP(scratch_pad_bufs * sizeof(uint64_t));
247        result = io_buffer_init(&xhci->scratch_pad_index_buffer, xhci->bti_handle,
248                                scratch_pad_index_size,
249                                IO_BUFFER_RW | IO_BUFFER_CONTIG | XHCI_IO_BUFFER_UNCACHED);
250        if (result != ZX_OK) {
251            zxlogf(ERROR, "io_buffer_init failed for xhci->scratch_pad_index_buffer\n");
252            goto fail;
253        }
254    }
255
256    // set up DCBAA, ERST array and input context
257    xhci->dcbaa = (uint64_t *)io_buffer_virt(&xhci->dcbaa_erst_buffer);
258    xhci->dcbaa_phys = io_buffer_phys(&xhci->dcbaa_erst_buffer);
259    xhci->input_context = (uint8_t *)io_buffer_virt(&xhci->input_context_buffer);
260    xhci->input_context_phys = io_buffer_phys(&xhci->input_context_buffer);
261
262    // DCBAA can only be 256 * sizeof(uint64_t) = 2048 bytes, so we have room for ERST array after DCBAA
263    zx_off_t erst_offset = 256 * sizeof(uint64_t);
264
265    size_t array_bytes = ERST_ARRAY_SIZE * sizeof(erst_entry_t);
266    // MSI only supports up to 32 interupts, so the required ERST arrays will fit
267    // within the page. Potentially more pages will need to be allocated for MSI-X.
268    for (uint32_t i = 0; i < xhci->num_interrupts; i++) {
269        // Ran out of space in page.
270        if (erst_offset + array_bytes > PAGE_SIZE) {
271            zxlogf(ERROR, "only have space for %u ERST arrays, want %u\n", i,
272                    xhci->num_interrupts);
273            goto fail;
274        }
275        xhci->erst_arrays[i] = (void *)xhci->dcbaa + erst_offset;
276        xhci->erst_arrays_phys[i] = xhci->dcbaa_phys + erst_offset;
277        // ERST arrays must be 64 byte aligned - see Table 54 in XHCI spec.
278        // dcbaa_phys is already page (and hence 64 byte) aligned, so only
279        // need to round the offset.
280        erst_offset = ROUNDUP_TO(erst_offset + array_bytes, 64);
281    }
282
283    if (scratch_pad_bufs > 0) {
284        uint64_t* scratch_pad_index = (uint64_t *)io_buffer_virt(&xhci->scratch_pad_index_buffer);
285        off_t offset = 0;
286        for (uint32_t i = 0; i < scratch_pad_bufs; i++) {
287            zx_paddr_t scratch_pad_phys;
288            if (scratch_pad_is_contig) {
289                scratch_pad_phys = io_buffer_phys(&xhci->scratch_pad_pages_buffer) + offset;
290            } else {
291                size_t index = offset / PAGE_SIZE;
292                size_t suboffset = offset & (PAGE_SIZE - 1);
293                scratch_pad_phys = xhci->scratch_pad_pages_buffer.phys_list[index] + suboffset;
294            }
295
296            scratch_pad_index[i] = scratch_pad_phys;
297            offset += xhci->page_size;
298        }
299
300        zx_paddr_t scratch_pad_index_phys = io_buffer_phys(&xhci->scratch_pad_index_buffer);
301        xhci->dcbaa[0] = scratch_pad_index_phys;
302    } else {
303        xhci->dcbaa[0] = 0;
304    }
305
306    result = xhci_transfer_ring_init(&xhci->command_ring, xhci->bti_handle, COMMAND_RING_SIZE);
307    if (result != ZX_OK) {
308        zxlogf(ERROR, "xhci_command_ring_init failed\n");
309        goto fail;
310    }
311
312    for (uint32_t i = 0; i < xhci->num_interrupts; i++) {
313        result = xhci_event_ring_init(&xhci->event_rings[i], xhci->bti_handle,
314                                      xhci->erst_arrays[i], EVENT_RING_SIZE);
315        if (result != ZX_OK) {
316            zxlogf(ERROR, "xhci_event_ring_init failed\n");
317            goto fail;
318        }
319    }
320
321    // initialize slots and endpoints
322    for (uint32_t i = 1; i <= xhci->max_slots; i++) {
323        xhci_slot_t* slot = &xhci->slots[i];
324        xhci_endpoint_t* eps = slot->eps;
325        for (int j = 0; j < XHCI_NUM_EPS; j++) {
326            xhci_endpoint_t* ep = &eps[j];
327            mtx_init(&ep->lock, mtx_plain);
328            list_initialize(&ep->queued_reqs);
329            list_initialize(&ep->pending_reqs);
330            ep->current_req = NULL;
331        }
332    }
333
334    // initialize virtual root hub devices
335    for (int i = 0; i < XHCI_RH_COUNT; i++) {
336        result = xhci_root_hub_init(xhci, i);
337        if (result != ZX_OK) goto fail;
338    }
339
340    return ZX_OK;
341
342fail:
343    xhci_free(xhci);
344    return result;
345}
346
347uint32_t xhci_get_max_interrupters(xhci_t* xhci) {
348    xhci_cap_regs_t* cap_regs = (xhci_cap_regs_t*)xhci->mmio;
349    volatile uint32_t* hcsparams1 = &cap_regs->hcsparams1;
350    return XHCI_GET_BITS32(hcsparams1, HCSPARAMS1_MAX_INTRS_START,
351                           HCSPARAMS1_MAX_INTRS_BITS);
352}
353
354int xhci_get_slot_ctx_state(xhci_slot_t* slot) {
355    return XHCI_GET_BITS32(&slot->sc->sc3, SLOT_CTX_SLOT_STATE_START,
356                           SLOT_CTX_CONTEXT_ENTRIES_BITS);
357}
358
359int xhci_get_ep_ctx_state(xhci_slot_t* slot, xhci_endpoint_t* ep) {
360    if (!ep->epc) {
361        return EP_CTX_STATE_DISABLED;
362    }
363    return XHCI_GET_BITS32(&ep->epc->epc0, EP_CTX_EP_STATE_START, EP_CTX_EP_STATE_BITS);
364}
365
366static void xhci_update_erdp(xhci_t* xhci, int interrupter) {
367    xhci_event_ring_t* er = &xhci->event_rings[interrupter];
368    xhci_intr_regs_t* intr_regs = &xhci->runtime_regs->intr_regs[interrupter];
369
370    uint64_t erdp = xhci_event_ring_current_phys(er);
371    erdp |= ERDP_EHB; // clear event handler busy
372    XHCI_WRITE64(&intr_regs->erdp, erdp);
373}
374
// Programs one interrupter's runtime registers: dequeue pointer, interrupt
// enable, moderation interval, and the event ring segment table size/base.
static void xhci_interrupter_init(xhci_t* xhci, int interrupter) {
    xhci_intr_regs_t* intr_regs = &xhci->runtime_regs->intr_regs[interrupter];

    // point the controller at the current event ring dequeue position
    xhci_update_erdp(xhci, interrupter);

    // enable interrupt generation for this interrupter
    XHCI_SET32(&intr_regs->iman, IMAN_IE, IMAN_IE);
    // throttle the interrupt rate (see XHCI_IMODI_VAL at the top of the file)
    XHCI_SET32(&intr_regs->imod, IMODI_MASK, XHCI_IMODI_VAL);
    // event ring segment table: entry count, then physical base address
    XHCI_SET32(&intr_regs->erstsz, ERSTSZ_MASK, ERST_ARRAY_SIZE);
    XHCI_WRITE64(&intr_regs->erstba, xhci->erst_arrays_phys[interrupter]);
}
385
// Polls a 32-bit register, sleeping 1 ms between reads, until the bits
// selected by `bits` equal `expected`. Note: no timeout — spins forever if
// the condition never becomes true.
void xhci_wait_bits(volatile uint32_t* ptr, uint32_t bits, uint32_t expected) {
    for (;;) {
        uint32_t value = XHCI_READ32(ptr);
        if ((value & bits) == expected) {
            return;
        }
        usleep(1000);
    }
}
393
// 64-bit variant of xhci_wait_bits: polls (1 ms sleep per iteration) until
// the masked bits equal `expected`. No timeout.
void xhci_wait_bits64(volatile uint64_t* ptr, uint64_t bits, uint64_t expected) {
    for (;;) {
        uint64_t value = XHCI_READ64(ptr);
        if ((value & bits) == expected) {
            return;
        }
        usleep(1000);
    }
}
401
// Writes the physical address of a device context into the controller's
// Device Context Base Address Array entry for the given slot ID.
void xhci_set_dbcaa(xhci_t* xhci, uint32_t slot_id, zx_paddr_t paddr) {
    XHCI_WRITE64(&xhci->dcbaa[slot_id], paddr);
}
405
// Resets and starts the controller: halts and resets the hardware, programs
// the command ring, DCBAA and max-slots, initializes all interrupters, sets
// Run/Stop, then launches the device manager thread (and the debug
// capability driver on x86). The ordering of register writes below follows
// the controller bring-up sequence and should not be rearranged.
zx_status_t xhci_start(xhci_t* xhci) {
    volatile uint32_t* usbcmd = &xhci->op_regs->usbcmd;
    volatile uint32_t* usbsts = &xhci->op_regs->usbsts;

    // wait for Controller Not Ready to clear before touching registers
    xhci_wait_bits(usbsts, USBSTS_CNR, 0);

    // stop controller
    XHCI_SET32(usbcmd, USBCMD_RS, 0);
    // wait until USBSTS_HCH signals we stopped
    xhci_wait_bits(usbsts, USBSTS_HCH, USBSTS_HCH);

    // reset the controller; HCRST self-clears when the reset completes
    XHCI_SET32(usbcmd, USBCMD_HCRST, USBCMD_HCRST);
    xhci_wait_bits(usbcmd, USBCMD_HCRST, 0);
    xhci_wait_bits(usbsts, USBSTS_CNR, 0);

    if (xhci->mode == XHCI_PCI_MSI || xhci->mode == XHCI_PCI_LEGACY) {
        // enable bus master
        zx_status_t status = pci_enable_bus_master(&xhci->pci, true);
        if (status < 0) {
            zxlogf(ERROR, "usb_xhci_bind enable_bus_master failed %d\n", status);
            return status;
        }
    }

    // setup operational registers
    xhci_op_regs_t* op_regs = xhci->op_regs;
    // initialize command ring, preserving our producer cycle state in CRCR
    uint64_t crcr = xhci_transfer_ring_start_phys(&xhci->command_ring);
    if (xhci->command_ring.pcs) {
        crcr |= CRCR_RCS;
    }
    XHCI_WRITE64(&op_regs->crcr, crcr);

    XHCI_WRITE64(&op_regs->dcbaap, xhci->dcbaa_phys);
    XHCI_SET_BITS32(&op_regs->config, CONFIG_MAX_SLOTS_ENABLED_START,
                    CONFIG_MAX_SLOTS_ENABLED_BITS, xhci->max_slots);

    // initialize interrupters
    for (uint32_t i = 0; i < xhci->num_interrupts; i++) {
        xhci_interrupter_init(xhci, i);
    }

    // start the controller with interrupts and mfindex wrap events enabled
    uint32_t start_flags = USBCMD_RS | USBCMD_INTE | USBCMD_EWE;
    XHCI_SET32(usbcmd, start_flags, start_flags);
    xhci_wait_bits(usbsts, USBSTS_HCH, 0);

    xhci_start_device_thread(xhci);

#if defined(__x86_64__)
    // TODO(jocelyndang): start xdc in a new process.
    zx_status_t status = xdc_bind(xhci->zxdev, xhci->bti_handle, xhci->mmio);
    if (status != ZX_OK) {
        // non-fatal: the controller still works without the debug capability
        zxlogf(ERROR, "xhci_start: xdc_bind failed %d\n", status);
    }
#endif

    return ZX_OK;
}
465
466static void xhci_slot_stop(xhci_slot_t* slot) {
467    for (int i = 0; i < XHCI_NUM_EPS; i++) {
468        xhci_endpoint_t* ep = &slot->eps[i];
469
470        mtx_lock(&ep->lock);
471        if (ep->state != EP_STATE_DEAD) {
472            usb_request_t* req;
473            while ((req = list_remove_tail_type(&ep->pending_reqs, usb_request_t, node)) != NULL) {
474                usb_request_complete(req, ZX_ERR_IO_NOT_PRESENT, 0);
475            }
476            while ((req = list_remove_tail_type(&ep->queued_reqs, usb_request_t, node)) != NULL) {
477                usb_request_complete(req, ZX_ERR_IO_NOT_PRESENT, 0);
478            }
479            ep->state = EP_STATE_DEAD;
480        }
481        mtx_unlock(&ep->lock);
482    }
483}
484
485void xhci_stop(xhci_t* xhci) {
486    volatile uint32_t* usbcmd = &xhci->op_regs->usbcmd;
487    volatile uint32_t* usbsts = &xhci->op_regs->usbsts;
488
489    // stop device thread and root hubs before turning off controller
490    xhci_stop_device_thread(xhci);
491    xhci_stop_root_hubs(xhci);
492
493    // stop controller
494    XHCI_SET32(usbcmd, USBCMD_RS, 0);
495    // wait until USBSTS_HCH signals we stopped
496    xhci_wait_bits(usbsts, USBSTS_HCH, USBSTS_HCH);
497
498    for (uint32_t i = 1; i <= xhci->max_slots; i++) {
499        xhci_slot_stop(&xhci->slots[i]);
500    }
501}
502
503void xhci_free(xhci_t* xhci) {
504    for (uint32_t i = 1; i <= xhci->max_slots; i++) {
505        xhci_slot_t* slot = &xhci->slots[i];
506        io_buffer_release(&slot->buffer);
507
508        for (int j = 0; j < XHCI_NUM_EPS; j++) {
509            xhci_endpoint_t* ep = &slot->eps[j];
510            xhci_transfer_ring_free(&ep->transfer_ring);
511        }
512    }
513    free(xhci->slots);
514
515     for (int i = 0; i < XHCI_RH_COUNT; i++) {
516        xhci_root_hub_free(&xhci->root_hubs[i]);
517    }
518    free(xhci->rh_map);
519    free(xhci->rh_port_map);
520
521    for (uint32_t i = 0; i < xhci->num_interrupts; i++) {
522        xhci_event_ring_free(&xhci->event_rings[i]);
523    }
524
525    xhci_transfer_ring_free(&xhci->command_ring);
526    io_buffer_release(&xhci->dcbaa_erst_buffer);
527    io_buffer_release(&xhci->input_context_buffer);
528    io_buffer_release(&xhci->scratch_pad_pages_buffer);
529    io_buffer_release(&xhci->scratch_pad_index_buffer);
530
531    // this must done after releasing anything that relies
532    // on our bti, like our io_buffers
533    zx_handle_close(xhci->bti_handle);
534
535    free(xhci);
536}
537
// Enqueues a command TRB on the command ring and rings doorbell 0.
// `context` is saved (indexed by the TRB's ring position) so the completion
// handler can invoke its callback when the command-complete event arrives.
void xhci_post_command(xhci_t* xhci, uint32_t command, uint64_t ptr, uint32_t control_bits,
                       xhci_command_context_t* context) {
    // FIXME - check that command ring is not full?

    mtx_lock(&xhci->command_ring_lock);

    xhci_transfer_ring_t* cr = &xhci->command_ring;
    xhci_trb_t* trb = cr->current;
    // remember the context at this ring index for the completion handler
    int index = trb - cr->start;
    xhci->command_contexts[index] = context;

    XHCI_WRITE64(&trb->ptr, ptr);
    XHCI_WRITE32(&trb->status, 0);
    trb_set_control(trb, command, control_bits);

    xhci_increment_ring(cr);

    // memory barrier: the TRB writes above must be visible to the controller
    // before the doorbell write triggers it to read the ring
    hw_mb();
    XHCI_WRITE32(&xhci->doorbells[0], 0);

    mtx_unlock(&xhci->command_ring_lock);
}
560
// Handles a Command Completion event: looks up the context registered when
// the command was posted (by ring index) and invokes its callback with the
// completion code.
static void xhci_handle_command_complete_event(xhci_t* xhci, xhci_trb_t* event_trb) {
    xhci_trb_t* command_trb = xhci_read_trb_ptr(&xhci->command_ring, event_trb);
    uint32_t cc = XHCI_GET_BITS32(&event_trb->status, EVT_TRB_CC_START, EVT_TRB_CC_BITS);
    zxlogf(TRACE, "xhci_handle_command_complete_event slot_id: %d command: %d cc: %d\n",
            (event_trb->control >> TRB_SLOT_ID_START), trb_get_type(command_trb), cc);

    int index = command_trb - xhci->command_ring.start;

    if (cc == TRB_CC_COMMAND_RING_STOPPED) {
        // TRB_CC_COMMAND_RING_STOPPED is generated after aborting a command.
        // Ignore this, since it is unrelated to the next command in the command ring.
        return;
    }

    // clear the slot under the lock so the entry can be reused
    mtx_lock(&xhci->command_ring_lock);
    xhci_command_context_t* context = xhci->command_contexts[index];
    xhci->command_contexts[index] = NULL;
    mtx_unlock(&xhci->command_ring_lock);

    // NOTE(review): context is assumed non-NULL here; if the controller ever
    // completes a TRB that was not posted via xhci_post_command this would
    // crash — confirm that cannot happen.
    context->callback(context->data, cc, command_trb, event_trb);
}
582
583static void xhci_handle_mfindex_wrap(xhci_t* xhci) {
584    mtx_lock(&xhci->mfindex_mutex);
585    xhci->mfindex_wrap_count++;
586    xhci->last_mfindex_wrap = zx_clock_get_monotonic();
587    mtx_unlock(&xhci->mfindex_mutex);
588}
589
590uint64_t xhci_get_current_frame(xhci_t* xhci) {
591    mtx_lock(&xhci->mfindex_mutex);
592
593    uint32_t mfindex = XHCI_READ32(&xhci->runtime_regs->mfindex) & ((1 << XHCI_MFINDEX_BITS) - 1);
594    uint64_t wrap_count = xhci->mfindex_wrap_count;
595    // try to detect race condition where mfindex has wrapped but we haven't processed wrap event yet
596    if (mfindex < 500) {
597        if (zx_clock_get_monotonic() - xhci->last_mfindex_wrap > ZX_MSEC(1000)) {
598            zxlogf(TRACE, "woah, mfindex wrapped before we got the event!\n");
599            wrap_count++;
600        }
601    }
602    mtx_unlock(&xhci->mfindex_mutex);
603
604    // shift three to convert from 125us microframes to 1ms frames
605    return ((wrap_count * (1 << XHCI_MFINDEX_BITS)) + mfindex) >> 3;
606}
607
// Drains one interrupter's event ring: dispatches every TRB whose cycle bit
// matches our Consumer Cycle State, then publishes the new dequeue pointer.
static void xhci_handle_events(xhci_t* xhci, int interrupter) {
    xhci_event_ring_t* er = &xhci->event_rings[interrupter];

    // process all TRBs with cycle bit matching our CCS
    while ((XHCI_READ32(&er->current->control) & TRB_C) == er->ccs) {
        uint32_t type = trb_get_type(er->current);
        switch (type) {
        case TRB_EVENT_COMMAND_COMP:
            xhci_handle_command_complete_event(xhci, er->current);
            break;
        case TRB_EVENT_PORT_STATUS_CHANGE:
            xhci_handle_root_hub_change(xhci);
            break;
        case TRB_EVENT_TRANSFER:
            xhci_handle_transfer_event(xhci, er->current);
            break;
        case TRB_EVENT_MFINDEX_WRAP:
            xhci_handle_mfindex_wrap(xhci);
            break;
        default:
            zxlogf(ERROR, "xhci_handle_events: unhandled event type %d\n", type);
            break;
        }

        // advance dequeue position; on wrap-around, toggle our consumer
        // cycle state so we keep matching the producer's cycle bit
        er->current++;
        if (er->current == er->end) {
            er->current = er->start;
            er->ccs ^= TRB_C;
        }
    }

    // update event ring dequeue pointer and clear event handler busy flag
    xhci_update_erdp(xhci, interrupter);
}
642
// Top-level interrupt handler for one interrupter: acknowledges the
// interrupt, then drains the event ring.
void xhci_handle_interrupt(xhci_t* xhci, uint32_t interrupter) {
    // clear the interrupt pending flag
    // (writing IMAN_IE | IMAN_IP keeps interrupts enabled while clearing
    // the pending bit, which is write-one-to-clear)
    xhci_intr_regs_t* intr_regs = &xhci->runtime_regs->intr_regs[interrupter];
    XHCI_WRITE32(&intr_regs->iman, IMAN_IE | IMAN_IP);

    xhci_handle_events(xhci, interrupter);
}
650