// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <hw/arch_ops.h>
#include <limits.h>
#include <zircon/thread_annotations.h>

#include "binding.h"
#include "debug-logging.h"
#include "intel-hda-controller.h"
#include "intel-hda-dsp.h"
#include "intel-hda-stream.h"
#include "utils.h"

namespace audio {
namespace intel_hda {

namespace {
static constexpr zx_duration_t INTEL_HDA_RESET_HOLD_TIME_NSEC        = ZX_USEC(100); // Section 5.5.1.2
static constexpr zx_duration_t INTEL_HDA_RESET_TIMEOUT_NSEC          = ZX_MSEC(1);   // 1ms Arbitrary
static constexpr zx_duration_t INTEL_HDA_RING_BUF_RESET_TIMEOUT_NSEC = ZX_MSEC(1);   // 1ms Arbitrary
static constexpr zx_duration_t INTEL_HDA_RESET_POLL_TIMEOUT_NSEC     = ZX_USEC(10);  // 10us Arbitrary
static constexpr zx_duration_t INTEL_HDA_CODEC_DISCOVERY_WAIT_NSEC   = ZX_USEC(521); // Section 4.3

static constexpr unsigned int MAX_CAPS = 10;  // Arbitrary number of capabilities to check
}  // anon namespace

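// Perform a complete reset of the controller hardware.  If the controller is
// not currently being held in reset, all stream and CORB/RIRB DMA is stopped
// first; only then is the controller cycled through reset using the HWINIT
// bit in GCTL.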
zx_status_t IntelHDAController::ResetControllerHW() {
    zx_status_t res;

    // Are we currently being held in reset?  If not, try to make sure that all
    // of our DMA streams are stopped and have been reset (but are not being
    // held in reset) before cycling the controller.  Anecdotally, holding a
    // stream in reset while attempting to reset the controller on some Skylake
    // hardware has caused some pretty profound hardware lockups which require
    // fully removing power (warm reboot == not good enough) to recover from.
    if (REG_RD(&regs()->gctl) & HDA_REG_GCTL_HWINIT) {
        // Explicitly disable all top level interrupt sources.
        REG_WR(&regs()->intctl, 0u);
        hw_mb();

        // Count the number of streams present in the hardware and
        // unconditionally stop and reset all of them.
        uint16_t gcap = REG_RD(&regs()->gcap);
        unsigned int total_stream_cnt = HDA_REG_GCAP_ISS(gcap)
                                      + HDA_REG_GCAP_OSS(gcap)
                                      + HDA_REG_GCAP_BSS(gcap);

        if (total_stream_cnt > countof(regs()->stream_desc)) {
            LOG(ERROR,
                "Fatal error during reset!  Controller reports more streams (%u) "
                "than should be possible for IHDA hardware.  (GCAP = 0x%04hx)\n",
                total_stream_cnt, gcap);
            return ZX_ERR_INTERNAL;
        }

        hda_stream_desc_regs_t* sregs = regs()->stream_desc;
        for (uint32_t i = 0; i < total_stream_cnt; ++i) {
            IntelHDAStream::Reset(sregs + i);
        }

        // Explicitly shut down any CORB/RIRB DMA
        REG_WR(&regs()->corbctl, 0u);
        REG_WR(&regs()->rirbctl, 0u);
    }

    // Assert the reset signal and wait for the controller to ack.
    REG_CLR_BITS(&regs()->gctl, HDA_REG_GCTL_HWINIT);
    hw_mb();

    res = WaitCondition(INTEL_HDA_RESET_TIMEOUT_NSEC,
                        INTEL_HDA_RESET_POLL_TIMEOUT_NSEC,
                        [this]() -> bool {
                           return (REG_RD(&regs()->gctl) & HDA_REG_GCTL_HWINIT) == 0;
                        });

    if (res != ZX_OK) {
        LOG(ERROR, "Error attempting to enter reset! (res %d)\n", res);
        return res;
    }

    // Wait the spec mandated hold time.
    zx_nanosleep(zx_deadline_after(INTEL_HDA_RESET_HOLD_TIME_NSEC));

    // Deassert the reset signal and wait for the controller to ack.
    REG_SET_BITS<uint32_t>(&regs()->gctl, HDA_REG_GCTL_HWINIT);
    hw_mb();

    res = WaitCondition(INTEL_HDA_RESET_TIMEOUT_NSEC,
                        INTEL_HDA_RESET_POLL_TIMEOUT_NSEC,
                        [this]() -> bool {
                           return (REG_RD(&regs()->gctl) & HDA_REG_GCTL_HWINIT) != 0;
                        });

    if (res != ZX_OK) {
        LOG(ERROR, "Error attempting to leave reset! (res %d)\n", res);
        return res;
    }

    // Wait the spec mandated discovery time.
    zx_nanosleep(zx_deadline_after(INTEL_HDA_CODEC_DISCOVERY_WAIT_NSEC));
    return res;
}

zx_status_t IntelHDAController::ResetCORBRdPtrLocked() {
    zx_status_t res;

    // Set the reset bit, then wait for ack from the HW.  See Section 3.3.21
    REG_WR(&regs()->corbrp, HDA_REG_CORBRP_RST);
    hw_mb();

    if ((res = WaitCondition(INTEL_HDA_RING_BUF_RESET_TIMEOUT_NSEC,
                             INTEL_HDA_RESET_POLL_TIMEOUT_NSEC,
                             [this]() -> bool {
                                return (REG_RD(&regs()->corbrp) & HDA_REG_CORBRP_RST) != 0;
                             })) != ZX_OK) {
        return res;
    }

    // Clear the reset bit, then wait for ack
    REG_WR(&regs()->corbrp, 0u);
    hw_mb();

    if ((res = WaitCondition(INTEL_HDA_RING_BUF_RESET_TIMEOUT_NSEC,
                             INTEL_HDA_RESET_POLL_TIMEOUT_NSEC,
                             [this]() -> bool {
                                return (REG_RD(&regs()->corbrp) & HDA_REG_CORBRP_RST) == 0;
                             })) != ZX_OK) {
        return res;
    }

    return ZX_OK;
}

zx_status_t IntelHDAController::SetupPCIDevice(zx_device_t* pci_dev) {
    zx_status_t res;

    if (pci_dev == nullptr)
        return ZX_ERR_INVALID_ARGS;

    // Have we already been set up?
    if (pci_dev_ != nullptr) {
        LOG(ERROR, "Device already initialized!\n");
        return ZX_ERR_BAD_STATE;
    }

    ZX_DEBUG_ASSERT(irq_ != nullptr);
    ZX_DEBUG_ASSERT(mapped_regs_.start() == nullptr);
    ZX_DEBUG_ASSERT(pci_.ops == nullptr);

    pci_dev_ = pci_dev;

    // The device had better be a PCI device, or we are very confused.
    res = device_get_protocol(pci_dev_, ZX_PROTOCOL_PCI, reinterpret_cast<void*>(&pci_));
    if (res != ZX_OK) {
        LOG(ERROR, "PCI device does not support PCI protocol! (res %d)\n", res);
        return res;
    }

    // Fetch our device info and use it to re-generate our debug tag once we
    // know our BDF address.
    ZX_DEBUG_ASSERT(pci_.ops != nullptr);
    res = pci_get_device_info(&pci_, &pci_dev_info_);
    if (res != ZX_OK) {
        LOG(ERROR, "Failed to fetch basic PCI device info! (res %d)\n", res);
        return res;
    }

    snprintf(log_prefix_, sizeof(log_prefix_), "IHDA Controller %02x:%02x.%01x",
             pci_dev_info_.bus_id,
             pci_dev_info_.dev_id,
             pci_dev_info_.func_id);

    // Fetch a handle to our bus transaction initiator and stash it in a ref
    // counted object (so we can manage the lifecycle as we share the handle
    // with various pinned VMOs we need to grant the controller BTI access to).
    zx::bti pci_bti;
    res = pci_get_bti(&pci_, 0, pci_bti.reset_and_get_address());
    if (res != ZX_OK) {
        LOG(ERROR, "Failed to get BTI handle for IHDA Controller (res %d)\n", res);
        return res;
    }

    pci_bti_ = RefCountedBti::Create(fbl::move(pci_bti));
    if (pci_bti_ == nullptr) {
        LOG(ERROR, "Out of memory while attempting to allocate BTI wrapper for IHDA Controller\n");
        return ZX_ERR_NO_MEMORY;
    }

    // Fetch the BAR which holds our main registers, then sanity check the type
    // and size.
    zx_pci_bar_t bar_info;
    res = pci_get_bar(&pci_, 0u, &bar_info);
    if (res != ZX_OK) {
        LOG(ERROR, "Error attempting to fetch registers from PCI (res %d)\n", res);
        return res;
    }

    if (bar_info.type != ZX_PCI_BAR_TYPE_MMIO) {
        LOG(ERROR, "Bad register window type (expected %u got %u)\n",
                ZX_PCI_BAR_TYPE_MMIO, bar_info.type);
        return ZX_ERR_INTERNAL;
    }

    // We should have a valid handle now, make sure we don't leak it.
    zx::vmo bar_vmo(bar_info.handle);
    if (bar_info.size != sizeof(hda_all_registers_t)) {
        LOG(ERROR, "Bad register window size (expected 0x%zx got 0x%zx)\n",
            sizeof(hda_all_registers_t), bar_info.size);
        return ZX_ERR_INTERNAL;
    }

    // Since this VMO provides access to our registers, make sure to set the
    // cache policy to UNCACHED_DEVICE
    res = bar_vmo.set_cache_policy(ZX_CACHE_POLICY_UNCACHED_DEVICE);
    if (res != ZX_OK) {
        LOG(ERROR, "Error attempting to set cache policy for PCI registers (res %d)\n", res);
        return res;
    }

    // Map the VMO in, make sure to put it in the same VMAR as the rest of our
    // registers.
    constexpr uint32_t CPU_MAP_FLAGS = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    res = mapped_regs_.Map(bar_vmo, 0, bar_info.size, CPU_MAP_FLAGS, DriverVmars::registers());
    if (res != ZX_OK) {
        LOG(ERROR, "Error attempting to map registers (res %d)\n", res);
        return res;
    }

    return ZX_OK;
}

zx_status_t IntelHDAController::SetupPCIInterrupts() {
    ZX_DEBUG_ASSERT(pci_dev_ != nullptr);

    // Make absolutely sure that IRQs are disabled at the controller level
    // before proceeding.
    REG_WR(&regs()->intctl, 0u);

    // Configure our IRQ mode and map our IRQ handle.  Try to use MSI, but if
    // that fails, fall back on legacy IRQs.
    zx_status_t res = pci_set_irq_mode(&pci_, ZX_PCIE_IRQ_MODE_MSI, 1);
    if (res != ZX_OK) {
        res = pci_set_irq_mode(&pci_, ZX_PCIE_IRQ_MODE_LEGACY, 1);
        if (res != ZX_OK) {
            LOG(ERROR, "Failed to set IRQ mode (%d)!\n", res);
            return res;
        } else {
            LOG(ERROR, "Falling back on legacy IRQ mode!\n");
        }
    }

    // Retrieve our PCI interrupt, then use it to activate our IRQ dispatcher.
    zx::interrupt irq;
    res = pci_map_interrupt(&pci_, 0, irq.reset_and_get_address());
    if (res != ZX_OK) {
        LOG(ERROR, "Failed to map IRQ! (res %d)\n", res);
        return res;
    }

    ZX_DEBUG_ASSERT(irq_ != nullptr);
    auto irq_handler = [controller = fbl::WrapRefPtr(this)]
                       (const dispatcher::Interrupt* irq, zx_time_t timestamp) -> zx_status_t {
                           OBTAIN_EXECUTION_DOMAIN_TOKEN(t, controller->default_domain_);
                           LOG_EX(SPEW, *controller, "Hard IRQ (ts = %lu)\n", timestamp);
                           return controller->HandleIrq();
                       };

    res = irq_->Activate(default_domain_, fbl::move(irq), fbl::move(irq_handler));

    if (res != ZX_OK) {
        LOG(ERROR, "Failed to activate IRQ dispatcher! (res %d)\n", res);
        return res;
    }

    // Enable Bus Mastering so we can DMA data and receive MSIs
    res = pci_enable_bus_master(&pci_, true);
    if (res != ZX_OK) {
        LOG(ERROR, "Failed to enable PCI bus mastering!\n");
        return res;
    }

    return ZX_OK;
}

zx_status_t IntelHDAController::SetupStreamDescriptors() {
    fbl::AutoLock stream_pool_lock(&stream_pool_lock_);

    // Sanity check our stream counts.
    uint16_t gcap;
    unsigned int input_stream_cnt, output_stream_cnt, bidir_stream_cnt, total_stream_cnt;
    gcap              = REG_RD(&regs()->gcap);
    input_stream_cnt  = HDA_REG_GCAP_ISS(gcap);
    output_stream_cnt = HDA_REG_GCAP_OSS(gcap);
    bidir_stream_cnt  = HDA_REG_GCAP_BSS(gcap);
    total_stream_cnt  = input_stream_cnt + output_stream_cnt + bidir_stream_cnt;

    static_assert(MAX_STREAMS_PER_CONTROLLER == countof(regs()->stream_desc),
                  "Max stream count mismatch!");

    if (!total_stream_cnt || (total_stream_cnt > countof(regs()->stream_desc))) {
        LOG(ERROR, "Invalid stream counts in GCAP register (In %u Out %u Bidir %u; Max %zu)\n",
            input_stream_cnt, output_stream_cnt, bidir_stream_cnt, countof(regs()->stream_desc));
        return ZX_ERR_INTERNAL;
    }

    // Allocate our stream descriptors and populate our free lists.
    for (uint32_t i = 0; i < total_stream_cnt; ++i) {
        uint16_t stream_id = static_cast<uint16_t>(i + 1);
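        // Hardware stream descriptors are ordered with input streams first,
        // followed by output streams, then bidirectional streams.  Choose this
        // stream's type based on where its index falls in that ordering.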
        auto type = (i < input_stream_cnt)
                  ? IntelHDAStream::Type::INPUT
                  : ((i < input_stream_cnt + output_stream_cnt)
                  ? IntelHDAStream::Type::OUTPUT
                  : IntelHDAStream::Type::BIDIR);

        auto stream = IntelHDAStream::Create(type, stream_id, &regs()->stream_desc[i], pci_bti_);
        if (stream == nullptr) {
            LOG(ERROR, "Failed to create HDA stream context %u/%u\n", i, total_stream_cnt);
            return ZX_ERR_NO_MEMORY;
        }

        ZX_DEBUG_ASSERT(i < countof(all_streams_));
        ZX_DEBUG_ASSERT(all_streams_[i] == nullptr);
        all_streams_[i] = stream;
        ReturnStreamLocked(fbl::move(stream));
    }

    return ZX_OK;
}

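// Choose the largest CORB/RIRB ring buffer size advertised by the hardware,
// program the size register to select it, and report the chosen entry count
// back to the caller.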
zx_status_t IntelHDAController::SetupCommandBufferSize(uint8_t* size_reg,
                                                       unsigned int* entry_count) {
    // Note: this method takes advantage of the fact that the TX and RX ring
    // buffer size register bitfield definitions are identical.
    uint8_t tmp = REG_RD(size_reg);
    uint8_t cmd;

    if (tmp & HDA_REG_CORBSIZE_CAP_256ENT) {
        *entry_count = 256;
        cmd = HDA_REG_CORBSIZE_CFG_256ENT;
    } else if (tmp & HDA_REG_CORBSIZE_CAP_16ENT) {
        *entry_count = 16;
        cmd = HDA_REG_CORBSIZE_CFG_16ENT;
    } else if (tmp & HDA_REG_CORBSIZE_CAP_2ENT) {
        *entry_count = 2;
        cmd = HDA_REG_CORBSIZE_CFG_2ENT;
    } else {
        LOG(ERROR, "Invalid ring buffer capabilities! (0x%02x)\n", tmp);
        return ZX_ERR_BAD_STATE;
    }

    REG_WR(size_reg, cmd);
    return ZX_OK;
}

zx_status_t IntelHDAController::SetupCommandBuffer() {
    fbl::AutoLock corb_lock(&corb_lock_);
    fbl::AutoLock rirb_lock(&rirb_lock_);
    zx_status_t res;

    // Allocate our command buffer memory and map it into our address space.
    // Even the largest buffers permissible should fit within a single 4k page.
    zx::vmo cmd_buf_vmo;
    constexpr uint32_t CPU_MAP_FLAGS = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    static_assert(PAGE_SIZE >= (HDA_CORB_MAX_BYTES + HDA_RIRB_MAX_BYTES),
                  "PAGE_SIZE too small to hold CORB and RIRB buffers!");
    res = cmd_buf_cpu_mem_.CreateAndMap(PAGE_SIZE,
                                        CPU_MAP_FLAGS,
                                        DriverVmars::registers(),
                                        &cmd_buf_vmo,
                                        ZX_RIGHT_SAME_RIGHTS,
                                        ZX_CACHE_POLICY_UNCACHED_DEVICE);

    if (res != ZX_OK) {
        LOG(ERROR, "Failed to create and map %u bytes for CORB/RIRB command buffers! (res %d)\n",
            PAGE_SIZE, res);
        return res;
    }

    // Pin this VMO and grant the controller access to it.  The controller will
    // need read/write access as this page contains both the command and
    // response buffers.
    //
    // TODO(johngro): If we (someday) decide that we need more isolation, we
    // should split this allocation so that there is a dedicated page for the
    // command buffer separate from the response buffer.  The controller should
    // never have a reason it needs to write to the command buffer, but it would
    // need its own page if we wanted to control the access at an IOMMU level.
    constexpr uint32_t HDA_MAP_FLAGS = ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE;
    res = cmd_buf_hda_mem_.Pin(cmd_buf_vmo, pci_bti_->initiator(), HDA_MAP_FLAGS);
    if (res != ZX_OK) {
        LOG(ERROR, "Failed to pin pages for CORB/RIRB command buffers! (res %d)\n", res);
        return res;
    }

    // Start by making sure that the output and response ring buffers are being
    // held in the stopped state
    REG_WR(&regs()->corbctl, 0u);
    REG_WR(&regs()->rirbctl, 0u);

    // Reset the read and write pointers for both ring buffers
    REG_WR(&regs()->corbwp, 0u);
    res = ResetCORBRdPtrLocked();
    if (res != ZX_OK)
        return res;

    // Note: the HW does not expose a Response Input Ring Buffer Read Pointer;
    // we have to maintain our own.
    rirb_rd_ptr_ = 0;
    REG_WR(&regs()->rirbwp, HDA_REG_RIRBWP_RST);

    // Physical memory for the CORB/RIRB should already have been allocated at
    // this point
    ZX_DEBUG_ASSERT(cmd_buf_cpu_mem_.start() != 0);

    // Determine the ring buffer sizes.  If there are options, make them as
    // large as possible.
    res = SetupCommandBufferSize(&regs()->corbsize, &corb_entry_count_);
    if (res != ZX_OK)
        return res;

    res = SetupCommandBufferSize(&regs()->rirbsize, &rirb_entry_count_);
    if (res != ZX_OK)
        return res;

    // Stash these so we don't have to constantly recalculate them.  Entry
    // counts are always powers of two, so (count - 1) works as an index mask.
    corb_mask_ = corb_entry_count_ - 1u;
    rirb_mask_ = rirb_entry_count_ - 1u;
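    // Limit the number of commands which may be in flight at any point in time
    // so that, even in the worst case, responses can never overflow the slots
    // we leave available in the RIRB (see the RINTCNT threshold setup below).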
    corb_max_in_flight_ = rirb_mask_ > RIRB_RESERVED_RESPONSE_SLOTS
                        ? rirb_mask_ - RIRB_RESERVED_RESPONSE_SLOTS
                        : 1;
    corb_max_in_flight_ = fbl::min(corb_max_in_flight_, corb_mask_);

    // Program the base address registers for the TX/RX ring buffers, and set up
    // the virtual pointers to the ring buffer entries.
    const auto& region = cmd_buf_hda_mem_.region(0);
    uint64_t cmd_buf_paddr64 = static_cast<uint64_t>(region.phys_addr);

    // TODO(johngro) : If the controller does not support 64 bit phys
    // addressing, we need to make sure to get a page from low memory to use for
    // our command buffers.
    bool gcap_64bit_ok = HDA_REG_GCAP_64OK(REG_RD(&regs()->gcap));
    if ((cmd_buf_paddr64 >> 32) && !gcap_64bit_ok) {
        LOG(ERROR, "Intel HDA controller does not support 64-bit physical addressing!\n");
        return ZX_ERR_NOT_SUPPORTED;
    }

    // Section 4.4.1.1; corb ring buffer base address must be 128 byte aligned.
    ZX_DEBUG_ASSERT(!(cmd_buf_paddr64 & 0x7F));
    auto cmd_buf_start = reinterpret_cast<uint8_t*>(cmd_buf_cpu_mem_.start());
    REG_WR(&regs()->corblbase, ((uint32_t)(cmd_buf_paddr64 & 0xFFFFFFFF)));
    REG_WR(&regs()->corbubase, ((uint32_t)(cmd_buf_paddr64 >> 32)));
    corb_ = reinterpret_cast<CodecCommand*>(cmd_buf_start);

    cmd_buf_paddr64 += HDA_CORB_MAX_BYTES;

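    // The RIRB shares the page with the CORB, placed immediately after the
    // maximum-sized CORB region.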
    // Section 4.4.2.2; rirb ring buffer base address must be 128 byte aligned.
    ZX_DEBUG_ASSERT(!(cmd_buf_paddr64 & 0x7F));
    REG_WR(&regs()->rirblbase, ((uint32_t)(cmd_buf_paddr64 & 0xFFFFFFFF)));
    REG_WR(&regs()->rirbubase, ((uint32_t)(cmd_buf_paddr64 >> 32)));
    rirb_ = reinterpret_cast<CodecResponse*>(cmd_buf_start + HDA_CORB_MAX_BYTES);

    // Make sure our current view of the space available in the CORB is up-to-date.
    ComputeCORBSpaceLocked();

    // Set the response interrupt count threshold.  The RIRB IRQ will fire any
    // time all of the SDATA_IN lines stop having codec responses to transmit,
    // or when RINTCNT responses have been received, whichever happens
    // first.  We would like to batch up responses to minimize IRQ load, but we
    // also need to make sure to...
    // 1) Not configure the threshold to be larger than the available space in
    //    the ring buffer.
    // 2) Reserve some space (if we can) at the end of the ring buffer so the
    //    hardware has space to write while we are servicing our IRQ.  If we
    //    reserve no space, then the ring buffer is going to fill up and
    //    potentially overflow before we can get in there and process responses.
    unsigned int thresh = rirb_entry_count_ - 1u;
    if (thresh > RIRB_RESERVED_RESPONSE_SLOTS)
        thresh -= RIRB_RESERVED_RESPONSE_SLOTS;
    ZX_DEBUG_ASSERT(thresh);
    REG_WR(&regs()->rintcnt, thresh);

    // Clear out any lingering interrupt status
    REG_WR(&regs()->corbsts, HDA_REG_CORBSTS_MEI);
    REG_WR(&regs()->rirbsts, HDA_REG_RIRBSTS_INTFL | HDA_REG_RIRBSTS_OIS);

    // Enable the TX/RX IRQs and DMA engines.
    REG_WR(&regs()->corbctl, HDA_REG_CORBCTL_MEIE | HDA_REG_CORBCTL_DMA_EN);
    REG_WR(&regs()->rirbctl, HDA_REG_RIRBCTL_INTCTL | HDA_REG_RIRBCTL_DMA_EN |
                             HDA_REG_RIRBCTL_OIC);

    return ZX_OK;
}

void IntelHDAController::ProbeAudioDSP() {
    // This driver only supports the Audio DSP on Kabylake.
    if ((pci_dev_info_.vendor_id != INTEL_HDA_PCI_VID) ||
        (pci_dev_info_.device_id != INTEL_HDA_PCI_DID_KABYLAKE)) {
        LOG(TRACE, "Audio DSP is not supported for device 0x%04x:0x%04x\n",
            pci_dev_info_.vendor_id, pci_dev_info_.device_id);
        return;
    }

    // Look for the processing pipe capability structure. Existence of this
    // structure means the Audio DSP is supported by the HW.
    uint32_t offset = REG_RD(&regs()->llch);
    if ((offset == 0) || (offset >= mapped_regs_.size())) {
        LOG(TRACE, "Invalid LLCH offset to capability structures: 0x%08x\n", offset);
        return;
    }

    hda_pp_registers_t* pp_regs = nullptr;
    hda_pp_registers_t* found_regs = nullptr;
    uint8_t* regs_ptr = nullptr;
    unsigned int count = 0;
    uint32_t cap;
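    // Walk the linked list of capability structures starting from the offset
    // published in LLCH, bounding the walk at MAX_CAPS entries so that a
    // malformed (or cyclic) capability list cannot trap us in this loop.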
    do {
        regs_ptr = reinterpret_cast<uint8_t*>(regs()) + offset;
        pp_regs = reinterpret_cast<hda_pp_registers_t*>(regs_ptr);
        cap = REG_RD(&pp_regs->ppch);
        if ((cap & HDA_CAP_ID_MASK) == HDA_CAP_PP_ID) {
            found_regs = pp_regs;
            break;
        }
        offset = cap & HDA_CAP_PTR_MASK;
        count += 1;
    } while ((count < MAX_CAPS) && (offset != 0));

    if (found_regs == nullptr) {
        LOG(TRACE, "Pipe processing capability structure not found\n");
        return;
    }

    dsp_ = IntelHDADSP::Create(*this, found_regs, pci_bti_);
}

zx_status_t IntelHDAController::InitInternal(zx_device_t* pci_dev) {
    // TODO(johngro): see ZX-940; remove this priority boost when we can, and
    // when there is a better way of handling real time requirements.
    //
    // Right now, the interrupt handler runs in the same execution domain as all
    // of the other event sources managed by the HDA controller.  If it is
    // configured to run and send DMA ring buffer notifications to the higher
    // level, the IRQ needs to be running at a boosted priority in order to have
    // a chance of meeting its real time deadlines.
    //
    // There is currently no terribly good way to control this dynamically, or
    // to apply this priority only to the interrupt event source and not others.
    // If it ever becomes a serious issue that the channel event handlers in
    // this system are running at boosted priority, we can come back here and
    // split the IRQ handler to run in its own dedicated execution domain
    // instead of using the default domain.
    default_domain_ = dispatcher::ExecutionDomain::Create(24 /* HIGH_PRIORITY in LK */);
    if (default_domain_ == nullptr) {
        return ZX_ERR_NO_MEMORY;
    }

    irq_ = dispatcher::Interrupt::Create();
    if (irq_ == nullptr) {
        return ZX_ERR_NO_MEMORY;
    }

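    // Create a wakeup event as well.  This gives the rest of the driver a way
    // to run the IRQ handler from a software trigger in addition to the
    // hardware interrupt itself.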
    irq_wakeup_event_ = dispatcher::WakeupEvent::Create();
    if (irq_wakeup_event_ == nullptr) {
        return ZX_ERR_NO_MEMORY;
    }

    zx_status_t res;
    res = irq_wakeup_event_->Activate(
        default_domain_,
        [controller = fbl::WrapRefPtr(this)](const dispatcher::WakeupEvent* evt) -> zx_status_t {
            OBTAIN_EXECUTION_DOMAIN_TOKEN(t, controller->default_domain_);
            LOG_EX(SPEW, *controller, "SW IRQ Wakeup\n");
            return controller->HandleIrq();
        });
    if (res != ZX_OK) {
        return res;
    }

    res = SetupPCIDevice(pci_dev);
    if (res != ZX_OK) {
        return res;
    }

    // Check our hardware version
    uint8_t major, minor;
    major = REG_RD(&regs()->vmaj);
    minor = REG_RD(&regs()->vmin);

    if ((1 != major) || (0 != minor)) {
        LOG(ERROR, "Unexpected HW revision %d.%d!\n", major, minor);
        return ZX_ERR_NOT_SUPPORTED;
    }

    // Completely reset the hardware
    res = ResetControllerHW();
    if (res != ZX_OK)
        return res;

    // Set up interrupts and enable bus mastering.
    res = SetupPCIInterrupts();
    if (res != ZX_OK)
        return res;

    // Allocate and set up our stream descriptors.
    res = SetupStreamDescriptors();
    if (res != ZX_OK)
        return res;

    // Allocate and set up the codec communication ring buffers (CORB/RIRB)
    res = SetupCommandBuffer();
    if (res != ZX_OK)
        return res;

    // Generate a device name, initialize our device structure, and attempt to
    // publish our device.
    char dev_name[ZX_DEVICE_NAME_MAX] = { 0 };
    snprintf(dev_name, sizeof(dev_name), "intel-hda-%03u", id());

    device_add_args_t args = {};
    args.version = DEVICE_ADD_ARGS_VERSION;
    args.name = dev_name;
    args.ctx = this;
    args.ops = &CONTROLLER_DEVICE_THUNKS;
    args.proto_id = ZX_PROTOCOL_IHDA;

    // Manually add a reference to this object.  If we succeeded in publishing,
    // the DDK will be holding an unmanaged reference to us in our device's ctx
    // pointer.  We will re-claim the reference when the DDK eventually calls
    // our Release hook.
    this->AddRef();

    res = device_add(pci_dev_, &args, &dev_node_);
    if (res != ZX_OK) {
        // We failed to publish our device.  Release the manual reference we
        // just added.
        __UNUSED bool should_destruct;
        should_destruct = this->Release();
        ZX_DEBUG_ASSERT(!should_destruct);
    } else {
        // Flag the fact that we have entered the operating state.
        SetState(State::OPERATING);

        // Make sure that interrupts are completely disabled before proceeding.
        // If we have an unmasked, pending IRQ, we need to make sure that it
        // generates an interrupt once we have finished this interrupt
        // configuration.
        REG_WR(&regs()->intctl, 0u);

        // Clear our STATESTS shadow and set up the WAKEEN register to wake us
        // up if there is any change to the codec enumeration status.  This will
        // kick off the process of codec enumeration.
        REG_SET_BITS(&regs()->wakeen, HDA_REG_STATESTS_MASK);

        // Allow unsolicited codec responses
        REG_SET_BITS(&regs()->gctl, HDA_REG_GCTL_UNSOL);

        // Compute the set of interrupts we may be interested in during
        // operation, then enable those interrupts.
        uint32_t interesting_irqs = HDA_REG_INTCTL_GIE | HDA_REG_INTCTL_CIE;
        for (uint32_t i = 0; i < countof(all_streams_); ++i) {
            if (all_streams_[i] != nullptr)
                interesting_irqs |= HDA_REG_INTCTL_SIE(i);
        }
        REG_WR(&regs()->intctl, interesting_irqs);

        // Probe for the Audio DSP. This is done after adding the HDA controller
        // device because the Audio DSP will be added as a child of the HDA
        // controller, and ddktl requires the parent device node to be
        // initialized at construction time.

        // There is no need to check for a return value because the absence of
        // the Audio DSP is not a failure.
        // TODO(yky): Come up with a way to warn about the absence of the Audio
        // DSP on platforms that require it.
        ProbeAudioDSP();
    }

    return res;
}

zx_status_t IntelHDAController::Init(zx_device_t* pci_dev) {
    zx_status_t res = InitInternal(pci_dev);

    if (res != ZX_OK) {
        DeviceShutdown();
    }

    return res;
}

}  // namespace intel_hda
}  // namespace audio