// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "iommu.h"

#include <acpica/acpi.h>
#include <assert.h>
#include <ddk/debug.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>
#include <zircon/assert.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/iommu.h>

typedef struct iommu_info {
    const zx_iommu_desc_intel_t* desc; // owned by this structure
    size_t desc_len;

    zx_handle_t handle; // ZX_HANDLE_INVALID if not activated
} iommu_info_t;

typedef struct {
    mtx_t lock;
    iommu_info_t* iommus; // Array of IOMMUs
    size_t num_iommus; // Length of |iommus|

    zx_handle_t dummy_iommu; // Used for BDFs not covered by the ACPI tables.
} iommu_manager_t;

static iommu_manager_t iommu_mgr;

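// Translates a single ACPI DMAR device scope entry into the Zircon scope
// descriptor format. Returns ZX_ERR_WRONG_TYPE for scope types the caller
// should silently skip, ZX_ERR_NOT_SUPPORTED for scopes we cannot express yet
// (bridges, non-root buses), and ZX_ERR_IO_DATA_INTEGRITY if the entry is
// malformed.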
static zx_status_t acpi_scope_to_desc(ACPI_DMAR_DEVICE_SCOPE* acpi_scope,
                                      zx_iommu_desc_intel_scope_t* desc_scope) {
    switch (acpi_scope->EntryType) {
    case ACPI_DMAR_SCOPE_TYPE_ENDPOINT:
        desc_scope->type = ZX_IOMMU_INTEL_SCOPE_ENDPOINT;
        break;
    case ACPI_DMAR_SCOPE_TYPE_BRIDGE:
        zxlogf(INFO, "acpi-bus: bridge scopes not supported\n");
        return ZX_ERR_NOT_SUPPORTED;
    default:
        // Skip this scope, since it's not a type we care about.
        return ZX_ERR_WRONG_TYPE;
    }
    desc_scope->start_bus = acpi_scope->Bus;
    if (acpi_scope->Length < sizeof(*acpi_scope)) {
        return ZX_ERR_IO_DATA_INTEGRITY;
    }
    desc_scope->num_hops = (acpi_scope->Length - sizeof(*acpi_scope)) / 2;
    if (countof(desc_scope->dev_func) < desc_scope->num_hops) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    // TODO(teisenbe): We need to be aware of the mapping between
    // PCI paths and bus numbers to properly evaluate this.
    if (desc_scope->num_hops != 1) {
        zxlogf(INFO, "acpi-bus: non root bus devices not supported\n");
        return ZX_ERR_NOT_SUPPORTED;
    }

    // Walk the variable-length array of hops that is appended to the main
    // ACPI_DMAR_DEVICE_SCOPE structure.
    for (ssize_t i = 0; i < desc_scope->num_hops; ++i) {
        uint16_t v = *(uint16_t*)((uintptr_t)acpi_scope + sizeof(*acpi_scope) + 2 * i);
        const uint8_t dev = v & 0x1f;
        const uint8_t func = (v >> 8) & 0x7;
        desc_scope->dev_func[i] = (dev << 3) | func;
    }
    return ZX_OK;
}

// Walks the given unit's scopes and appends them to the given descriptor.
// |max_scopes| is the number of scopes |scopes| can hold. |num_scopes_found|
// is the number of scopes found on |unit|, even if they wouldn't all fit in |scopes|.
static zx_status_t append_scopes(ACPI_DMAR_HARDWARE_UNIT* unit,
                                 size_t max_scopes,
                                 zx_iommu_desc_intel_scope_t* scopes,
                                 size_t* num_scopes_found) {
    size_t num_scopes = 0;
    uintptr_t scope;
    const uintptr_t addr = (uintptr_t)unit;
    // The scope entries begin after the 16-byte fixed portion of the
    // ACPI_DMAR_HARDWARE_UNIT record.
    for (scope = addr + 16; scope < addr + unit->Header.Length; ) {
        ACPI_DMAR_DEVICE_SCOPE* s = (ACPI_DMAR_DEVICE_SCOPE*)scope;
        zxlogf(DEBUG1, "  DMAR Scope: %u, bus %u\n", s->EntryType, s->Bus);
        for (size_t i = 0; i < (s->Length - sizeof(*s)) / 2; ++i) {
            uint16_t v = *(uint16_t*)(scope + sizeof(*s) + 2 * i);
            zxlogf(DEBUG1, "    Path %zu: %02x.%02x\n", i, v & 0xffu, (uint16_t)(v >> 8));
        }
        scope += s->Length;

        // Count the scopes we care about
        switch (s->EntryType) {
        case ACPI_DMAR_SCOPE_TYPE_ENDPOINT:
        case ACPI_DMAR_SCOPE_TYPE_BRIDGE:
            num_scopes++;
            break;
        }
    }

    if (num_scopes_found) {
        *num_scopes_found = num_scopes;
    }
    if (!scopes) {
        return ZX_OK;
    }

    if (num_scopes > max_scopes) {
        return ZX_ERR_BUFFER_TOO_SMALL;
    }

    size_t cur_num_scopes = 0;
    for (scope = addr + 16; scope < addr + unit->Header.Length && cur_num_scopes < max_scopes;) {
        ACPI_DMAR_DEVICE_SCOPE* s = (ACPI_DMAR_DEVICE_SCOPE*)scope;

        zx_status_t status = acpi_scope_to_desc(s, &scopes[cur_num_scopes]);
        if (status != ZX_OK && status != ZX_ERR_WRONG_TYPE) {
            return status;
        }
        if (status == ZX_OK) {
            cur_num_scopes++;
        }

        scope += s->Length;
    }

    // Since |num_scopes| is the number of ENDPOINT and BRIDGE entries, and
    // |acpi_scope_to_desc| doesn't return ZX_ERR_WRONG_TYPE for those types of
    // entries, we should always have seen that number of entries when we reach
    // here.
    assert(cur_num_scopes == num_scopes);
    return ZX_OK;
}

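// Returns true if |scope| describes the same device path as the ACPI scope
// entry |acpi_scope| (same type, start bus, and dev/func hops).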
static bool scope_eq(zx_iommu_desc_intel_scope_t* scope,
                     ACPI_DMAR_DEVICE_SCOPE* acpi_scope) {

    zx_iommu_desc_intel_scope_t other_scope;
    zx_status_t status = acpi_scope_to_desc(acpi_scope, &other_scope);
    if (status != ZX_OK) {
        return false;
    }

    if (scope->type != other_scope.type || scope->start_bus != other_scope.start_bus ||
        scope->num_hops != other_scope.num_hops) {

        return false;
    }

    for (size_t i = 0; i < scope->num_hops; ++i) {
        if (scope->dev_func[i] != other_scope.dev_func[i]) {
            return false;
        }
    }

    return true;
}

// Appends to desc any reserved memory regions that match its scopes. If
// |desc_len| is not large enough to include the reserved memory descriptors, this
// function will not append all of the found entries. |bytes_needed| will
// always return the number of bytes needed to represent all of the reserved
// descriptors. This function does not modify desc->reserved_memory_bytes.
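//
// The layout assumed throughout this file is that |desc| is a
// zx_iommu_desc_intel_t header, immediately followed by |desc->scope_bytes|
// bytes of zx_iommu_desc_intel_scope_t entries, immediately followed by
// |desc->reserved_memory_bytes| bytes of zx_iommu_desc_intel_reserved_memory_t
// records, each of which is itself followed by its own scope entries.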
static zx_status_t append_reserved_mem(ACPI_TABLE_DMAR* table,
                                       zx_iommu_desc_intel_t* desc,
                                       size_t desc_len,
                                       size_t* bytes_needed) {

    const uintptr_t records_start = (uintptr_t)table + sizeof(*table);
    const uintptr_t records_end = (uintptr_t)table + table->Header.Length;

    zx_iommu_desc_intel_scope_t* desc_scopes = (zx_iommu_desc_intel_scope_t*)(
            (uintptr_t)desc + sizeof(*desc));
    const size_t num_desc_scopes = desc->scope_bytes / sizeof(zx_iommu_desc_intel_scope_t);

    uintptr_t next_reserved_mem_desc_base = (uintptr_t)desc + sizeof(zx_iommu_desc_intel_t) +
            desc->scope_bytes + desc->reserved_memory_bytes;

    *bytes_needed = 0;
    for (uintptr_t addr = records_start; addr < records_end;) {
        ACPI_DMAR_HEADER* record_hdr = (ACPI_DMAR_HEADER*)addr;
        switch (record_hdr->Type) {
        case ACPI_DMAR_TYPE_RESERVED_MEMORY: {
            ACPI_DMAR_RESERVED_MEMORY* rec = (ACPI_DMAR_RESERVED_MEMORY*)record_hdr;

            if (desc->pci_segment != rec->Segment) {
                break;
            }

            zx_iommu_desc_intel_reserved_memory_t* mem_desc =
                    (zx_iommu_desc_intel_reserved_memory_t*)next_reserved_mem_desc_base;
            size_t mem_desc_size = sizeof(*mem_desc);

            // Search for scopes that match. The scope entries begin after the
            // 24-byte fixed portion of the ACPI_DMAR_RESERVED_MEMORY record.
            for (uintptr_t scope = addr + 24; scope < addr + rec->Header.Length; ) {
                ACPI_DMAR_DEVICE_SCOPE* s = (ACPI_DMAR_DEVICE_SCOPE*)scope;
                // TODO(teisenbe): We should skip scope types we don't
                // care about here

                // Search for a scope in the descriptor that matches this
                // ACPI scope.
                bool no_matches = true;
                for (size_t i = 0; i < num_desc_scopes; ++i) {
                    zx_iommu_desc_intel_scope_t* scope_desc = &desc_scopes[i];
                    const bool scope_matches = scope_eq(scope_desc, s);

                    no_matches &= !scope_matches;

                    // If this is a whole segment descriptor, then a match
                    // corresponds to an entry we should ignore.
                    if (scope_matches && !desc->whole_segment) {
                        zx_iommu_desc_intel_scope_t* new_scope_desc =
                                (zx_iommu_desc_intel_scope_t*)(next_reserved_mem_desc_base +
                                                               mem_desc_size);
                        mem_desc_size += sizeof(zx_iommu_desc_intel_scope_t);

                        if (next_reserved_mem_desc_base + mem_desc_size <=
                            (uintptr_t)desc + desc_len) {

                            memcpy(new_scope_desc, scope_desc, sizeof(*scope_desc));
                        }
                        break;
                    }
                }

                if (no_matches && desc->whole_segment) {
                    zx_iommu_desc_intel_scope_t other_scope;
                    zx_status_t status = acpi_scope_to_desc(s, &other_scope);
                    if (status != ZX_ERR_WRONG_TYPE && status != ZX_OK) {
                        return status;
                    }
                    if (status == ZX_OK) {
                        zx_iommu_desc_intel_scope_t* new_scope_desc =
                                (zx_iommu_desc_intel_scope_t*)(next_reserved_mem_desc_base +
                                                               mem_desc_size);
                        mem_desc_size += sizeof(zx_iommu_desc_intel_scope_t);

                        if (next_reserved_mem_desc_base + mem_desc_size <=
                            (uintptr_t)desc + desc_len) {

                            memcpy(new_scope_desc, &other_scope, sizeof(other_scope));
                        }
                    }
                }

                scope += s->Length;
            }

            // If this descriptor does not have any scopes, ignore it
            if (mem_desc_size == sizeof(*mem_desc)) {
                break;
            }

            if (next_reserved_mem_desc_base + mem_desc_size <= (uintptr_t)desc + desc_len) {
                mem_desc->base_addr = rec->BaseAddress;
                mem_desc->len = rec->EndAddress - rec->BaseAddress + 1;
                mem_desc->scope_bytes = mem_desc_size - sizeof(*mem_desc);
                next_reserved_mem_desc_base += mem_desc_size;
            }
            *bytes_needed += mem_desc_size;

            break;
        }
        }

        addr += record_hdr->Length;
    }

    // Check if we weren't able to write all of the entries above.
    if (*bytes_needed + sizeof(zx_iommu_desc_intel_t) +
        desc->scope_bytes + desc->reserved_memory_bytes > desc_len) {
        return ZX_ERR_BUFFER_TOO_SMALL;
    }

    return ZX_OK;
}

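// Builds an Intel IOMMU descriptor for a hardware unit with the INCLUDE_ALL
// (whole-segment) flag set. The descriptor's scope list enumerates the devices
// claimed by the other hardware units on the same segment, which this unit
// treats as exclusions. On success the caller owns |*desc_out| and must
// free() it.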
static zx_status_t create_whole_segment_iommu_desc(ACPI_TABLE_DMAR* table,
                                                   ACPI_DMAR_HARDWARE_UNIT* unit,
                                                   zx_iommu_desc_intel_t** desc_out,
                                                   size_t* desc_len_out) {
    assert(unit->Flags & ACPI_DMAR_INCLUDE_ALL);

    // The VT-d spec requires that whole-segment hardware units appear in the
    // DMAR table after all other hardware units on their segment. Search those
    // entries for scopes to specify as excluded from this descriptor.

    size_t num_scopes = 0;
    size_t num_scopes_on_unit;

    const uintptr_t records_start = ((uintptr_t)table) + sizeof(*table);
    const uintptr_t records_end = (uintptr_t)unit + unit->Header.Length;

    uintptr_t addr;
    for (addr = records_start; addr < records_end;) {
        ACPI_DMAR_HEADER* record_hdr = (ACPI_DMAR_HEADER*)addr;
        switch (record_hdr->Type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT: {
            ACPI_DMAR_HARDWARE_UNIT* rec = (ACPI_DMAR_HARDWARE_UNIT*)record_hdr;
            if (rec->Segment != unit->Segment) {
                break;
            }
            zx_status_t status = append_scopes(rec, 0, NULL, &num_scopes_on_unit);
            if (status != ZX_OK) {
                return status;
            }
            num_scopes += num_scopes_on_unit;
        }
        }
        addr += record_hdr->Length;
    }

    size_t desc_len = sizeof(zx_iommu_desc_intel_t) +
            sizeof(zx_iommu_desc_intel_scope_t) * num_scopes;
    zx_iommu_desc_intel_t* desc = malloc(desc_len);
    if (!desc) {
        return ZX_ERR_NO_MEMORY;
    }
    desc->register_base = unit->Address;
    desc->pci_segment = unit->Segment;
    desc->whole_segment = true;
    desc->scope_bytes = 0;
    desc->reserved_memory_bytes = 0;

    for (addr = records_start; addr < records_end;) {
        ACPI_DMAR_HEADER* record_hdr = (ACPI_DMAR_HEADER*)addr;
        switch (record_hdr->Type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT: {
            ACPI_DMAR_HARDWARE_UNIT* rec = (ACPI_DMAR_HARDWARE_UNIT*)record_hdr;
            if (rec->Segment != unit->Segment) {
                break;
            }
            size_t scopes_found = 0;
            zx_iommu_desc_intel_scope_t* scopes = (zx_iommu_desc_intel_scope_t*)(
                    (uintptr_t)desc + sizeof(*desc) + desc->scope_bytes);
            zx_status_t status = append_scopes(rec, num_scopes, scopes, &scopes_found);
            if (status != ZX_OK) {
                free(desc);
                return status;
            }
            desc->scope_bytes += scopes_found * sizeof(zx_iommu_desc_intel_scope_t);
            num_scopes -= scopes_found;
        }
        }
        addr += record_hdr->Length;
    }

    size_t reserved_mem_bytes = 0;
    zx_status_t status = append_reserved_mem(table, desc, desc_len, &reserved_mem_bytes);
    if (status == ZX_ERR_BUFFER_TOO_SMALL) {
        zx_iommu_desc_intel_t* new_desc = realloc(desc, desc_len + reserved_mem_bytes);
        if (new_desc == NULL) {
            free(desc);
            return ZX_ERR_NO_MEMORY;
        }
        desc = new_desc;
        desc_len += reserved_mem_bytes;
        status = append_reserved_mem(table, desc, desc_len, &reserved_mem_bytes);
    }
    if (status != ZX_OK) {
        free(desc);
        return status;
    }
    desc->reserved_memory_bytes += reserved_mem_bytes;

    *desc_out = desc;
    *desc_len_out = desc_len;
    return ZX_OK;
}

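// Builds an Intel IOMMU descriptor for a hardware unit without the INCLUDE_ALL
// flag; the descriptor's scope list enumerates exactly the devices this unit
// covers. On success the caller owns |*desc_out| and must free() it.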
static zx_status_t create_partial_segment_iommu_desc(ACPI_TABLE_DMAR* table,
                                                     ACPI_DMAR_HARDWARE_UNIT* unit,
                                                     zx_iommu_desc_intel_t** desc_out,
                                                     size_t* desc_len_out) {
    assert((unit->Flags & ACPI_DMAR_INCLUDE_ALL) == 0);

    size_t num_scopes;
    zx_status_t status = append_scopes(unit, 0, NULL, &num_scopes);
    if (status != ZX_OK) {
        return status;
    }

    size_t desc_len = sizeof(zx_iommu_desc_intel_t) +
            sizeof(zx_iommu_desc_intel_scope_t) * num_scopes;
    zx_iommu_desc_intel_t* desc = malloc(desc_len);
    if (!desc) {
        return ZX_ERR_NO_MEMORY;
    }
    desc->register_base = unit->Address;
    desc->pci_segment = unit->Segment;
    desc->whole_segment = false;
    desc->scope_bytes = 0;
    desc->reserved_memory_bytes = 0;
    zx_iommu_desc_intel_scope_t* scopes = (zx_iommu_desc_intel_scope_t*)(
            (uintptr_t)desc + sizeof(*desc));
    size_t actual_num_scopes;
    status = append_scopes(unit, num_scopes, scopes, &actual_num_scopes);
    if (status != ZX_OK) {
        free(desc);
        return status;
    }
    desc->scope_bytes = actual_num_scopes * sizeof(zx_iommu_desc_intel_scope_t);

    size_t reserved_mem_bytes = 0;
    status = append_reserved_mem(table, desc, desc_len, &reserved_mem_bytes);
    if (status == ZX_ERR_BUFFER_TOO_SMALL) {
        zx_iommu_desc_intel_t* new_desc = realloc(desc, desc_len + reserved_mem_bytes);
        if (new_desc == NULL) {
            status = ZX_ERR_NO_MEMORY;
            goto cleanup;
        }
        desc = new_desc;
        desc_len += reserved_mem_bytes;
        status = append_reserved_mem(table, desc, desc_len, &reserved_mem_bytes);
    }
    if (status != ZX_OK) {
        goto cleanup;
    }
    desc->reserved_memory_bytes += reserved_mem_bytes;

    *desc_out = desc;
    *desc_len_out = desc_len;
    return ZX_OK;
cleanup:
    free(desc);
    return status;
}

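// Returns true if the hardware IOMMU should be used, based on the
// "iommu.enable" kernel command line option. Anything other than "0", "false",
// or "off" enables it; the default (unset) is disabled.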
static bool use_hardware_iommu(void) {
    const char* value = getenv("iommu.enable");
    if (value == NULL) {
        return false; // Default to false currently
    } else if (!strcmp(value, "0") || !strcmp(value, "false") || !strcmp(value, "off")) {
        return false;
    } else {
        return true;
    }
}

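// Initializes the IOMMU manager: always creates the dummy IOMMU, and, if the
// hardware IOMMU is enabled and a DMAR table is present, parses the table and
// creates one IOMMU object per DMAR hardware unit.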
zx_status_t iommu_manager_init(void) {
    int err = mtx_init(&iommu_mgr.lock, mtx_plain);
    if (err != thrd_success) {
        return ZX_ERR_INTERNAL;
    }

    iommu_mgr.iommus = NULL;
    iommu_mgr.num_iommus = 0;

    zx_iommu_desc_dummy_t dummy;
    zx_status_t status = zx_iommu_create(get_root_resource(), ZX_IOMMU_TYPE_DUMMY, &dummy,
                                         sizeof(dummy), &iommu_mgr.dummy_iommu);
    if (status != ZX_OK) {
        zxlogf(ERROR, "acpi-bus: error %d in zx_iommu_create for dummy\n", status);
        return status;
    }

    if (!use_hardware_iommu()) {
        zxlogf(INFO, "acpi-bus: not using IOMMU\n");
        return ZX_OK;
    }

    ACPI_TABLE_HEADER* table = NULL;
    ACPI_STATUS acpi_status = AcpiGetTable((char*)ACPI_SIG_DMAR, 1, &table);
    if (acpi_status != AE_OK) {
        zxlogf(INFO, "acpi-bus: could not find DMAR table\n");
        return ZX_ERR_NOT_FOUND;
    }
    ACPI_TABLE_DMAR* dmar = (ACPI_TABLE_DMAR*)table;
    const uintptr_t records_start = ((uintptr_t)dmar) + sizeof(*dmar);
    const uintptr_t records_end = ((uintptr_t)dmar) + dmar->Header.Length;
    if (records_start >= records_end) {
        zxlogf(ERROR, "acpi-bus: DMAR wraps around address space\n");
        return ZX_ERR_IO_DATA_INTEGRITY;
    }
    // Shouldn't be too many records
    if (dmar->Header.Length > 4096) {
        zxlogf(ERROR, "acpi-bus: DMAR suspiciously long: %u\n", dmar->Header.Length);
        return ZX_ERR_IO_DATA_INTEGRITY;
    }

    // Count the IOMMUs
    size_t num_iommus = 0;
    uintptr_t addr;
    for (addr = records_start; addr < records_end;) {
        ACPI_DMAR_HEADER* record_hdr = (ACPI_DMAR_HEADER*)addr;
        if (record_hdr->Type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
            num_iommus++;
        }

        addr += record_hdr->Length;
    }
    if (addr != records_end) {
        zxlogf(ERROR, "acpi-bus: DMAR length weird: %u, reached %zu\n", dmar->Header.Length,
               addr - records_start);
        return ZX_ERR_IO_DATA_INTEGRITY;
    }

    if (num_iommus == 0) {
        return ZX_OK;
    }

    iommu_mgr.iommus = malloc(sizeof(iommu_info_t) * num_iommus);
    if (iommu_mgr.iommus == NULL) {
        return ZX_ERR_NO_MEMORY;
    }

    for (addr = records_start; addr < records_end;) {
        ACPI_DMAR_HEADER* record_hdr = (ACPI_DMAR_HEADER*)addr;
        zxlogf(DEBUG1, "DMAR record: %d\n", record_hdr->Type);
        switch (record_hdr->Type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT: {
            ACPI_DMAR_HARDWARE_UNIT* rec = (ACPI_DMAR_HARDWARE_UNIT*)record_hdr;

            zxlogf(DEBUG1, "DMAR Hardware Unit: %u %#llx %#x\n", rec->Segment, rec->Address, rec->Flags);
            const bool whole_segment = rec->Flags & ACPI_DMAR_INCLUDE_ALL;

            zx_iommu_desc_intel_t* desc = NULL;
            size_t desc_len = 0;
            if (whole_segment) {
                status = create_whole_segment_iommu_desc(dmar, rec, &desc, &desc_len);
            } else {
                status = create_partial_segment_iommu_desc(dmar, rec, &desc, &desc_len);
            }
            if (status != ZX_OK) {
                zxlogf(ERROR, "acpi-bus: Failed to create iommu desc: %d\n", status);
                goto cleanup;
            }

            zx_handle_t iommu_handle;
            status = zx_iommu_create(get_root_resource(), ZX_IOMMU_TYPE_INTEL,
                                     desc, desc_len, &iommu_handle);
            if (status != ZX_OK) {
                zxlogf(ERROR, "acpi-bus: Failed to create iommu object: %d\n", status);
                free(desc);
                goto cleanup;
            }

            ZX_DEBUG_ASSERT(iommu_mgr.num_iommus < num_iommus);
            size_t idx = iommu_mgr.num_iommus;
            iommu_mgr.iommus[idx].desc = desc;
            iommu_mgr.iommus[idx].desc_len = desc_len;
            iommu_mgr.iommus[idx].handle = iommu_handle;
            iommu_mgr.num_iommus++;
            break;
        }
        case ACPI_DMAR_TYPE_RESERVED_MEMORY: {
            ACPI_DMAR_RESERVED_MEMORY* rec = (ACPI_DMAR_RESERVED_MEMORY*)record_hdr;
            zxlogf(DEBUG1, "DMAR Reserved Memory: %u %#llx %#llx\n", rec->Segment, rec->BaseAddress, rec->EndAddress);
            // Dump the scope entries, which begin after the 24-byte fixed
            // portion of the ACPI_DMAR_RESERVED_MEMORY record.
            for (uintptr_t scope = addr + 24; scope < addr + rec->Header.Length; ) {
                ACPI_DMAR_DEVICE_SCOPE* s = (ACPI_DMAR_DEVICE_SCOPE*)scope;
                zxlogf(DEBUG1, "  DMAR Scope: %u, bus %u\n", s->EntryType, s->Bus);
                for (size_t i = 0; i < (s->Length - sizeof(*s)) / 2; ++i) {
                    uint16_t v = *(uint16_t*)(scope + sizeof(*s) + 2 * i);
                    zxlogf(DEBUG1, "    Path %zu: %02x.%02x\n", i, v & 0xffu, (uint16_t)(v >> 8));
                }
                scope += s->Length;
            }
            break;
        }
        }

        addr += record_hdr->Length;
    }

    zxlogf(INFO, "acpi-bus: using IOMMU\n");
    return ZX_OK;
cleanup:
    for (size_t i = 0; i < iommu_mgr.num_iommus; ++i) {
        zx_handle_close(iommu_mgr.iommus[i].handle);
        free((void*)iommu_mgr.iommus[i].desc);
    }
    free(iommu_mgr.iommus);
    iommu_mgr.iommus = NULL;
    iommu_mgr.num_iommus = 0;
    return status;
}

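// Returns (via |iommu_h|) a borrowed handle to the IOMMU that covers the given
// PCI BDF, or the dummy IOMMU if no hardware unit claims it. The BDF is encoded
// as (bus << 8) | (dev << 3) | func, so a caller looking up bus 0, device 2,
// function 0 would (illustratively) do:
//
//   zx_handle_t iommu;
//   iommu_manager_iommu_for_bdf((0 << 8) | (2 << 3) | 0, &iommu);
//
// The handle remains owned by the manager, so callers should not close it.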
zx_status_t iommu_manager_iommu_for_bdf(uint32_t bdf, zx_handle_t* iommu_h) {
    mtx_lock(&iommu_mgr.lock);

    uint8_t bus = (uint8_t)(bdf >> 8);
    uint8_t dev_func = (uint8_t)bdf;

    iommu_info_t* match = NULL;
    for (size_t i = 0; i < iommu_mgr.num_iommus; ++i) {
        iommu_info_t* iommu = &iommu_mgr.iommus[i];
        const zx_iommu_desc_intel_t* desc = iommu->desc;

        // TODO(teisenbe): Check segments in this function, once we support segments
        if (desc->pci_segment != 0) {
            continue;
        }

        const uintptr_t scope_base = (uintptr_t)desc + sizeof(zx_iommu_desc_intel_t);
        const zx_iommu_desc_intel_scope_t* scopes = (const zx_iommu_desc_intel_scope_t*)scope_base;
        const size_t num_scopes = desc->scope_bytes / sizeof(scopes[0]);

        bool found_matching_scope = false;
        for (size_t j = 0; j < num_scopes; ++j) {
            // TODO(teisenbe): Once we support scopes with multiple hops, need to correct
            // this routine.
            // TODO(teisenbe): Once we support bridge entries, need to correct this routine.
            ZX_DEBUG_ASSERT(scopes[j].num_hops == 1);
            if (scopes[j].start_bus != bus) {
                continue;
            }
            if (scopes[j].dev_func[0] == dev_func) {
                found_matching_scope = true;
                break;
            }
        }

        if (desc->whole_segment) {
            // If we're in whole-segment mode, a match in the scope list means
            // this unit does *not* cover this BDF.
            if (!found_matching_scope) {
                match = iommu;
                break;
            }
        } else {
            // If we're not in whole-segment mode, a match in the scope list
            // means this unit covers this BDF.
            if (found_matching_scope) {
                match = iommu;
                break;
            }
        }
    }

    if (match) {
        *iommu_h = match->handle;
    } else {
        // If there was no match, just use the dummy handle
        *iommu_h = iommu_mgr.dummy_iommu;
    }

    mtx_unlock(&iommu_mgr.lock);
    return ZX_OK;
}

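// Returns a borrowed handle to the dummy IOMMU, which is used for BDFs not
// covered by the ACPI tables. The handle remains owned by the manager.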
zx_status_t iommu_manager_get_dummy_iommu(zx_handle_t* iommu) {
    *iommu = iommu_mgr.dummy_iommu;
    return ZX_OK;
}