/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _DEV_IOMMU_IOMMU_H_
#define _DEV_IOMMU_IOMMU_H_

#include <dev/iommu/iommu_types.h>

struct bus_dma_tag_common;
struct iommu_map_entry;
TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);

/*
 * R/B tree of map entries forming a domain's guest address space (GAS).
 * The comparator iommu_gas_cmp_entries is defined by the GAS code.
 */
RB_HEAD(iommu_gas_entries_tree, iommu_map_entry);
RB_PROTOTYPE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

/*
 * Generation/sequence number pair recorded per map entry (see gseq below);
 * presumably identifies a point in the unit's invalidation queue so that
 * completion can be waited for — confirm against the driver's QI code.
 */
struct iommu_qi_genseq {
	u_int gen;
	uint32_t seq;
};

/*
 * One contiguous range [start, end] in a domain's guest address space.
 * first/last/free_down augment the R/B tree node to support free-space
 * searches over the subtree rooted at this entry.
 */
struct iommu_map_entry {
	iommu_gaddr_t start;
	iommu_gaddr_t end;
	iommu_gaddr_t first;		/* Least start in subtree */
	iommu_gaddr_t last;		/* Greatest end in subtree */
	iommu_gaddr_t free_down;	/* Max free space below the
					   current R/B tree node */
	u_int flags;
	union {
		/* Entry is on at most one of these lists at a time. */
		TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* DMA map entries */
		struct iommu_map_entry *tlb_flush_next;
	};
	RB_ENTRY(iommu_map_entry) rb_entry;	 /* Links for domain entries */
	struct iommu_domain *domain;
	struct iommu_qi_genseq gseq;
};

/*
 * State for one IOMMU hardware unit.
 */
struct iommu_unit {
	struct mtx lock;
	device_t dev;
	int unit;

	int dma_enabled;

	/* Busdma delayed map load */
	struct task dmamap_load_task;
	TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
	struct taskqueue *delayed_taskqueue;

	/*
	 * Bitmap of buses for which context must ignore slot:func,
	 * duplicating the page table pointer into all context table
	 * entries.  This is a client-controlled quirk to support some
	 * NTBs.
	 */
	uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
};

/*
 * Architecture/driver-supplied page-table operations for a domain.
 */
struct iommu_domain_map_ops {
	int (*map)(struct iommu_domain *domain, iommu_gaddr_t base,
	    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
	int (*unmap)(struct iommu_domain *domain, iommu_gaddr_t base,
	    iommu_gaddr_t size, int flags);
};

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

/*
 * An address-translation domain: owns a guest address space (rb_root)
 * and the entries mapped or reserved within it.
 */
struct iommu_domain {
	struct iommu_unit *iommu;	/* (c) */
	const struct iommu_domain_map_ops *ops;
	struct mtx lock;		/* (c) */
	struct task unload_task;	/* (c) */
	u_int entries_cnt;		/* (d) */
	struct iommu_map_entries_tailq unload_entries;	/* (d) Entries to
							   unload */
	struct iommu_gas_entries_tree rb_root;	/* (d) */
	struct iommu_map_entry *start_gap;	/* (d) */
	iommu_gaddr_t end;	/* (c) Highest address + 1 in
				   the guest AS */
	struct iommu_map_entry *first_place, *last_place; /* (d) */
	struct iommu_map_entry *msi_entry;	/* (d) Arch-specific */
	iommu_gaddr_t msi_base;		/* (d) Arch-specific */
	vm_paddr_t msi_phys;		/* (d) Arch-specific */
	u_int flags;			/* (u) */
};

/*
 * Per-device (PCI RID) context, attached to a domain.
 */
struct iommu_ctx {
	struct iommu_domain *domain;	/* (c) */
	struct bus_dma_tag_iommu *tag;	/* (c) Root tag */
	u_long loads;			/* atomic updates, for stat only */
	u_long unloads;			/* same */
	u_int flags;			/* (u) */
	uint16_t rid;			/* (c) pci RID */
};

/* struct iommu_ctx flags */
#define	IOMMU_CTX_FAULTED	0x0001	/* Fault was reported,
					   last_fault_rec is valid */
#define	IOMMU_CTX_DISABLED	0x0002	/* Device is disabled, the
					   ephemeral reference is kept
					   to prevent context destruction */

/* struct iommu_domain flags */
#define	IOMMU_DOMAIN_GAS_INITED		0x0001
#define	IOMMU_DOMAIN_PGTBL_INITED	0x0002
#define	IOMMU_DOMAIN_IDMAP		0x0010	/* Domain uses identity
						   page table */
#define	IOMMU_DOMAIN_RMRR		0x0020	/* Domain contains RMRR entry,
						   cannot be turned off */

/* Locking wrappers for the unit and domain mutexes declared above. */
#define	IOMMU_LOCK(unit)		mtx_lock(&(unit)->lock)
#define	IOMMU_UNLOCK(unit)		mtx_unlock(&(unit)->lock)
#define	IOMMU_ASSERT_LOCKED(unit)	mtx_assert(&(unit)->lock, MA_OWNED)

#define	IOMMU_DOMAIN_LOCK(dom)		mtx_lock(&(dom)->lock)
#define	IOMMU_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->lock)
#define	IOMMU_DOMAIN_ASSERT_LOCKED(dom)	mtx_assert(&(dom)->lock, MA_OWNED)

/* Context lookup, creation and destruction. */
void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct iommu_unit *iommu_find(device_t dev, bool verbose);
void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep);
void iommu_domain_unload(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);

struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
    device_t dev, bool rmrr);
device_t iommu_get_requester(device_t dev, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);

/* Guest address space (GAS) allocator interface. */
void iommu_gas_init_domain(struct iommu_domain *domain);
void iommu_gas_fini_domain(struct iommu_domain *domain);
struct iommu_map_entry *iommu_gas_alloc_entry(struct iommu_domain *domain,
    u_int flags);
void iommu_gas_free_entry(struct iommu_map_entry *entry);
void iommu_gas_free_space(struct iommu_map_entry *entry);
void iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size);
int iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void iommu_gas_free_region(struct iommu_map_entry *entry);
int iommu_gas_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0);
int iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end);

/* Buswide-context quirk control (see buswide_ctxs above). */
void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
void iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
    const struct iommu_domain_map_ops *ops);
void iommu_domain_fini(struct iommu_domain *domain);

/* Busdma integration. */
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bus_dma_tag_t iommu_get_dma_tag(device_t dev, device_t child);
struct iommu_ctx *iommu_get_dev_ctx(device_t dev);
struct iommu_domain *iommu_get_ctx_domain(struct iommu_ctx *ctx);

SYSCTL_DECL(_hw_iommu);

#endif /* !_DEV_IOMMU_IOMMU_H_ */