/*
 * Copyright (c) 2014, University of Washington.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, CAB F.78, Universitaetstr. 6, CH-8092 Zurich.
 * Attn: Systems Group.
 */

#ifndef INTEL_VTD_H
#define INTEL_VTD_H

#include <pci/confspace/pci_confspace.h>

#include <bitmacros.h>

#include <dev/vtd_dev.h>
#include <dev/vtd_iotlb_dev.h>

#include "vtd_domains.h"
#include "vtd_sl_paging.h"

// Mackerel type used for the root table, and the number of root entries
// (one per PCI bus).
#define ROOT_TABLE_TYPE vtd_rt
#define NUM_ROOT_ENTRIES 512

// Iterate var over the NULL-terminated singly-linked list starting at head.
#define VTD_FOR_EACH(var, head) for (var = (head); var; var = var->next)
// Prepend unit x to the list rooted at head.
#define VTD_ADD_UNIT(x, head) do {(x)->next = (head); (head) = (x);} while(0)


// A structure representing a remapping hardware unit. We currently assume
// that there is only one such unit per segment.
struct vtd_unit {
    vtd_t * regset;             // Remapping register set
    vtd_iotlb_t * iotlb_regs;   // IOTLB registers

    uint16_t pci_seg;           // PCI segment this unit serves
    // capability for the root table frame
    struct capref rt_frame;
    vtd_root_entry_array_t * root_table;

    // array of capabilities of context table frames, indexed by bus
    // (one root entry, hence one context table, per bus)
    struct capref ct_frame_caps[NUM_ROOT_ENTRIES];
    vtd_context_entry_array_t * context_tables[NUM_ROOT_ENTRIES];

    struct vtd_unit * next;     // next unit in the global list
};

// Create/destroy a remapping domain identified by the capability to its
// second-level PML4 table.
errval_t vtd_create_domain(struct capref pml4);
errval_t vtd_remove_domain(struct capref pml4);

// Remove/add the device seg:bus:dev.func from/to the domain identified
// by the capability pml4.
errval_t vtd_domain_remove_device(int seg, int bus, int dev, int func, struct capref pml4);
errval_t vtd_domain_add_device(int seg, int bus, int dev, int func, struct capref pml4);

void vtd_identity_domain_add_devices(void);

int vtd_init(void);

// This will need to be changed when (and if) Arrakis supports PCI
// segments.
61static inline int valid_device(int bus, int dev, int func) 62{ 63 return ((bus >= 0 && bus < PCI_NBUSES) && 64 (dev >= 0 && dev < PCI_NDEVICES) && 65 (func >= 0 && func < PCI_NFUNCTIONS)); 66} 67 68// Returns true if units, a list of remapping hardware units, is empty. 69static inline int vtd_no_units(struct vtd_unit *units) { 70 return (units == NULL); 71} 72 73// Returns 1 if the remapping unit snoops the processor caches for both 74// paging-entries and pages, else 0. 75static inline int vtd_coherency(struct vtd_unit *unit) 76{ 77 int pwc = vtd_ECAP_pwc_rdf(unit->regset); 78 int sc = vtd_ECAP_sc_rdf(unit->regset); 79 return (pwc & sc); 80} 81 82// Perform command to set the root table and then wait until hardware 83// sets the bit to indicate completion. 84static inline void GSTS_srtp_wait(struct vtd_unit *unit) 85{ 86 vtd_GCMD_srtp_wrf(unit->regset, 1); 87 while(vtd_GSTS_rtps_rdf(unit->regset) == 0); 88} 89 90// Perform command to flush the write buffer and then wait until hardware 91// clears the bit to indicate completion. 92static inline void GSTS_wbf_wait(struct vtd_unit *unit) 93{ 94 vtd_GCMD_wbf_wrf(unit->regset, 1); 95 while(vtd_GSTS_wbfs_rdf(unit->regset)); 96} 97 98// Perform command to enable/diable DMA-remapping and then wait until hardware 99// sets/clears the bit to indicate completion. 100static inline void GSTS_te_wait(struct vtd_unit *unit, int val) 101{ 102 assert(val == 1 || val == 0); 103 vtd_GCMD_te_wrf(unit->regset, val); 104 while(vtd_GSTS_tes_rdf(unit->regset) == !val); 105} 106 107// Perform command to invalidate the context-cache and then wait until 108// the hardware clears the bit to indicate completion. 109static inline void CCMD_icc_wait(struct vtd_unit *unit) 110{ 111 vtd_CCMD_icc_wrf(unit->regset, 1); 112 while(vtd_CCMD_icc_rdf(unit->regset)); 113} 114 115// Perform command to invalidate the IOTLB and then wait until hardware 116// clears the bit to indicate completion. 
static inline void IOTLB_ivt_wait(struct vtd_unit *unit)
{
    vtd_iotlb_iotlb_reg_ivt_wrf(unit->iotlb_regs, 1);
    // Hardware clears IVT when the IOTLB invalidation has completed.
    while (vtd_iotlb_iotlb_reg_ivt_rdf(unit->iotlb_regs));
}

// Flush the internal write buffers. Only required (and only performed)
// when the unit reports RWBF in its Capability register.
static inline void vtd_flush_write_buffer(struct vtd_unit *unit)
{
    if (vtd_CAP_rwbf_rdf(unit->regset)) {
        GSTS_wbf_wait(unit);
    }
}

// Perform a global invalidation of context-cache for unit.
// vtd_iotlb_glob_inval must be called afterwards.
static inline void vtd_context_cache_glob_inval(struct vtd_unit *unit)
{
    // Flush the Root-Complex internal write buffers
    vtd_flush_write_buffer(unit);
    // We want a global invalidation
    vtd_CCMD_cirg_wrf(unit->regset, vtd_gir);
    // Perform invalidation
    CCMD_icc_wait(unit);
}

// Perform a domain-selective invalidation of context-cache for dom,
// on every remapping unit the domain spans.
// vtd_iotlb_dom_inval must be called afterwards
static inline void vtd_context_cache_dom_inval(struct vtd_domain *dom)
{
    struct vtd_unit *u = NULL;
    VTD_FOR_EACH(u, dom->units) {
        vtd_flush_write_buffer(u);
        // Domain-selective granularity, qualified by the domain-id
        vtd_CCMD_cirg_wrf(u->regset, vtd_domir);
        vtd_CCMD_did_wrf(u->regset, dom->did);
        CCMD_icc_wait(u);
    }
}

// Perform a device-selective invalidation of context-cache for
// the device with the source-id sid contained in the domain dom.
// func_mask selects which low bits of the function number in sid are
// ignored, allowing multiple functions to be invalidated at once.
// vtd_iotlb_dom_inval must be called afterwards
static inline void vtd_context_cache_dev_inval(struct vtd_domain *dom, int sid, int func_mask)
{
    struct vtd_unit *u = NULL;
    VTD_FOR_EACH(u, dom->units) {
        vtd_flush_write_buffer(u);
        // Device-selective granularity, qualified by domain-id + source-id
        vtd_CCMD_cirg_wrf(u->regset, vtd_devir);
        vtd_CCMD_did_wrf(u->regset, dom->did);
        vtd_CCMD_sid_wrf(u->regset, sid);
        vtd_CCMD_fm_wrf(u->regset, func_mask);
        CCMD_icc_wait(u);
    }
}

// Perform a global IOTLB invalidation for unit.
173static inline void vtd_iotlb_glob_inval(struct vtd_unit *unit) 174{ 175 // Flush the Root-Complex internal write buffers 176 vtd_flush_write_buffer(unit); 177 // We want a global invalidation 178 vtd_iotlb_iotlb_reg_iirg_wrf(unit->iotlb_regs, vtd_iotlb_gir); 179 // Drain writes and reads (if not required, will be ignored by hardware) 180 vtd_iotlb_iotlb_reg_dw_wrf(unit->iotlb_regs, 1); 181 vtd_iotlb_iotlb_reg_dr_wrf(unit->iotlb_regs, 1); 182 // Invalidate IOTLB 183 IOTLB_ivt_wait(unit); 184} 185 186// Perform a domain-selective IOTLB invalidation for dom. 187static inline void vtd_iotlb_dom_inval(struct vtd_domain *dom) 188{ 189 struct vtd_unit *u = NULL; 190 VTD_FOR_EACH(u, dom->units) { 191 vtd_flush_write_buffer(u); 192 vtd_iotlb_iotlb_reg_iirg_wrf(u->iotlb_regs, vtd_iotlb_domir); 193 vtd_iotlb_iotlb_reg_did_wrf(u->iotlb_regs, dom->did); 194 vtd_iotlb_iotlb_reg_dw_wrf(u->iotlb_regs, 1); 195 vtd_iotlb_iotlb_reg_dr_wrf(u->iotlb_regs, 1); 196 IOTLB_ivt_wait(u); 197 } 198} 199 200// Perform a page-selective-within-domain IOTLB invalidation for dom. 201// 202// addr - the sl-input-address that needs to be invalidated 203// 204// addr_mask - specifies the number of low bits of addr to be masked. 205// Used for the invalidation of multiple contiguous pages which may 206// comprise a large page. 207// 208// inval_hint - a value of 0 indicates that both leaf and non-leaf sl 209// entries may have been modified. 1 indicates that no sl non-leaf entry 210// has been modified. 
211static inline void vtd_iotlb_page_inval(struct vtd_domain *dom, int ival_hint, int addr, int addr_mask) 212{ 213 struct vtd_unit *u = NULL; 214 VTD_FOR_EACH(u, dom->units) { 215 vtd_flush_write_buffer(u); 216 vtd_iotlb_iotlb_reg_iirg_wrf(u->iotlb_regs, vtd_iotlb_pir); 217 vtd_iotlb_iotlb_reg_did_wrf(u->iotlb_regs, dom->did); 218 vtd_iotlb_iotlb_reg_dw_wrf(u->iotlb_regs, 1); 219 vtd_iotlb_iotlb_reg_dr_wrf(u->iotlb_regs, 1); 220 vtd_iotlb_iva_reg_ih_wrf(u->iotlb_regs, ival_hint); 221 vtd_iotlb_iva_reg_addr_wrf(u->iotlb_regs, (addr >> 12)); 222 vtd_iotlb_iva_reg_am_wrf(u->iotlb_regs, addr_mask); 223 IOTLB_ivt_wait(u); 224 } 225} 226 227// Enable DMA-remapping for unit. 228static inline void vtd_trnsl_enable(struct vtd_unit *unit) 229{ 230 GSTS_te_wait(unit, 1); 231} 232 233// Disable DMA-remapping for unit. 234static inline void vtd_trnsl_disable(struct vtd_unit *unit) 235{ 236 GSTS_te_wait(unit, 0); 237} 238 239// Returns the number of domains supported. A simple linear relationship 240// exists between the value of the nd field in the Capability register 241// and the number of domains. 242static inline int vtd_number_domains_supported(struct vtd_unit *unit) 243{ 244 int nd_bits; 245 assert(unit != NULL); 246 nd_bits = (2 * vtd_CAP_nd_rdf(unit->regset)) + 4; 247 return (1 << nd_bits); 248} 249 250// Returns the number of bits per page for a pagetable with the specified 251// number of levels. 252static inline int vtd_levels_to_page_bits(int levels) { 253 assert(levels >= 2 && levels <= 4); 254 return (48 - SL_PTABLE_MASK_BITS * levels); 255} 256 257#endif //INTEL_VTD_H 258