/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(GD_GPL)
 */

#include <config.h>

#ifdef CONFIG_IOMMU

#include <api/syscall.h>
#include <machine/io.h>
#include <kernel/thread.h>
#include <arch/api/invocation.h>
#include <arch/object/iospace.h>
#include <arch/model/statedata.h>
#include <linker.h>
#include <plat/machine/intel-vtd.h>

/* NOTE(review): this type appears unused within this file — presumably kept
 * for an external consumer or historical reasons; confirm before removing. */
typedef struct lookupVTDContextSlot_ret {
    vtd_cte_t *cte;
    word_t index;
} lookupVTDContextSlot_ret_t;

/*
 * Create the master IOSpace capability handed to the initial thread at boot.
 * Returns a null cap when no DMA remapping hardware units were discovered
 * (x86KSnumDrhu == 0), i.e. there is no IOMMU to manage.
 */
BOOT_CODE cap_t
master_iospace_cap(void)
{
    if (x86KSnumDrhu == 0) {
        return cap_null_cap_new();
    }

    return
        cap_io_space_cap_new(
            0,              /* capDomainID  */
            0               /* capPCIDevice */
        );
}

/*
 * Resolve the VT-d context-table slot for the PCI device associated with
 * the given capability. Accepts IOSpace, IOPageTable and Frame caps, each
 * of which stores the PCI request ID in a different field; any other cap
 * type is a kernel bug (fail()).
 *
 * The PCI bus number indexes the root table; (device << 3) | function
 * indexes the per-bus context table found through the root entry.
 */
static vtd_cte_t*
lookup_vtd_context_slot(cap_t cap)
{
    uint32_t vtd_root_index;
    uint32_t vtd_context_index;
    uint32_t pci_request_id;
    vtd_rte_t* vtd_root_slot;
    vtd_cte_t* vtd_context;
    vtd_cte_t* vtd_context_slot;

    switch (cap_get_capType(cap)) {
    case cap_io_space_cap:
        pci_request_id = cap_io_space_cap_get_capPCIDevice(cap);
        break;

    case cap_io_page_table_cap:
        pci_request_id = cap_io_page_table_cap_get_capIOPTIOASID(cap);
        break;

    case cap_frame_cap:
        pci_request_id = cap_frame_cap_get_capFMappedASID(cap);
        break;

    default:
        fail("Invalid cap type");
    }

    vtd_root_index = get_pci_bus(pci_request_id);
    vtd_root_slot = x86KSvtdRootTable + vtd_root_index;

    vtd_context = (vtd_cte_t*)paddr_to_pptr(vtd_rte_ptr_get_ctp(vtd_root_slot));
    vtd_context_index = (get_pci_dev(pci_request_id) << 3) | get_pci_fun(pci_request_id);
    vtd_context_slot = &vtd_context[vtd_context_index];

    return vtd_context_slot;
}

/*
 * Walk the IO page-table hierarchy rooted at 'iopt', resolving the page-
 * aligned IO address 'translation' down through at most 'levels_to_resolve'
 * levels. 'levels_remaining' counts down as the recursion descends; the
 * walk stops early at a slot that is not writable (treated here as "not a
 * further-level pointer") or when no levels remain.
 *
 * Returns the slot reached, the number of levels still unresolved, and
 * EXCEPTION_LOOKUP_FAULT only when called with a NULL table.
 */
static lookupIOPTSlot_ret_t
lookupIOPTSlot_resolve_levels(vtd_pte_t *iopt, word_t translation,
                              word_t levels_to_resolve, word_t levels_remaining)
{
    lookupIOPTSlot_ret_t ret;

    word_t iopt_index = 0;
    vtd_pte_t *iopt_slot = 0;
    vtd_pte_t *next_iopt_slot = 0;

    if (iopt == 0) {
        ret.ioptSlot = 0;
        ret.level = levels_remaining;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    /* Each level consumes VTD_PT_INDEX_BITS of the translation, highest
     * level first: the shift selects the index bits for the level currently
     * being resolved. */
    iopt_index = (translation >> (VTD_PT_INDEX_BITS * (x86KSnumIOPTLevels - 1 - (levels_to_resolve - levels_remaining)))) & MASK(VTD_PT_INDEX_BITS);
    iopt_slot = iopt + iopt_index;

    if (!vtd_pte_ptr_get_write(iopt_slot) || levels_remaining == 0) {
        ret.ioptSlot = iopt_slot;
        ret.level = levels_remaining;
        ret.status = EXCEPTION_NONE;
        return ret;
    }
    next_iopt_slot = (vtd_pte_t *)paddr_to_pptr(vtd_pte_ptr_get_addr(iopt_slot));
    return lookupIOPTSlot_resolve_levels(next_iopt_slot, translation, levels_to_resolve, levels_remaining - 1);
}

/*
 * Look up the IO page-table slot for a byte-granularity IO address,
 * starting from the top-level table. Fails with EXCEPTION_LOOKUP_FAULT
 * when there is no top-level table at all.
 */
static inline lookupIOPTSlot_ret_t
lookupIOPTSlot(vtd_pte_t* iopt, word_t io_address)
{
    lookupIOPTSlot_ret_t ret;

    if (iopt == 0) {
        ret.ioptSlot = 0;
        ret.level = 0;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        return lookupIOPTSlot_resolve_levels(iopt, io_address >> PAGE_BITS,
                                             x86KSnumIOPTLevels - 1, x86KSnumIOPTLevels - 1);
    }
}

/*
 * Clear the VT-d context entry associated with 'cap', flush the entry to
 * memory and invalidate the IOTLB so the hardware stops using the old
 * translation root. Also restarts the current thread.
 */
void
unmapVTDContextEntry(cap_t cap)
{
    vtd_cte_t *cte = lookup_vtd_context_slot(cap);
    assert(cte != 0);
    *cte = vtd_cte_new(
               0,       /* Domain ID          */
               false,   /* RMRR               */
               0,       /* Address Width      */
               0,       /* Address Space Root */
               0,       /* Translation Type   */
               false    /* Present            */
           );

    /* Write back the cleared entry before telling the IOMMU to drop
     * cached translations. */
    flushCacheRange(cte, VTD_CTE_SIZE_BITS);
    invalidate_iotlb();
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return;
}

/* Unmap an IO page table and mark its cap as no longer mapped. */
static exception_t
performX86IOPTInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    deleteIOPageTable(cap);
    cap = cap_io_page_table_cap_set_capIOPTIsMapped(cap, 0);
    ctSlot->cap = cap;

    return EXCEPTION_NONE;
}

/* Install a top-level IO page table by writing its context entry, flushing
 * it to memory, and recording the updated cap. */
static exception_t
performX86IOPTInvocationMapContextRoot(cap_t cap, cte_t *ctSlot, vtd_cte_t vtd_cte, vtd_cte_t *vtd_context_slot)
{
    *vtd_context_slot = vtd_cte;
    flushCacheRange(vtd_context_slot, VTD_CTE_SIZE_BITS);
    ctSlot->cap = cap;

    return EXCEPTION_NONE;
}

/* Install a lower-level IO page table by writing its parent PTE, flushing
 * it to memory, and recording the updated cap. */
static exception_t
performX86IOPTInvocationMapPT(cap_t cap, cte_t *ctSlot, vtd_pte_t iopte, vtd_pte_t *ioptSlot)
{
    *ioptSlot = iopte;
    flushCacheRange(ioptSlot, VTD_PTE_SIZE_BITS);
    ctSlot->cap = cap;

    return EXCEPTION_NONE;
}

/*
 * Decode an invocation on an IO page-table cap. Supports:
 *  - X86IOPageTableUnmap: unconditionally unmap.
 *  - X86IOPageTableMap: map the table either as the top level (writing a
 *    fresh context entry when none is present) or into an existing
 *    hierarchy at the level the lookup reached.
 * All other labels are rejected with seL4_IllegalOperation.
 */
exception_t
decodeX86IOPTInvocation(
    word_t invLabel,
    word_t length,
    cte_t* slot,
    cap_t cap,
    extra_caps_t excaps,
    word_t* buffer
)
{
    cap_t io_space;
    paddr_t paddr;
    uint32_t pci_request_id;
    word_t io_address;
    uint16_t domain_id;
    vtd_cte_t* vtd_context_slot;
    vtd_pte_t* vtd_pte;

    if (invLabel == X86IOPageTableUnmap) {

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performX86IOPTInvocationUnmap(cap, slot);
    }

    if (invLabel != X86IOPageTableMap) {
        userError("X86IOPageTable: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (excaps.excaprefs[0] == NULL || length < 1) {
        userError("X86IOPageTableMap: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    io_space = excaps.excaprefs[0]->cap;
    /* Align the requested IO address to the region one table covers. */
    io_address = getSyscallArg(0, buffer) & ~MASK(VTD_PT_INDEX_BITS + seL4_PageBits);

    if (cap_io_page_table_cap_get_capIOPTIsMapped(cap)) {
        userError("X86IOPageTableMap: IO page table is already mapped.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(io_space) != cap_io_space_cap) {
        userError("X86IOPageTableMap: Invalid IO space capability.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pci_request_id = cap_io_space_cap_get_capPCIDevice(io_space);
    domain_id = cap_io_space_cap_get_capDomainID(io_space);
    if (pci_request_id == asidInvalid) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = pptr_to_paddr(VTD_PTE_PTR(cap_io_page_table_cap_get_capIOPTBasePtr(cap)));
    vtd_context_slot = lookup_vtd_context_slot(io_space);

    if (!vtd_cte_ptr_get_present(vtd_context_slot)) {

        /* No translation root installed for this device yet: this table
         * becomes the 1st level page table, referenced from a new context
         * entry. */
        vtd_cte_t vtd_cte = vtd_cte_new(
                                domain_id,              /* domain ID                   */
                                false,                  /* RMRR                        */
                                x86KSnumIOPTLevels - 2, /* addr width (x = levels - 2) */
                                paddr,                  /* address space root          */
                                0,                      /* translation type            */
                                true                    /* present                     */
                            );

        cap = cap_io_page_table_cap_set_capIOPTIsMapped(cap, 1);
        cap = cap_io_page_table_cap_set_capIOPTLevel(cap, 0);
        cap = cap_io_page_table_cap_set_capIOPTIOASID(cap, pci_request_id);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performX86IOPTInvocationMapContextRoot(cap, slot, vtd_cte, vtd_context_slot);
    } else {
        lookupIOPTSlot_ret_t lu_ret;
        vtd_pte_t iopte;

        vtd_pte = (vtd_pte_t *)paddr_to_pptr(vtd_cte_ptr_get_asr(vtd_context_slot));
        lu_ret = lookupIOPTSlot(vtd_pte, io_address);

        if (lu_ret.status != EXCEPTION_NONE) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Convert "levels remaining" into the absolute level this table
         * will occupy. */
        lu_ret.level = x86KSnumIOPTLevels - lu_ret.level;
        if (vtd_pte_ptr_get_addr(lu_ret.ioptSlot) != 0) {
            /* Slot already points somewhere: caller must delete first. */
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        iopte = vtd_pte_new(
                    paddr,  /* physical addr          */
                    1,      /* write permission flag  */
                    1       /* read permission flag   */
                );

        cap = cap_io_page_table_cap_set_capIOPTIsMapped(cap, 1);
        cap = cap_io_page_table_cap_set_capIOPTLevel(cap, lu_ret.level);
        cap = cap_io_page_table_cap_set_capIOPTIOASID(cap, pci_request_id);
        cap = cap_io_page_table_cap_set_capIOPTMappedAddress(cap, io_address);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performX86IOPTInvocationMapPT(cap, slot, iopte, lu_ret.ioptSlot);
    }
}

/* Write a frame mapping into an IO page-table slot, flush it to memory,
 * and record the updated frame cap. */
static exception_t
performX86IOInvocationMap(cap_t cap, cte_t *ctSlot, vtd_pte_t iopte, vtd_pte_t *ioptSlot)
{
    ctSlot->cap = cap;
    *ioptSlot = iopte;
    flushCacheRange(ioptSlot, VTD_PTE_SIZE_BITS);

    return EXCEPTION_NONE;
}

/*
 * Decode an X86PageMapIO invocation: map a small-page frame cap into an
 * IO space at the requested IO address, with rights derived from both the
 * requested rights mask (arg 0) and the frame cap's own VM rights. The
 * full page-table hierarchy must already be installed (lookup must reach
 * level 0).
 */
exception_t
decodeX86IOMapInvocation(
    word_t length,
    cte_t* slot,
    cap_t cap,
    extra_caps_t excaps,
    word_t* buffer
)
{
    cap_t io_space;
    word_t io_address;
    uint32_t pci_request_id;
    vtd_cte_t* vtd_context_slot;
    vtd_pte_t* vtd_pte;
    vtd_pte_t iopte;
    paddr_t paddr;
    lookupIOPTSlot_ret_t lu_ret;
    vm_rights_t frame_cap_rights;
    seL4_CapRights_t dma_cap_rights_mask;

    if (excaps.excaprefs[0] == NULL || length < 2) {
        userError("X86PageMapIO: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_frame_cap_get_capFSize(cap) != X86_SmallPage) {
        userError("X86PageMapIO: Invalid page size.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_frame_cap_get_capFMappedASID(cap) != asidInvalid) {
        userError("X86PageMapIO: Page already mapped.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    io_space = excaps.excaprefs[0]->cap;
    io_address = getSyscallArg(1, buffer) & ~MASK(PAGE_BITS);
    paddr = pptr_to_paddr((void*)cap_frame_cap_get_capFBasePtr(cap));

    if (cap_get_capType(io_space) != cap_io_space_cap) {
        userError("X86PageMapIO: Invalid IO space capability.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pci_request_id = cap_io_space_cap_get_capPCIDevice(io_space);

    if (pci_request_id == asidInvalid) {
        userError("X86PageMapIO: Invalid PCI device.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vtd_context_slot = lookup_vtd_context_slot(io_space);

    if (!vtd_cte_ptr_get_present(vtd_context_slot)) {
        /* 1st Level Page Table is not installed */
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vtd_pte = (vtd_pte_t*)paddr_to_pptr(vtd_cte_ptr_get_asr(vtd_context_slot));
    lu_ret = lookupIOPTSlot(vtd_pte, io_address);
    /* Must resolve all the way down to a leaf slot (level 0). */
    if (lu_ret.status != EXCEPTION_NONE || lu_ret.level != 0) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vtd_pte_ptr_get_addr(lu_ret.ioptSlot) != 0) {
        /* Slot already has a mapping: caller must delete it first. */
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    dma_cap_rights_mask = rightsFromWord(getSyscallArg(0, buffer));
    frame_cap_rights = cap_frame_cap_get_capFVMRights(cap);

    /* Effective rights are the intersection of the requested mask and
     * what the frame cap itself allows. */
    bool_t write = seL4_CapRights_get_capAllowWrite(dma_cap_rights_mask) && (frame_cap_rights == VMReadWrite);
    bool_t read = seL4_CapRights_get_capAllowRead(dma_cap_rights_mask) && (frame_cap_rights != VMKernelOnly);
    if (write || read) {
        iopte = vtd_pte_new(paddr, !!write, !!read);
    } else {
        /* A mapping with neither read nor write is meaningless. */
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cap = cap_frame_cap_set_capFMapType(cap, X86_MappingIOSpace);
    cap = cap_frame_cap_set_capFMappedASID(cap, pci_request_id);
    cap = cap_frame_cap_set_capFMappedAddress(cap, io_address);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performX86IOInvocationMap(cap, slot, iopte, lu_ret.ioptSlot);
}

/*
 * Remove an IO page table from the hierarchy it is mapped into. A level-0
 * table is detached by clearing its context entry; a deeper table by
 * clearing the parent PTE that points at it. In either case the cleared
 * entry is flushed and the IOTLB invalidated. Silently returns if the
 * table turns out not to be where the cap claims (overmapped or the
 * hierarchy changed underneath it).
 */
void deleteIOPageTable(cap_t io_pt_cap)
{
    lookupIOPTSlot_ret_t lu_ret;
    uint32_t level;
    word_t io_address;
    vtd_cte_t* vtd_context_slot;
    vtd_pte_t* vtd_pte;

    if (cap_io_page_table_cap_get_capIOPTIsMapped(io_pt_cap)) {
        io_pt_cap = cap_io_page_table_cap_set_capIOPTIsMapped(io_pt_cap, 0);
        level = cap_io_page_table_cap_get_capIOPTLevel(io_pt_cap);
        vtd_context_slot = lookup_vtd_context_slot(io_pt_cap);

        if (!vtd_cte_ptr_get_present(vtd_context_slot)) {
            return;
        }

        vtd_pte = (vtd_pte_t*)paddr_to_pptr(vtd_cte_ptr_get_asr(vtd_context_slot));

        if (level == 0) {
            /* if we have been overmapped or something */
            if (pptr_to_paddr(vtd_pte) != pptr_to_paddr((void *)cap_io_page_table_cap_get_capIOPTBasePtr(io_pt_cap))) {
                return;
            }
            *vtd_context_slot = vtd_cte_new(
                                    0,      /* Domain ID          */
                                    false,  /* RMRR               */
                                    0,      /* Address Width      */
                                    0,      /* Address Space Root */
                                    0,      /* Translation Type   */
                                    0       /* Present            */
                                );
            flushCacheRange(vtd_context_slot, VTD_CTE_SIZE_BITS);
        } else {
            io_address = cap_io_page_table_cap_get_capIOPTMappedAddress(io_pt_cap);
            /* Walk down only to the parent level of this table. */
            lu_ret = lookupIOPTSlot_resolve_levels(vtd_pte, io_address >> PAGE_BITS, level - 1, level - 1);

            /* if we have been overmapped or something */
            if (lu_ret.status != EXCEPTION_NONE || lu_ret.level != 0) {
                return;
            }
            if (vtd_pte_ptr_get_addr(lu_ret.ioptSlot) != pptr_to_paddr((void *)cap_io_page_table_cap_get_capIOPTBasePtr(io_pt_cap))) {
                return;
            }
            *lu_ret.ioptSlot = vtd_pte_new(
                                   0,   /* Physical Address  */
                                   0,   /* Write Permission  */
                                   0    /* Read Permission   */
                               );
            flushCacheRange(lu_ret.ioptSlot, VTD_PTE_SIZE_BITS);
        }
        invalidate_iotlb();
    }
}

/*
 * Remove a frame mapping from an IO space: clear the leaf PTE the frame
 * cap says it occupies, flush the entry and invalidate the IOTLB.
 * Silently returns if the mapping is absent or points at a different
 * frame (overmapped).
 */
void unmapIOPage(cap_t cap)
{
    lookupIOPTSlot_ret_t lu_ret;
    word_t io_address;
    vtd_cte_t* vtd_context_slot;
    vtd_pte_t* vtd_pte;

    io_address = cap_frame_cap_get_capFMappedAddress(cap);
    vtd_context_slot = lookup_vtd_context_slot(cap);

    if (!vtd_cte_ptr_get_present(vtd_context_slot)) {
        return;
    }

    vtd_pte = (vtd_pte_t*)paddr_to_pptr(vtd_cte_ptr_get_asr(vtd_context_slot));

    lu_ret = lookupIOPTSlot(vtd_pte, io_address);
    if (lu_ret.status != EXCEPTION_NONE || lu_ret.level != 0) {
        return;
    }

    /* Only clear the slot if it still maps this frame. */
    if (vtd_pte_ptr_get_addr(lu_ret.ioptSlot) != pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap))) {
        return;
    }

    *lu_ret.ioptSlot = vtd_pte_new(
                           0,   /* Physical Address  */
                           0,   /* Write Permission  */
                           0    /* Read Permission   */
                       );

    flushCacheRange(lu_ret.ioptSlot, VTD_PTE_SIZE_BITS);
    invalidate_iotlb();
}

/* Unmap a frame from its IO space and reset the mapping fields of the
 * frame cap stored in ctSlot. The 'cap' parameter is unused beyond the
 * cap already present in ctSlot. */
exception_t
performX86IOUnMapInvocation(cap_t cap, cte_t *ctSlot)
{
    unmapIOPage(ctSlot->cap);

    ctSlot->cap = cap_frame_cap_set_capFMappedAddress(ctSlot->cap, 0);
    ctSlot->cap = cap_frame_cap_set_capFMapType(ctSlot->cap, X86_MappingNone);
    ctSlot->cap = cap_frame_cap_set_capFMappedASID(ctSlot->cap, asidInvalid);

    return EXCEPTION_NONE;
}

/* IOSpace caps carry no invocations of their own; everything is done
 * through IO page-table and frame caps. */
exception_t decodeX86IOSpaceInvocation(word_t invLabel, cap_t cap)
{
    userError("IOSpace capability has no invocations");
    current_syscall_error.type = seL4_IllegalOperation;
    return EXCEPTION_SYSCALL_ERROR;
}

#endif /* CONFIG_IOMMU */