// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"

static void v1_tlb_flush_all(void *cookie)
{
}

static void v1_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v1_flush_ops = {
	.tlb_flush_all	= v1_tlb_flush_all,
	.tlb_flush_walk	= v1_tlb_flush_walk,
	.tlb_add_page	= v1_tlb_add_page,
};

/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	pg_size  = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}
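/*
 * Illustrative example (editorial, not in the original source): on a
 * 4K-page system a 32K mapping is encoded as eight replicated 8-byte PTEs.
 * first_pte_l7() then computes cnt == 8 and
 * pte_mask == ~((8 << 3) - 1) == ~63UL, so a pointer to any of the eight
 * entries is rounded down to the 64-byte-aligned first entry of the series.
 */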
/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

static void free_pt_page(u64 *pt, struct list_head *freelist)
{
	struct page *p = virt_to_page(pt);

	list_add_tail(&p->lru, freelist);
}

static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
	u64 *p;
	int i;

	for (i = 0; i < 512; ++i) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		/* Large PTE? */
		if (PM_PTE_LEVEL(pt[i]) == 0 ||
		    PM_PTE_LEVEL(pt[i]) == 7)
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = IOMMU_PTE_PAGE(pt[i]);
		if (lvl > 2)
			free_pt_lvl(p, freelist, lvl - 1);
		else
			free_pt_page(p, freelist);
	}

	free_pt_page(pt, freelist);
}

static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
	case PAGE_MODE_3_LEVEL:
	case PAGE_MODE_4_LEVEL:
	case PAGE_MODE_5_LEVEL:
	case PAGE_MODE_6_LEVEL:
		free_pt_lvl(root, freelist, mode);
		break;
	default:
		BUG();
	}
}

void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode)
{
	u64 pt_root;

	/* lowest 3 bits encode pgtable mode */
	pt_root = mode & 7;
	pt_root |= (u64)root;

	amd_iommu_domain_set_pt_root(domain, pt_root);
}
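/*
 * Packing note (editorial, hedged): because the root table is page aligned,
 * its low three bits are free to carry the page-table mode, so root and mode
 * can be published together as a single u64 and readers never see a root
 * pointer paired with a stale mode. For example, a 3-level table with its
 * root at 0x1000 is stored as 0x1000 | PAGE_MODE_3_LEVEL == 0x1003.
 */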
/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits, up to a
 * maximum of 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   unsigned long address,
				   gfp_t gfp)
{
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	pte = alloc_pgtable_page(domain->nid, gfp);
	if (!pte)
		return false;

	spin_lock_irqsave(&domain->lock, flags);

	if (address <= PM_LEVEL_SIZE(domain->iop.mode))
		goto out;

	ret = false;
	if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
		goto out;

	*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));

	domain->iop.root  = pte;
	domain->iop.mode += 1;
	amd_iommu_update_and_flush_device_table(domain);
	amd_iommu_domain_flush_complete(domain);

	/*
	 * Device Table needs to be updated and flushed before the new root can
	 * be published.
	 */
	amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);

	pte = NULL;
	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);
	free_page((unsigned long)pte);

	return ret;
}

static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(domain, address, gfp))
			return NULL;
	}

	level   = domain->iop.mode - 1;
	pte     = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = alloc_pgtable_page(domain->nid, gfp);

			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				free_page((unsigned long)page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}
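/*
 * Walk sketch (editorial, illustrative): mapping a 4K page into a 3-level
 * table starts at level 2 (mode - 1) and descends to end_lvl 0, allocating
 * any missing intermediate tables on the way down. try_cmpxchg64() makes the
 * install safe against concurrent mappers: the loser of a race frees its
 * freshly allocated page and retries the same level with the winner's entry.
 */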
/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size)
{
	int level;
	u64 *pte;

	*page_size = 0;

	if (address > PM_LEVEL_SIZE(pgtable->mode))
		return NULL;

	level	   = pgtable->mode - 1;
	pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	*page_size = PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
		    PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte = IOMMU_PTE_PAGE(*pte);
		pte = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}
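/*
 * Example lookup (editorial, illustrative): with a 32K large mapping
 * installed, fetch_pte() stops at the PM_PTE_LEVEL == 7 entry and
 * first_pte_l7() fixes up both results: it returns the first of the eight
 * replicated PTEs and sets *page_size to SZ_32K, regardless of which of the
 * eight slots the walk landed on.
 */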
static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
	u64 *pt;
	int mode;

	/* try_cmpxchg64() reloads pteval on failure, so just retry and warn */
	while (!try_cmpxchg64(pte, &pteval, 0))
		pr_warn("IOMMU pte changed since we read it\n");

	if (!IOMMU_PTE_PRESENT(pteval))
		return;

	pt   = IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	free_sub_pt(pt, mode, freelist);
}

/*
 * Generic mapping functions. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
	LIST_HEAD(freelist);
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;
	size_t size = pgcount << __ffs(pgsize);
	unsigned long o_iova = iova;

	BUG_ON(!IS_ALIGNED(iova, pgsize));
	BUG_ON(!IS_ALIGNED(paddr, pgsize));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	while (pgcount > 0) {
		count = PAGE_SIZE_PTE_COUNT(pgsize);
		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);

		ret = -ENOMEM;
		if (!pte)
			goto out;

		for (i = 0; i < count; ++i)
			free_clear_pte(&pte[i], pte[i], &freelist);

		if (!list_empty(&freelist))
			updated = true;

		if (count > 1) {
			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
		} else
			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

		if (prot & IOMMU_PROT_IR)
			__pte |= IOMMU_PTE_IR;
		if (prot & IOMMU_PROT_IW)
			__pte |= IOMMU_PTE_IW;

		for (i = 0; i < count; ++i)
			pte[i] = __pte;

		iova  += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}

	ret = 0;

out:
	if (updated) {
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_pages(dom, o_iova, size);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	put_pages_list(&freelist);

	return ret;
}
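/*
 * Worked example (editorial, illustrative): iommu_v1_map_pages(ops, iova,
 * paddr, SZ_32K, 2, prot, GFP_KERNEL, &mapped) covers 64K in two loop
 * iterations. Each iteration computes count == 8, writes the same
 * PM_LEVEL_ENC(7)-encoded PTE into all eight replicated slots, and advances
 * iova/paddr by 32K. Any page-table pages displaced along the way are only
 * returned to the allocator after the TLB flush, via put_pages_list().
 */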
static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;
	size_t size = pgcount << __ffs(pgsize);

	BUG_ON(!is_power_of_2(pgsize));

	unmapped = 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		} else {
			return unmapped;
		}

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte	    = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}
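/*
 * Address math example (editorial, illustrative): for an IOVA inside a 2M
 * mapping, pte_pgsize == SZ_2M and offset_mask == 0x1fffff, so the result
 * combines the 2M-aligned physical base taken from the PTE with the low
 * 21 bits of the IOVA.
 */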
static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size,
				     unsigned long flags)
{
	bool test_only = flags & IOMMU_DIRTY_NO_CLEAR;
	bool dirty = false;
	int i, count;

	/*
	 * 2.2.3.2 Host Dirty Support
	 * When a non-default page size is used, software must OR the
	 * Dirty bits in all of the replicated host PTEs used to map
	 * the page. The IOMMU does not guarantee the Dirty bits are
	 * set in all of the replicated PTEs. Any portion of the page
	 * may have been written even if the Dirty bit is set in only
	 * one of the replicated PTEs.
	 */
	count = PAGE_SIZE_PTE_COUNT(size);
	for (i = 0; i < count && test_only; i++) {
		if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) {
			dirty = true;
			break;
		}
	}

	for (i = 0; i < count && !test_only; i++) {
		if (test_and_clear_bit(IOMMU_PTE_HD_BIT,
				       (unsigned long *)&ptep[i])) {
			dirty = true;
		}
	}

	return dirty;
}

static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long end = iova + size - 1;

	do {
		unsigned long pgsize = 0;
		u64 *ptep, pte;

		ptep = fetch_pte(pgtable, iova, &pgsize);
		if (ptep)
			pte = READ_ONCE(*ptep);
		if (!ptep || !IOMMU_PTE_PRESENT(pte)) {
			pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0);
			iova += pgsize;
			continue;
		}

		/*
		 * Mark the whole IOVA range as dirty even if only one of
		 * the replicated PTEs was marked dirty.
		 */
		if (pte_test_and_clear_dirty(ptep, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}

/*
 * ----------------------------------------------------
 */
static void v1_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
	struct protection_domain *dom;
	LIST_HEAD(freelist);

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	dom = container_of(pgtable, struct protection_domain, iop);

	/* Page-table is not visible to IOMMU anymore, so free it */
	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	free_sub_pt(pgtable->root, pgtable->mode, &freelist);

	/* Update data structure */
	amd_iommu_domain_clr_pt_root(dom);

	/* Make changes visible to IOMMUs */
	amd_iommu_domain_update(dom);

	put_pages_list(&freelist);
}
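/*
 * Teardown ordering note (editorial, hedged): free_sub_pt() only queues
 * pages on the local freelist; they are handed back to the allocator by
 * put_pages_list() after amd_iommu_domain_update() has made the cleared
 * root visible to the IOMMUs, so hardware never walks freed pages.
 */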
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
	cfg->ias           = IOMMU_IN_ADDR_BIT_SIZE;
	cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb           = &v1_flush_ops;

	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v1_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
	pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;

	return &pgtable->iop;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};
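/*
 * Usage sketch (editorial, illustrative; not part of this file): the generic
 * io-pgtable layer dispatches on enum io_pgtable_fmt, so a caller obtains
 * these callbacks with something like
 *
 *	struct io_pgtable_ops *ops =
 *		alloc_io_pgtable_ops(AMD_IOMMU_V1, &cfg, cookie);
 *
 * which routes to v1_alloc_pgtable() above, and tears down through
 * v1_free_pgtable() when free_io_pgtable_ops() is called.
 */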