/linux-master/drivers/virt/acrn/
mm.c
     162  struct page **pages = NULL, *page;
     171  /* Get the page number of the map region */
     279  page = pages[i];
     280  VM_BUG_ON_PAGE(PageTail(page), page);
     281  order = compound_order(page);
     302  page = pages[i];
     303  VM_BUG_ON_PAGE(PageTail(page), page);
     …

/linux-master/drivers/vfio/
vfio_iommu_type1.c
     105  struct page **pages;		/* for pin_user_pages_remote */
     106  struct page *fallback_page;	/* if pages alloc fails */
     148  * that is 2^28 (256 MB) which maps to 2^31 * 2^12 = 2^43 (8TB) on 4K page
     448  * Some mappings aren't backed by a struct page, for example an mmap'd
     452  * page needs to set the reserved bit in all subpages to be safe.
     465  struct page *page = pfn_to_page(pfn);
     467  unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
     473  #define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))
     483  batch->pages = (struct page **) __get_free_page(…
     …

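The VFIO_BATCH_MAX_CAPACITY hit at line 473 sizes a pin batch so that the array of struct page pointers fills exactly one backing page (the allocation at 483). A minimal runnable sketch of that arithmetic, assuming 4 KiB pages and 8-byte pointers; the macro name here is a stand-in, not the driver's:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants on a typical
 * 64-bit build with 4 KiB pages. */
#define PAGE_SIZE 4096UL
struct page;
#define BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))

int main(void)
{
	/* One backing page holds 4096 / 8 = 512 pointers, so a single
	 * page-sized allocation can track 512 pinned pages, i.e. 2 MiB
	 * of user memory per batch. */
	printf("capacity = %lu pointers (%lu KiB pinned per batch)\n",
	       BATCH_MAX_CAPACITY, BATCH_MAX_CAPACITY * PAGE_SIZE / 1024);
	return 0;
}
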
/linux-master/drivers/vfio/pci/pds/
dirty.c
     259  dev_dbg(&pdev->dev, "vf%u: Start dirty page tracking\n",
     380  struct page **pages;
     393  * Start and end of bitmap section to seq/ack might not be page
     403  struct page *page = vmalloc_to_page(bmp);
     405  if (!page) {
     410  pages[i] = page;
     531  dev_dbg(dev, "vf%u: Get dirty page bitmap\n", pds_vfio->vf_id);

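The hits at 403-410 walk a vmalloc'd bitmap and record the struct page behind each 4 KiB chunk, since vmalloc memory is only virtually contiguous. A kernel-style sketch of that pattern; the helper name and error handling are illustrative, not the driver's:

/* Sketch only: collect the struct page behind each page of a
 * vmalloc'd buffer, as dirty.c does for its bitmap sections. */
static struct page **collect_vmalloc_pages(void *buf, size_t len)
{
	unsigned long npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	struct page **pages;
	unsigned long i;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < npages; i++) {
		/* vmalloc memory is physically scattered;
		 * vmalloc_to_page() resolves each 4K chunk. */
		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);
		if (!pages[i]) {
			kfree(pages);
			return NULL;
		}
	}
	return pages;
}
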
/linux-master/drivers/parisc/
sba_iommu.c
     49   #include <asm/page.h>	/* for PAGE0 */
     607  * of 1 page and a maximum of 2GB. Hardware requires the address be
     781  sba_map_page(struct device *dev, struct page *page, unsigned long offset,
     785  return sba_map_single(dev, page_address(page) + offset, size,
     989  ** correct virtual address associated with each DMA page.
     1347 /* Set I/O PDIR Page size to system page size */
     1354 panic(__FILE__ "Unsupported system page size %d",
     1489 /* Set I/O PDIR Page size to system page size */
     1496 panic(__FILE__ "Unsupported system page size %d",
     …

ccio-dma.c
     42   #include <asm/page.h>
     655  /* round up to nearest page size */
     659  /* invalidate one page at a time */
     776  ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
     780  return ccio_map_single(dev, page_address(page) + offset, size,
     942  ** correct virtual address associated with each DMA page.
     954  ** o page/offset contain the virtual address.
     1075 /* KLUGE - unmap_sg calls unmap_page for each mapped page */
     1260 /* We could use larger page size…
     …

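Both PA-RISC IOMMUs above implement their .map_page DMA operation the same way: lower the (page, offset) pair to a kernel virtual address with page_address() and delegate to the driver's single-buffer mapper. A hedged sketch of that shape; my_map_page() and my_map_single() are hypothetical stand-ins for the driver helpers:

/* Sketch of the delegation pattern in sba_map_page()/ccio_map_page():
 * a .map_page op reuses the single-buffer path. */
static dma_addr_t my_map_page(struct device *dev, struct page *page,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir,
			      unsigned long attrs)
{
	/* page_address() is valid here because these drivers only map
	 * lowmem; highmem pages would need a temporary mapping. */
	return my_map_single(dev, page_address(page) + offset, size, dir);
}
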
/linux-master/drivers/net/
virtio_net.c
     330  struct page *pages;
     477  * hdr is in a separate sg buffer, and data sg buffer shares same page
     578  static void give_pages(struct receive_queue *rq, struct page *page)
     580  struct page *end;
     583  for (end = page; end->private; end = (struct page *)end->private);
     585  rq->pages = page;
     588  static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
     590  struct page *…
     716  page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, unsigned int len, unsigned int truesize, unsigned int headroom)
     817  struct page *page = virt_to_head_page(buf);
     1268 struct page *page;
     1352 struct page *page = virt_to_head_page(buf);
     1435 struct page *page = virt_to_head_page(buf);
     1480 struct page *page = buf;
     1500 struct page *page;
     1584 struct page *page;
     1657 mergeable_xdp_get_buf(struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *ctx, unsigned int *frame_sz, int *num_buf, struct page **page, int offset, unsigned int *len, struct virtio_net_hdr_mrg_rxbuf *hdr)
     1743 struct page *page = virt_to_head_page(buf);
     1802 struct page *page = virt_to_head_page(buf);
     …

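give_pages()/get_a_page() at 578-590 keep a freelist of spare receive pages chained through the otherwise-unused page->private field, so the list needs no extra storage. A simplified kernel-style sketch of the idea (single-page push/pop; the real give_pages() splices whole chains, and 'head' stands in for rq->pages):

static struct page *head;	/* hypothetical list anchor */

static void sketch_give_page(struct page *page)
{
	/* New page points at the current head... */
	page->private = (unsigned long)head;
	/* ...and becomes the new head. */
	head = page;
}

static struct page *sketch_get_page(gfp_t gfp)
{
	struct page *p = head;

	if (p) {
		/* Pop from the freelist and clear the link. */
		head = (struct page *)p->private;
		p->private = 0;
	} else {
		/* Freelist empty: fall back to the allocator. */
		p = alloc_page(gfp);
	}
	return p;
}
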
/linux-master/drivers/net/ethernet/wangxun/libwx/
wx_type.h
     846  struct page *page;	/* member of struct wx_rx_buffer */

wx_lib.c
     175  /* If the page was released, just unmap it. */
     177  page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
     192  *rx_buffer_pgcnt = page_count(rx_buffer->page);
     197  prefetchw(rx_buffer->page);
     228  /* the page has been released from the ring */
     232  rx_buffer->page = NULL;
     249  void *page_addr = page_address(rx_buffer->page) +
     252  /* prefetch first cache line of first page */
     272  page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
     281  skb_add_rx_frag(skb, 0, rx_buffer->page,
     306  struct page *page = bi->page;
     …

/linux-master/drivers/net/ethernet/hisilicon/hns3/
hns3_enet.c
     1036 struct page *page;
     1058 page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
     1060 if (!page) {
     1065 dma = dma_map_page(ring_to_dev(ring), page, 0,
     1073 tx_spare->buf = page_address(page);
     1079 put_page(page);
     3367 struct page *p;
     3765 /* Avoid re-using remote or pfmem page */
     3771 /* Rx page ca…
     …

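The 1036-1079 hits show the usual NUMA-aware allocate-then-DMA-map sequence for the TX spare buffer, with put_page() as the unwind path. A sketch under stated assumptions: names are illustrative, and __GFP_COMP is added here so that a single put_page() is a valid way to free a high-order block:

/* Sketch only: allocate pages on the device's NUMA node, DMA-map
 * them, and unwind if mapping fails. */
static void *alloc_mapped_buf(struct device *dev, unsigned int order,
			      dma_addr_t *dma)
{
	struct page *page;

	page = alloc_pages_node(dev_to_node(dev),
				GFP_KERNEL | __GFP_COMP, order);
	if (!page)
		return NULL;

	*dma = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		/* Compound page: one put_page() drops the whole block. */
		put_page(page);
		return NULL;
	}
	return page_address(page);
}
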
/linux-master/drivers/md/dm-vdo/
Makefile
     50   indexer/index-page-map.o \

/linux-master/drivers/iommu/
tegra-smmu.c
     60   struct page **pts;
     61   struct page *pd;
     127  /* page table definitions */
     322  /* TODO: free page directory and page tables */
     554  /* Set the page directory entry first */
     557  /* The flush the page directory entry from caches */
     567  static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
     579  struct page *pt_page;
     592  as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, dma_addr_t *dmap,
     593             struct page *page)
     640  struct page *page = as->pts[pde];
     679  struct page *page = as->pts[pde];
     720  struct page *page;
     …

sun50i-iommu.c
     149  * The Allwinner H6 IOMMU uses a 2-level page table.
     156  * pointing to a 4kB page of physical memory.
     232  * Each page entry will then have a reference to the domain they are
     233  * affected to, so that we can actually enforce them on a per-page
     240  * doesn't seem to restrict page access on a per-device basis. And
     241  * then we will use the relevant domain index when generating the page
     272  static u32 sun50i_mk_pte(phys_addr_t page, int prot)
     287  page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
     288  return page | flags | SUN50I_PTE_PAGE_VALID;
     882  * If the address is not in the page table…
     …

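sun50i_mk_pte() at 272-288 builds a page table entry by masking the physical address down to its page-frame field and OR-ing in permission flags plus a valid bit. A standalone illustration of the same construction; the mask and bit positions below are invented for the example, not the H6's real layout:

#include <stdint.h>

#define PTE_PAGE_ADDRESS_MASK	0xfffff000u	/* bits [31:12]: 4K frame */
#define PTE_VALID		(1u << 0)	/* illustrative bit values */
#define PTE_READABLE		(1u << 1)
#define PTE_WRITABLE		(1u << 2)

static uint32_t mk_pte(uint64_t paddr, int writable)
{
	uint32_t flags = PTE_READABLE | (writable ? PTE_WRITABLE : 0);

	/* The low 12 bits (page offset) are dropped; the hardware only
	 * stores the 4K-aligned frame number plus flags. */
	return ((uint32_t)paddr & PTE_PAGE_ADDRESS_MASK) | flags | PTE_VALID;
}
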
rockchip-iommu.c
     35   #define RK_MMU_PAGE_FAULT_ADDR 0x0C	/* IOVA of last page fault */
     64   #define RK_MMU_CMD_PAGE_FAULT_DONE 5	/* Clear page fault */
     68   #define RK_MMU_IRQ_PAGE_FAULT 0x01	/* page fault */
     79   * Support mapping any size that fits in one page table:
     86   u32 *dt;	/* page directory table */
     89   spinlock_t dt_lock;	/* lock for modifying page directory table */
     102  u32 (*mk_ptentries)(phys_addr_t page, int prot);
     143  * The Rockchip rk3288 iommu uses a 2-level page table.
     149  * a 4 KB page of physical memory.
     151  * The DT and each PT fits in a single 4 KB page (…
     269  rk_mk_pte(phys_addr_t page, int prot)
     289  rk_mk_pte_v2(phys_addr_t page, int prot)
     …

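The rk3288 comment at 143-151 describes the classic 32-bit 2-level split: a 1024-entry directory, 1024-entry page tables, and 4 KB pages, so an IOVA decomposes into 10 + 10 + 12 bits. A runnable sketch of that decomposition (macro names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DTE_INDEX(iova)   (((iova) >> 22) & 0x3ff)	/* bits [31:22] */
#define PTE_INDEX(iova)   (((iova) >> 12) & 0x3ff)	/* bits [21:12] */
#define PAGE_OFFSET(iova) ((iova) & 0xfff)		/* bits [11:0]  */

int main(void)
{
	uint32_t iova = 0x12345678;

	/* 0x12345678 -> dte=72, pte=837, off=0x678 */
	printf("dte=%u pte=%u off=0x%03x\n",
	       DTE_INDEX(iova), PTE_INDEX(iova), PAGE_OFFSET(iova));
	return 0;
}
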
iommu-pages.h
     15   * All page allocations that should be reported to as "iommu-pagetables" to
     17   * page-tables and other per-iommu_domain configuration structures.
     24   * __iommu_alloc_account - account for newly allocated page.
     25   * @page: head struct page of the page.
     26   * @order: order of the page
     28   static inline void __iommu_alloc_account(struct page *page, int order)
     32   mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGE…
     41   __iommu_free_account(struct page *page, int order)
     58   struct page *page;
     74   __iommu_free_pages(struct page *page, int order)
     94   struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
     113  struct page *page = __iommu_alloc_pages(gfp, order);
     175  iommu_put_pages_list(struct list_head *page)
     …

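The wrappers above pair every page-table allocation with a per-node VM counter update via mod_node_page_state(), so the memory shows up in per-node statistics. A kernel-style sketch of that pairing; NR_SECONDARY_PAGETABLE is used here as a stand-in for whichever counter item the header actually updates (the hit at line 32 is truncated):

/* Sketch only: account an order-N allocation as 2^N base pages. */
static struct page *acct_alloc_pages(int nid, gfp_t gfp, int order)
{
	struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);

	if (!page)
		return NULL;
	mod_node_page_state(page_pgdat(page), NR_SECONDARY_PAGETABLE,
			    1 << order);
	return page;
}

static void acct_free_pages(struct page *page, int order)
{
	/* Undo the accounting before returning the pages. */
	mod_node_page_state(page_pgdat(page), NR_SECONDARY_PAGETABLE,
			    -(1 << order));
	__free_pages(page, order);
}
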
dma-iommu.c
     79   /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
     666  * If the geometry and dma_range_map include address 0, we reserve that page
     683  /* Use the smallest supported page size for IOVA granularity */
     734  * page flags.
     739  * Return: corresponding IOMMU API page protection flags
     878  static void __iommu_dma_free_pages(struct page **pages, int count)
     885  static struct page **__iommu_dma_alloc_pages(struct device *dev,
     888  struct page **pages;
     903  struct page *page…
     1137 iommu_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs)
     1536 struct page *page = NULL, **pages = NULL;
     1576 struct page *page = NULL;
     1612 struct page *page = NULL;
     1677 struct page *page;
     …

/linux-master/drivers/iommu/intel/
svm.c
     19   #include <asm/page.h>
     37   pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
     44   pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
     66   pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
     121  pr_err("%s SVM disabled, incompatible 1GB page capability\n",
     163  /* This might end up being called from exit_mmap(), *before* the page
     166  * get called when the page tables are cleared. So we need to protect
     167  * against hardware accessing those page tables.
     172  * page) so that we end up taking a fault that the hardware really
     278  * intel_drain_pasid_prq - Drain page request…
     …

iommu.h
     31   * VT-d hardware uses 4KiB page size regardless of host page size.
     597  * through the first level page table,
     642  /* parent page table which the user domain is nested on */
     644  /* user page table pointer (in GPA) */
     646  /* page table attributes */
     817  * 7: super page
     938  static inline unsigned long page_to_dma_pfn(struct page *pg)
     1050 /* Returns a number of VTD pages, but aligned to MM page size */

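Per the comment at line 31, VT-d always works in 4 KiB pages, so page_to_dma_pfn() has to rescale host pfns when the host page size is larger. A runnable illustration of that shift, assuming the usual 12-bit VT-d page shift:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT	12	/* fixed 4 KiB VT-d page */

static uint64_t mm_to_dma_pfn(uint64_t mm_pfn, unsigned int page_shift)
{
	/* Scale by the ratio of host page size to 4 KiB. */
	return mm_pfn << (page_shift - VTD_PAGE_SHIFT);
}

int main(void)
{
	/* On a 4K host the mapping is the identity... */
	printf("%llu\n", (unsigned long long)mm_to_dma_pfn(100, 12)); /* 100 */
	/* ...on a 64K host, mm pfn 100 starts at VT-d pfn 1600. */
	printf("%llu\n", (unsigned long long)mm_to_dma_pfn(100, 16)); /* 1600 */
	return 0;
}
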
iommu.c
     272  pr_info("Disable supported super page\n");
     458  * 1-level super page supports page size of 2MiB, 2-level super page
     459  * supports page size of both 2MiB and 1GiB.
     968  * Free the page table if we're below the level we want to
     983  * clear last level (leaf) ptes and free page table pages below the
     1004 /* When a page at a given level is being unlinked from its parent, we don't
     1007 know the hardware page-walk will no longer touch them.
          The 'pte' argument is the *parent* PTE, pointing to the page that…
     …

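The superpage sizes quoted at 458-459 fall out of the 9-bits-per-level layout: a leaf at level N covers 4 KiB shifted by 9*N bits, giving 2 MiB at level 1 and 1 GiB at level 2. A runnable check of that arithmetic:

#include <stdio.h>

int main(void)
{
	for (int level = 0; level <= 2; level++) {
		/* Each table level resolves 9 address bits on top of
		 * the 12-bit base page. */
		unsigned long long size = 4096ULL << (9 * level);

		/* level 0: 4 KiB, level 1: 2 MiB, level 2: 1 GiB */
		printf("level %d leaf covers %llu bytes\n", level, size);
	}
	return 0;
}
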
/linux-master/drivers/iommu/arm/arm-smmu/
arm-smmu.h
     426  u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
     427  void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
     429  u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
     430  void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
     436  void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
     474  static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
     477  return smmu->impl->read_reg(smmu, page, offset);
     478  return readl_relaxed(arm_smmu_page(smmu, page) + offset);
     481  static inline void arm_smmu_writel(struct arm_smmu_device *smmu, int page,
     485  smmu->impl->write_reg(smmu, page, offset, …
     490  arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset)
     497  arm_smmu_writeq(struct arm_smmu_device *smmu, int page, int offset, u64 val)
     …

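arm_smmu_readl() at 474-478 shows the accessor pattern used throughout this header: if the platform's impl ops override register access (e.g. for errata workarounds or hypervisor-mediated MMIO), call the hook; otherwise fall back to a plain relaxed read of the selected register page. A simplified sketch; the types and fixed 4 KiB page stride are assumptions for the example:

struct my_impl {
	u32 (*read_reg)(void __iomem *base, int page, int offset);
};

static inline u32 my_readl(const struct my_impl *impl, void __iomem *base,
			   int page, int offset)
{
	/* Platform hook wins if one is installed... */
	if (impl && impl->read_reg)
		return impl->read_reg(base, page, offset);
	/* ...otherwise read the register from its banked page. */
	return readl_relaxed(base + page * SZ_4K + offset);
}
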
arm-smmu.c
     210  static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
     217  return smmu->impl->tlb_sync(smmu, page, sync, status);
     219  arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
     222  reg = arm_smmu_readl(smmu, page, status);
     782  /* Update the domain's page sizes to reflect the page table format */
     794  /* Initialise the context bank with our page table cfg */
     827  /* Publish page table ops for map/unmap */
     853  * Disable the context bank and free the page tables before freeing
     1852 * What the page table…
     …

arm-smmu-qcom.c
     22   static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
     28   arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
     31   reg = arm_smmu_readl(smmu, page, status);
     583  * qcom_smmu_impl_of_match[] table, and GPU per-process page-…

/linux-master/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c
     1971 * range size, which must be a power of two number of page sizes. We
     2101 /* Get the leaf page size */
     2106 /* Convert page size of 12,14,16 (log2) to 1,2,3 */
     3127 arm_smmu_init_one_queue(struct arm_smmu_device *smmu, struct arm_smmu_queue *q, void __iomem *page, unsigned long prod_off, unsigned long cons_off, size_t dwords, const char *name)
     3158 q->prod_reg = page + prod_off;
     3159 q->cons_reg = page + cons_off;

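The conversion noted at line 2106 encodes the leaf granule for range-based invalidation: log2 page sizes 12/14/16 (4K/16K/64K pages) become the 2-bit values 1/2/3 via (log2 - 10) / 2. A runnable check:

#include <stdio.h>

int main(void)
{
	for (int log2sz = 12; log2sz <= 16; log2sz += 2) {
		/* 12 -> 1, 14 -> 2, 16 -> 3 */
		int tg = (log2sz - 10) / 2;

		printf("page size 2^%d -> TG encoding %d\n", log2sz, tg);
	}
	return 0;
}
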
/linux-master/drivers/iommu/amd/
io_pgtable_v2.c
     3    * CPU-agnostic AMD IO page table v2 allocator.
     49   static inline u64 set_pgtable_attr(u64 *page)
     56   return (iommu_virt_to_phys(page) | prot);
     75   /* Large page */
     130  /* Allocate page table */
     134  u64 *pte, *page;
     155  page = iommu_alloc_page_node(nid, gfp);
     156  if (!page)
     159  __npte = set_pgtable_attr(page);
     162  iommu_free_page(page);
     …

init.c
     156  /* Guest page table level */
     2067 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
     2155 pr_info("V2 page table enabled (Paging mode : %d level)\n",
     3079 /* 5 level guest page table */
     3196 * configured with V1 page table (DTE[Mode] = 0 is not supported).
     3204 pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
     3656 /* CPU page table size should match IOMMU guest page table size */
     3663 * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without
     3664 * setting up IOMMUv1 page table…
     3767 iommu_page_make_shared(void *page)
     3807 void *page;
     …

io_pgtable.c
     3    * CPU-agnostic AMD IO page table allocator.
     73   * The functions below are used to create the page table mappings for
     80   struct page *p = virt_to_page(pt);
     148  * This function is used to add another level to an IO page table. Adding
     204  u64 *pte, *page;
     211  * page-table.
     254  page = iommu_alloc_page_node(domain->nid, gfp);
     256  if (!page)
     259  __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
     263  iommu_free_page(page);
     …

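Both AMD io_pgtable files grow the tree the same way (hits 254-263 here, 155-162 in io_pgtable_v2.c): allocate a candidate table page, try to publish it into the empty parent PTE with a compare-and-swap, and free it if another CPU raced in first. A kernel-style sketch; the helper names and PTE encoding are illustrative, not the driver's:

/* Sketch only: lock-free install of a new page-table level. */
static u64 *install_table(u64 *parent_pte, int nid, gfp_t gfp)
{
	u64 *page;
	u64 __npte, old;

	page = my_alloc_table_page(nid, gfp);	/* zeroed table page */
	if (!page)
		return NULL;

	__npte = my_virt_to_phys(page) | MY_PTE_PRESENT;

	/* Publish only if the parent slot is still empty. */
	old = cmpxchg64(parent_pte, 0ULL, __npte);
	if (old != 0) {
		/* Lost the race: another CPU installed a table first,
		 * so drop ours and walk into theirs. */
		my_free_table_page(page);
		return my_phys_to_virt(old & MY_PTE_ADDR_MASK);
	}
	return page;
}
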