Searched refs:page_size (Results 76 - 100 of 413) sorted by path

/linux-master/drivers/gpu/drm/i915/
intel_memory_region.h 60 resource_size_t page_size,
/linux-master/drivers/gpu/drm/i915/selftests/
intel_memory_region.c 58 resource_size_t page_size; local
65 page_size = PAGE_SIZE;
66 max_pages = div64_u64(total, page_size);
70 resource_size_t size = page_num * page_size;
92 if (page_num * page_size <= rem) {
mock_region.c 64 resource_size_t page_size,
60 mock_object_init(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, resource_size_t offset, resource_size_t size, resource_size_t page_size, unsigned int flags) argument
/linux-master/drivers/gpu/drm/imagination/
pvr_rogue_mips.h 286 #define ROGUE_MIPSFW_TLB_GET_PAGE_MASK(page_size) ((((page_size) << 11) - 1) & ~0x7FF)
/linux-master/drivers/gpu/drm/nouveau/
nouveau_dmem.c 633 *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
642 if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_cmdbuf.c 181 * @page_size: Size of requested command buffer space in pages.
186 size_t page_size; member in struct:vmw_cmdbuf_alloc_info
772 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
775 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
803 info.page_size = PFN_UP(size);
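
The vmwgfx hit above carries page_size as a page count derived with PFN_UP(). A minimal standalone sketch of that bytes-to-pages rounding, assuming 4 KiB pages rather than vmwgfx's actual configuration:

/* Standalone illustration (not vmwgfx code): PFN_UP-style rounding of a
 * byte count up to whole pages, assuming PAGE_SHIFT = 12. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long pfn_up(unsigned long size)
{
        return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
        printf("%lu\n", pfn_up(1));      /* 1 page  */
        printf("%lu\n", pfn_up(4096));   /* 1 page  */
        printf("%lu\n", pfn_up(4097));   /* 2 pages */
        return 0;
}
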
/linux-master/drivers/gpu/drm/xe/display/
xe_plane_initial.c 61 u64 page_size = xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K; local
68 base = round_down(plane_config->base, page_size);
82 phys_base = pte & ~(page_size - 1);
118 page_size);
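
The xe_plane_initial hit masks a PTE down to a page boundary with round_down() and ~(page_size - 1). A standalone sketch of that power-of-two masking, with made-up values for illustration:

/* Standalone sketch (not xe code): extract the page-aligned base from an
 * address for a power-of-two page_size, as the hit does with round_down()
 * and pte & ~(page_size - 1). */
#include <stdint.h>
#include <stdio.h>

static uint64_t round_down_pow2(uint64_t x, uint64_t page_size)
{
        return x & ~(page_size - 1);    /* valid only for power-of-two sizes */
}

int main(void)
{
        uint64_t page_size = 0x10000;   /* SZ_64K */
        uint64_t pte = 0x12345678;

        printf("0x%llx\n", (unsigned long long)round_down_pow2(pte, page_size));
        /* prints 0x12340000 */
        return 0;
}
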
/linux-master/drivers/gpu/drm/xe/
xe_bo.c 1820 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) argument
1826 xe_assert(xe, page_size <= PAGE_SIZE);
1834 page_size, &cur); local
1840 page_size, &cur);
1845 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) argument
1849 return __xe_bo_addr(bo, offset, page_size);
2275 u32 page_size = max_t(u32, PAGE_SIZE, local
2280 page_size);
xe_bo.h 185 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
186 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
189 xe_bo_main_addr(struct xe_bo *bo, size_t page_size) argument
191 return xe_bo_addr(bo, 0, page_size);
xe_lmtt.c 383 u64 page_size = lmtt_page_size(lmtt); local
388 lmtt_assert(lmtt, IS_ALIGNED(start, page_size));
389 lmtt_assert(lmtt, IS_ALIGNED(bo->size, page_size));
403 xe_res_next(&cur, page_size);
404 start += page_size;
xe_pt.c 906 u64 page_size = 1ull << xe_pt_shift(xe_pt->level); local
911 start = entry->ofs * page_size;
912 end = start + page_size * entry->qwords;
/linux-master/drivers/hv/
hv_balloon.c 1645 * and page_size.
1653 range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
1932 seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
/linux-master/drivers/infiniband/core/
umem.c 91 unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); local
94 if (!(pgsz_bitmap & page_size))
96 return page_size;
umem_odp.c 59 size_t page_size = 1UL << umem_odp->page_shift; local
64 start = ALIGN_DOWN(umem_odp->umem.address, page_size);
69 end = ALIGN(end, page_size);
70 if (unlikely(end < page_size))
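
The umem_odp hit aligns an address range outward to page_size and rejects wrap-around with the end < page_size test. A standalone sketch of that pattern, not the kernel code itself:

/* Standalone sketch: align [addr, addr + len) outward to a power-of-two
 * page_size and catch the overflow case the "end < page_size" test guards. */
#include <stdint.h>
#include <stdio.h>

static int align_range(uint64_t addr, uint64_t len, uint64_t page_size,
                       uint64_t *start, uint64_t *end)
{
        *start = addr & ~(page_size - 1);                        /* ALIGN_DOWN */
        *end = (addr + len + page_size - 1) & ~(page_size - 1);  /* ALIGN      */
        if (*end < page_size)           /* end wrapped around UINT64_MAX */
                return -1;
        return 0;
}

int main(void)
{
        uint64_t s, e;

        if (align_range(0x1234, 0x100, 0x1000, &s, &e) == 0)
                printf("[0x%llx, 0x%llx)\n",
                       (unsigned long long)s, (unsigned long long)e);
        /* prints [0x1000, 0x2000) */
        return 0;
}
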
verbs.c 2646 * @page_size: page vector desired page size
2659 unsigned int *meta_sg_offset, unsigned int page_size)
2665 mr->page_size = page_size;
2680 * @page_size: page vector desired page size
2685 * - Each sg element must either be aligned to page_size or virtually
2688 * - The last sg element is allowed to have length less than page_size.
2689 * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
2692 * constraints holds and the page_size argument is ignored.
2700 unsigned int *sg_offset, unsigned int page_size)
2656 ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, int data_sg_nents, unsigned int *data_sg_offset, struct scatterlist *meta_sg, int meta_sg_nents, unsigned int *meta_sg_offset, unsigned int page_size) argument
2699 ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset, unsigned int page_size) argument
[all...]
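
The verbs.c comment lines above document the page_size constraints for ib_map_mr_sg(). A simplified restatement of those documented rules as a standalone checker over (addr, len) segments; this is an illustration only, not the kernel's implementation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint64_t len; };

static bool segs_ok_for_page_size(const struct seg *s, int n, uint64_t page_size)
{
        for (int i = 0; i < n; i++) {
                /* The first element may start at an offset; later elements
                 * must either start page-aligned or continue exactly where
                 * the previous element ended (virtually contiguous). */
                if (i > 0 && (s[i].addr & (page_size - 1)) &&
                    s[i].addr != s[i - 1].addr + s[i - 1].len)
                        return false;
                /* Only the last element may end short of a page boundary,
                 * unless the next element is virtually contiguous with it. */
                if (i < n - 1 && ((s[i].addr + s[i].len) & (page_size - 1)) &&
                    s[i + 1].addr != s[i].addr + s[i].len)
                        return false;
        }
        return true;
}

int main(void)
{
        struct seg good[] = { { 0x1200, 0xe00 }, { 0x4000, 0x1000 }, { 0x8000, 0x300 } };
        struct seg bad[]  = { { 0x1200, 0x500 }, { 0x4000, 0x1000 } };

        printf("%d %d\n",
               segs_ok_for_page_size(good, 3, 0x1000),
               segs_ok_for_page_size(bad, 2, 0x1000));  /* prints 1 0 */
        return 0;
}
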
/linux-master/drivers/infiniband/hw/bnxt_re/
ib_verbs.c 2607 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
4041 unsigned long page_size; local
4052 page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
4053 if (!page_size) {
4079 umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
4081 umem_pgs, page_size);
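
The bnxt_re hit picks a page size with ib_umem_find_best_pgsz() and then sizes the mapping with ib_umem_num_dma_blocks(). A standalone sketch of the block-count arithmetic (page-aligned span divided by the chosen size), simplified from the kernel helper:

#include <stdint.h>
#include <stdio.h>

static uint64_t num_dma_blocks(uint64_t iova, uint64_t length, uint64_t pgsz)
{
        uint64_t first = iova & ~(pgsz - 1);                       /* ALIGN_DOWN */
        uint64_t last  = (iova + length + pgsz - 1) & ~(pgsz - 1); /* ALIGN      */

        return (last - first) / pgsz;
}

int main(void)
{
        /* 3 MiB of user memory starting 0x800 bytes into a 2 MiB page. */
        printf("%llu\n",
               (unsigned long long)num_dma_blocks(0x200800, 0x300000, 0x200000));
        /* prints 2 */
        return 0;
}
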
main.c 702 req.page_size = BNXT_PAGE_SHIFT;
/linux-master/drivers/infiniband/hw/cxgb4/
iw_cxgb4.h 386 u32 page_size:5; member in struct:tpt_attributes
mem.c 266 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
274 u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
323 FW_RI_TPTE_PS_V(page_size));
386 mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
464 mhp->attr.page_size = 0;
575 mhp->attr.page_size = shift - 12;
270 write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, u32 *stag, u8 stag_state, u32 pdid, enum fw_ri_stag_type type, enum fw_ri_mem_perms perm, int bind_enabled, u32 zbva, u64 to, u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) argument
qp.c 805 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
834 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
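
The cxgb4 hits encode the page size as log2(bytes) - 12, i.e. relative to 4 KiB, and decode it with 1U << (field + 12). A standalone round-trip sketch of that encoding:

#include <stdint.h>
#include <stdio.h>

static uint8_t encode_ps(uint32_t page_size_bytes)
{
        uint8_t shift = 0;

        while ((1U << shift) < page_size_bytes)
                shift++;
        return shift - 12;              /* 4 KiB -> 0, 64 KiB -> 4, 2 MiB -> 9 */
}

static uint32_t decode_ps(uint8_t field)
{
        return 1U << (field + 12);
}

int main(void)
{
        printf("%d -> %u\n", encode_ps(65536), decode_ps(encode_ps(65536)));
        /* prints 4 -> 65536 */
        return 0;
}
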
/linux-master/drivers/infiniband/hw/erdma/
erdma_verbs.c 87 ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
93 ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
162 ilog2(mr->mem.page_size)) |
191 u32 page_size; local
201 page_size = SZ_32M;
203 ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
218 ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT);
550 rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
769 mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
770 mem->page_offset = start & (mem->page_size - 1);
[all...]
erdma_verbs.h 115 u32 page_size; member in struct:erdma_mem
/linux-master/drivers/infiniband/hw/hns/
hns_roce_alloc.c 69 u32 trunk_size, page_size, alloced_size; local
85 page_size = 1 << buf->page_shift;
92 buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
126 buf->npages = DIV_ROUND_UP(alloced_size, page_size);
hns_roce_mr.c 468 mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
659 size_t page_size = 1 << page_shift; local
663 if (pages[i] - pages[i - 1] != page_size)
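
The hns_roce_mr hit treats a run of page addresses as physically contiguous only if each entry is exactly page_size past the previous one. A standalone sketch of that check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pages_contiguous(const uint64_t *pages, int count, uint64_t page_size)
{
        for (int i = 1; i < count; i++)
                if (pages[i] - pages[i - 1] != page_size)
                        return false;
        return true;
}

int main(void)
{
        uint64_t ok[]  = { 0x10000, 0x11000, 0x12000 };
        uint64_t gap[] = { 0x10000, 0x11000, 0x13000 };

        printf("%d %d\n", pages_contiguous(ok, 3, 0x1000),
               pages_contiguous(gap, 3, 0x1000));  /* prints 1 0 */
        return 0;
}
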
/linux-master/drivers/infiniband/hw/irdma/
ctrl.c 1062 enum irdma_page_size page_size; local
1067 if (info->page_size == 0x40000000)
1068 page_size = IRDMA_PAGE_SIZE_1G;
1069 else if (info->page_size == 0x200000)
1070 page_size = IRDMA_PAGE_SIZE_2M;
1072 page_size = IRDMA_PAGE_SIZE_4K;
1095 FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1130 enum irdma_page_size page_size; local
1135 if (info->page_size == 0x40000000)
1136 page_size = IRDMA_PAGE_SIZE_1G;
1298 enum irdma_page_size page_size; local
[all...]
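
The irdma hits classify info->page_size into one of three host page sizes, defaulting to 4 KiB. A standalone sketch of that mapping, using illustrative enum names rather than the driver's own:

#include <stdint.h>
#include <stdio.h>

enum page_size_sel { PAGE_SIZE_4K, PAGE_SIZE_2M, PAGE_SIZE_1G };

static enum page_size_sel classify_page_size(uint64_t bytes)
{
        if (bytes == 0x40000000)        /* 1 GiB */
                return PAGE_SIZE_1G;
        if (bytes == 0x200000)          /* 2 MiB */
                return PAGE_SIZE_2M;
        return PAGE_SIZE_4K;            /* default */
}

int main(void)
{
        printf("%d %d %d\n",
               classify_page_size(0x200000),
               classify_page_size(0x40000000),
               classify_page_size(0x1000));     /* prints 1 2 0 */
        return 0;
}
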

Completed in 393 milliseconds
