Searched refs:page_shift (Results 1 - 25 of 133) sorted by relevance


/linux-master/arch/powerpc/include/asm/
ultravisor.h
50 u64 page_shift)
53 page_shift);
57 u64 page_shift)
60 page_shift);
75 static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift) argument
77 return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
49 uv_page_in(u64 lpid, u64 src_ra, u64 dst_gpa, u64 flags, u64 page_shift) argument
56 uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags, u64 page_shift) argument
kvm_book3s_uvmem.h
15 unsigned long page_shift);
19 unsigned long page_shift);
54 unsigned long flags, unsigned long page_shift)
61 unsigned long flags, unsigned long page_shift)
53 kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra, unsigned long flags, unsigned long page_shift) argument
60 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra, unsigned long flags, unsigned long page_shift) argument
iommu.h
166 __u32 page_shift,
171 __u32 page_shift,
289 extern int iommu_tce_check_ioba(unsigned long page_shift,
292 extern int iommu_tce_check_gpa(unsigned long page_shift,
/linux-master/tools/testing/selftests/powerpc/mm/
bad_accesses.c
69 unsigned long i, j, addr, region_shift, page_shift, page_size; local
84 page_shift = 16;
86 page_shift = 12;
103 (1 << page_shift) >> 10,
121 for (j = page_shift - 1; j < 60; j++) {
130 addr = (base | delta) & ~((1 << page_shift) - 1);
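The bad_accesses.c lines above choose page_shift = 16 (64K pages) or 12 (4K pages) and then round an address down to a page boundary by clearing its low page_shift bits. A minimal standalone sketch of that masking idiom, with illustrative values rather than the selftest's real inputs:

#include <stdio.h>

/* Round an address down to the start of its page. 1UL << page_shift
 * is the page size, so clearing the low bits drops the in-page offset. */
static unsigned long page_align_down(unsigned long addr, unsigned int page_shift)
{
	return addr & ~((1UL << page_shift) - 1);
}

int main(void)
{
	unsigned int page_shift = 16;            /* 64K pages, one of the two cases above */
	unsigned long addr = 0x1234567890abUL;

	printf("0x%lx aligns to 0x%lx (page size %lu KiB)\n",
	       addr, page_align_down(addr, page_shift),
	       (1UL << page_shift) >> 10);
	return 0;
}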
/linux-master/drivers/infiniband/hw/hns/
hns_roce_alloc.c
63 * @page_shift: the unit size in a continuous dma address range
67 u32 page_shift, u32 flags)
76 if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
84 buf->page_shift = page_shift;
85 page_size = 1 << buf->page_shift;
87 /* Calc the trunk size and num by required size and page_shift */
134 unsigned int page_shift)
140 if (page_shift > buf->trunk_shift) {
142 page_shift, bu
66 hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 page_shift, u32 flags) argument
132 hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, int buf_cnt, struct hns_roce_buf *buf, unsigned int page_shift) argument
156 hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem, unsigned int page_shift) argument
[all...]
/linux-master/tools/testing/selftests/bpf/progs/
bpf_iter_vma_offset.c
13 __u32 page_shift = 0; variable
33 offset = address - vma->vm_start + (vma->vm_pgoff << page_shift);
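bpf_iter_vma_offset.c above recovers the file offset backing a virtual address from the VMA's start address, its page offset into the file (vm_pgoff), and page_shift. A hypothetical sketch of the same arithmetic outside the BPF iterator, using a stand-in struct rather than the kernel's vm_area_struct:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two vm_area_struct fields the calculation needs. */
struct fake_vma {
	uint64_t vm_start;   /* first virtual address covered by the mapping */
	uint64_t vm_pgoff;   /* offset into the backing file, in pages */
};

static uint64_t addr_to_file_offset(const struct fake_vma *vma,
				    uint64_t address, uint32_t page_shift)
{
	/* in-VMA offset plus the mapping's starting file offset in bytes */
	return address - vma->vm_start + (vma->vm_pgoff << page_shift);
}

int main(void)
{
	struct fake_vma vma = { .vm_start = 0x7f0000000000ULL, .vm_pgoff = 3 };
	uint32_t page_shift = 12;   /* 4K pages */

	printf("file offset: 0x%llx\n", (unsigned long long)
	       addr_to_file_offset(&vma, 0x7f0000001234ULL, page_shift));
	return 0;
}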
/linux-master/drivers/pci/endpoint/
pci-epc-mem.c
26 unsigned int page_shift = ilog2(mem->window.page_size); local
29 size >>= page_shift; local
53 unsigned int page_shift; local
73 page_shift = ilog2(page_size);
74 pages = windows[i].size >> page_shift;
183 unsigned int page_shift; local
198 page_shift = ilog2(mem->window.page_size);
200 ((phys_addr_t)pageno << page_shift);
248 unsigned int page_shift; local
260 page_shift
[all...]
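pci-epc-mem.c above derives page_shift from the window's power-of-two page size with ilog2(), counts pages by shifting the window size right, and turns an allocated page number back into an address by shifting it left. A self-contained sketch of that bookkeeping, with ilog2() reimplemented locally and made-up window numbers:

#include <stdio.h>

/* Local stand-in for the kernel's ilog2(): floor(log2(v)) for v > 0. */
static unsigned int ilog2(unsigned long v)
{
	unsigned int shift = 0;

	while (v >>= 1)
		shift++;
	return shift;
}

int main(void)
{
	unsigned long page_size = 4096;              /* must be a power of two */
	unsigned int page_shift = ilog2(page_size);
	unsigned long window_size = 1UL << 20;       /* 1 MiB window, illustrative */
	unsigned long pages = window_size >> page_shift;
	unsigned long pageno = 5;                    /* pretend allocation result */

	printf("%lu pages; page %lu starts at offset 0x%lx\n",
	       pages, pageno, pageno << page_shift);
	return 0;
}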
/linux-master/drivers/infiniband/core/
umem_odp.c
59 size_t page_size = 1UL << umem_odp->page_shift;
73 ndmas = (end - start) >> umem_odp->page_shift;
134 umem_odp->page_shift = PAGE_SHIFT;
182 odp_data->page_shift = PAGE_SHIFT;
244 umem_odp->page_shift = PAGE_SHIFT;
247 umem_odp->page_shift = HPAGE_SHIFT;
315 *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
353 unsigned int page_shift, hmm_order, pfn_start_idx; variable
365 page_shift = umem_odp->page_shift;
[all...]
/linux-master/include/linux/
kmsan.h
135 * @page_shift: page_shift passed to vmap_range_noflush().
145 unsigned int page_shift);
163 * @page_shift: page_shift argument passed to vmap_range_noflush().
171 unsigned int page_shift);
290 struct page **pages, unsigned int page_shift)
304 unsigned int page_shift)
288 kmsan_vmap_pages_range_noflush( unsigned long start, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) argument
300 kmsan_ioremap_page_range(unsigned long start, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int page_shift) argument
/linux-master/tools/testing/selftests/kvm/lib/aarch64/
processor.c
28 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
36 unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
37 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
47 unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
48 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
58 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
59 return (gva >> vm->page_shift)
[all...]
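The aarch64 selftest lines above compute per-level page-table indexes from vm->page_shift: a page-sized table holds 2^(page_shift - 3) eight-byte descriptors, so each level's index is a (page_shift - 3)-bit field whose position depends on how many levels sit below it. A generic restatement of that arithmetic (not the selftest's own helpers), assuming a four-level table and 4K pages for the example:

#include <stdint.h>
#include <stdio.h>

/* Index into translation level `level` (0 = top) of a `levels`-deep
 * table for guest virtual address gva. The bottom level's field starts
 * at page_shift; each level above it adds another (page_shift - 3) bits. */
static uint64_t pt_index(uint64_t gva, unsigned int level,
			 unsigned int levels, unsigned int page_shift)
{
	unsigned int shift = (levels - 1 - level) * (page_shift - 3) + page_shift;
	uint64_t mask = (1ULL << (page_shift - 3)) - 1;

	return (gva >> shift) & mask;
}

int main(void)
{
	uint64_t gva = 0x0000ffff8000abcdULL;   /* illustrative guest VA */
	unsigned int levels = 4, page_shift = 12;

	for (unsigned int level = 0; level < levels; level++)
		printf("level %u index: %llu\n",
		       level, (unsigned long long)pt_index(gva, level, levels, page_shift));
	return 0;
}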
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
params.c
43 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); local
48 oversized = xsk->chunk_size < (1 << page_shift);
49 WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
101 u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, argument
112 max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
119 u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, argument
122 u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
125 pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
142 u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, argument
157 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) argument
164 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) argument
201 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) argument
210 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) argument
289 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); local
319 mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev, u8 log_stride_sz, u8 log_num_strides, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) argument
348 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); local
360 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); local
386 u8 log_pkts_per_wqe, page_shift, max_log_rq_size; local
449 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); local
606 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL); local
618 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); local
974 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); local
1165 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); local
[all...]
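Among the params.c lines above, the pages_per_wqe calculation converts a log2 work-queue-entry size into a page count: a WQE larger than one page needs 1 << (log_wqe_sz - page_shift) pages, otherwise a single page suffices. A small sketch of that log-domain conversion with illustrative numbers (not values queried from a device):

#include <stdio.h>

/* An object of 2^log_size bytes spans 2^(log_size - page_shift) pages
 * when it is bigger than a page, otherwise exactly one page. */
static unsigned int pages_for_log_size(unsigned int log_size, unsigned int page_shift)
{
	return log_size > page_shift ? 1U << (log_size - page_shift) : 1U;
}

int main(void)
{
	unsigned int page_shift = 12;   /* 4K pages */

	printf("2^14 bytes -> %u pages\n", pages_for_log_size(14, page_shift));
	printf("2^10 bytes -> %u pages\n", pages_for_log_size(10, page_shift));
	return 0;
}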
params.h
63 u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
65 u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
67 u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
69 u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
71 u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
75 u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
/linux-master/include/rdma/
ib_umem_odp.h
44 unsigned int page_shift; member in struct:ib_umem_odp
67 umem_odp->page_shift;
/linux-master/arch/powerpc/kvm/
book3s_hv_uvmem.c
515 unsigned long end, unsigned long page_shift,
536 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
568 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
569 gpa, 0, page_shift);
588 unsigned long page_shift,
595 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
743 unsigned long page_shift,
780 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
781 gpa, 0, page_shift);
878 unsigned long page_shift)
513 __kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page) argument
586 kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long page_shift, struct kvm *kvm, unsigned long gpa, struct page *fault_page) argument
740 kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long gpa, struct kvm *kvm, unsigned long page_shift, bool pagein) argument
877 kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift) argument
936 kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, unsigned long flags, unsigned long page_shift) argument
1046 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, unsigned long flags, unsigned long page_shift) argument
[all...]
book3s_64_vio.c
148 if ((tbltmp->it_page_shift <= stt->page_shift) &&
150 stt->offset << stt->page_shift) &&
152 stt->size << stt->page_shift)) {
301 if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
302 (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
316 stt->page_shift = args->page_shift;
383 if (iommu_tce_check_gpa(stt->page_shift, gpa))
440 unsigned long subpages = 1ULL << (stt->page_shift
[all...]
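book3s_64_vio.c above validates the guest-supplied TCE table parameters: page_shift must describe a supported IOMMU page size between 4K (12) and 16G (34), and offset + size, both counted in pages, must not overflow once scaled by that page size. A hedged sketch of an equivalent check, using a stand-in struct rather than the KVM ioctl arguments:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the caller-supplied table description. */
struct tce_args {
	unsigned long long offset;   /* window offset, in pages */
	unsigned long long size;     /* window size, in pages */
	unsigned int page_shift;
};

static bool tce_args_valid(const struct tce_args *args)
{
	if (!args->size || args->page_shift < 12 || args->page_shift > 34)
		return false;
	/* offset + size must remain representable after a << page_shift */
	return args->offset + args->size <= (ULLONG_MAX >> args->page_shift);
}

int main(void)
{
	struct tce_args ok  = { .offset = 0, .size = 512, .page_shift = 16 };
	struct tce_args bad = { .offset = 0, .size = 512, .page_shift = 35 };

	printf("ok: %d, bad: %d\n", tce_args_valid(&ok), tce_args_valid(&bad));
	return 0;
}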
/linux-master/tools/testing/selftests/mm/
hmm-tests.c
72 unsigned int page_shift; local
95 unsigned int page_shift; local
137 self->page_shift = ffs(self->page_size) - 1;
148 self->page_shift = ffs(self->page_size) - 1;
302 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
304 size = npages << self->page_shift;
365 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
367 size = npages << self->page_shift;
424 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
426 size = npages << self->page_shift;
[all...]
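The hmm-tests.c fixture above derives page_shift from the runtime page size with ffs(page_size) - 1 and then sizes buffers as whole pages via ALIGN(size, page_size) >> page_shift. A standalone sketch of that setup; obtaining page_size from sysconf(_SC_PAGE_SIZE) is an assumption made for this sketch:

#include <stdio.h>
#include <strings.h>   /* ffs() */
#include <unistd.h>

int main(void)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGE_SIZE);
	unsigned int page_shift = ffs((int)page_size) - 1;   /* e.g. 4096 -> 12 */
	unsigned long size = 1UL << 20;                       /* 1 MiB buffer, illustrative */

	/* ALIGN(size, page_size) >> page_shift: whole pages needed for the buffer */
	unsigned long npages = ((size + page_size - 1) & ~(page_size - 1)) >> page_shift;

	printf("page_shift=%u, npages=%lu\n", page_shift, npages);
	return 0;
}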
/linux-master/tools/testing/selftests/kvm/lib/
kvm_util.c
259 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
261 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
262 (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
284 vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;
833 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
835 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
838 assert((ptr1 >> vm->page_shift)
2146 vm_calc_num_pages(unsigned int num_pages, unsigned int page_shift, unsigned int new_page_shift, bool ceil) argument
[all...]
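Among the kvm_util.c lines above, the comparisons around line 833 test whether a byte range crosses a page boundary: two addresses fall in the same page exactly when they agree after being shifted right by page_shift. A sketch of that test with illustrative values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when addr and addr + len land in different pages. */
static bool crosses_page(uint64_t addr, uint64_t len, unsigned int page_shift)
{
	return (addr >> page_shift) != ((addr + len) >> page_shift);
}

int main(void)
{
	unsigned int page_shift = 12;

	printf("0x1ff0 + 0x08: %s\n", crosses_page(0x1ff0, 0x08, page_shift) ? "crosses" : "same page");
	printf("0x1ff0 + 0x20: %s\n", crosses_page(0x1ff0, 0x20, page_shift) ? "crosses" : "same page");
	return 0;
}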
/linux-master/drivers/net/ethernet/mellanox/mlx4/
mr.c
194 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, argument
201 mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
204 mtt->page_shift = page_shift;
419 int page_shift, struct mlx4_mr *mr)
428 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
528 int npages, int page_shift, struct mlx4_mr *mr)
538 access, npages, page_shift, mr);
591 int page_shift, struct mlx4_mpt_entry *mpt_entry)
595 err = mlx4_mtt_init(dev, npages, page_shift,
417 mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) argument
527 mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) argument
589 mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct mlx4_mpt_entry *mpt_entry) argument
[all...]
/linux-master/drivers/infiniband/hw/mlx5/
umr.h
96 int page_shift, int flags);
/linux-master/tools/testing/selftests/kvm/lib/x86_64/
vmx.c
380 pte->address = paddr >> vm->page_shift;
382 pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
416 TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
424 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
506 i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
507 last = i + (region->region.memory_size >> vm->page_shift);
514 (uint64_t)i << vm->page_shift,
515 (uint64_t)i << vm->page_shift,
516 1 << vm->page_shift);
/linux-master/arch/powerpc/platforms/powernv/
pci-ioda-tce.c
50 u64 dma_offset, unsigned int page_shift)
54 tbl->it_page_shift = page_shift;
291 __u32 page_shift, __u64 window_size, __u32 levels,
298 unsigned int entries_shift = window_shift - page_shift;
314 if ((level_shift - 3) * levels + page_shift >= 55)
348 page_shift);
48 pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset, unsigned int page_shift) argument
290 pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, __u32 page_shift, __u64 window_size, __u32 levels, bool alloc_userspace_copy, struct iommu_table *tbl) argument
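pci-ioda-tce.c above sizes a multi-level TCE table: window_shift - page_shift gives the number of TCE entries as a log2, each level of an 8-byte-entry table indexes level_shift - 3 of those bits, and the layout is rejected when the indexed bits plus page_shift reach the 55-bit limit checked on the line shown. A rough restatement of that arithmetic (not the driver's code; the numbers are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int page_shift   = 16;   /* 64K IOMMU pages */
	unsigned int window_shift = 40;   /* 1 TiB DMA window */
	unsigned int levels       = 2;

	/* log2 of the number of TCEs needed to map the window */
	unsigned int entries_shift = window_shift - page_shift;
	/* spread the entry bits evenly across the levels (rounding up);
	 * 8 bytes per TCE, hence the + 3 */
	unsigned int level_shift = (entries_shift + levels - 1) / levels + 3;

	if ((level_shift - 3) * levels + page_shift >= 55)
		printf("layout rejected: exceeds the 55-bit limit\n");
	else
		printf("entries_shift=%u, level_shift=%u\n", entries_shift, level_shift);
	return 0;
}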
pci.h
286 extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
315 __u32 page_shift, __u64 window_size, __u32 levels,
326 u64 dma_offset, unsigned int page_shift);
/linux-master/tools/testing/selftests/kvm/x86_64/
vmx_apic_access_test.c
87 high_gpa = (vm->max_gfn - 1) << vm->page_shift;
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
alloc.c
81 buf->page_shift = PAGE_SHIFT;
95 if (frag->map & ((1 << buf->page_shift) - 1)) {
98 mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
99 &frag->map, buf->page_shift);
/linux-master/drivers/mtd/nand/raw/
H A Dnand_bbt.c180 from = ((loff_t)page) << this->page_shift;
396 scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,
405 scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
561 int blocktopage = this->bbt_erase_shift - this->page_shift;
666 (this->bbt_erase_shift - this->page_shift);
694 page = block << (this->bbt_erase_shift - this->page_shift);
797 page = block << (this->bbt_erase_shift - this->page_shift);
818 to = ((loff_t)page) << this->page_shift;
834 ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
841 pageoffs = page - (int)(to >> this->page_shift);
[all...]
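nand_bbt.c above converts between pages, byte offsets, and erase blocks purely with shifts: a byte offset is page << page_shift, and bbt_erase_shift - page_shift turns a block number into its first page (and gives the pages-per-block count). A small sketch of those conversions with made-up flash geometry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int page_shift      = 11;   /* 2K pages, illustrative */
	unsigned int bbt_erase_shift = 17;   /* 128K erase blocks, illustrative */
	unsigned int block = 5;

	unsigned int first_page    = block << (bbt_erase_shift - page_shift);
	uint64_t     byte_offset   = (uint64_t)first_page << page_shift;
	unsigned int pages_per_blk = 1U << (bbt_erase_shift - page_shift);

	printf("block %u: first page %u, offset 0x%llx, %u pages/block\n",
	       block, first_page, (unsigned long long)byte_offset, pages_per_blk);
	return 0;
}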

Completed in 356 milliseconds
