Searched refs:npages (Results 1 - 25 of 302) sorted by relevance


/linux-master/io_uring/
memmap.h
4 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
5 void io_pages_free(struct page ***pages, int npages);
7 struct page **pages, int npages);
9 void *io_pages_map(struct page ***out_pages, unsigned short *npages,
11 void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
14 void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
memmap.c
59 void *io_pages_map(struct page ***out_pages, unsigned short *npages, argument
80 *npages = nr_pages;
86 *npages = 0;
90 void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages, argument
98 if (put_pages && *npages) {
107 *npages = 1;
108 else if (*npages > 1)
110 for (i = 0; i < *npages; i++)
117 *npages = 0;
120 void io_pages_free(struct page ***pages, int npages) argument
132 io_pin_pages(unsigned long uaddr, unsigned long len, int *npages) argument
167 __io_uaddr_map(struct page ***pages, unsigned short *npages, unsigned long uaddr, size_t size) argument
231 io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma, struct page **pages, int npages) argument
247 unsigned int npages; local
[all...]
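
io_pin_pages() takes a raw user address and length and reports how many pages the range spans before pinning them. A minimal userspace sketch of that page-count arithmetic, with a hypothetical fixed 4 KiB page (illustrative, not the kernel's code):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Pages spanned by [uaddr, uaddr + len): the count the kernel
     * derives before calling pin_user_pages_fast(). */
    static unsigned long span_npages(unsigned long uaddr, unsigned long len)
    {
        unsigned long first = uaddr >> PAGE_SHIFT;
        unsigned long last  = (uaddr + len - 1) >> PAGE_SHIFT;
        return last - first + 1;
    }

    int main(void)
    {
        /* A 2-byte buffer straddling a page boundary pins two pages. */
        printf("%lu\n", span_npages(PAGE_SIZE - 1, 2)); /* 2 */
        printf("%lu\n", span_npages(0, PAGE_SIZE));     /* 1 */
        return 0;
    }
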
/linux-master/drivers/infiniband/hw/hfi1/
user_pages.c
30 u32 nlocked, u32 npages)
47 if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)
66 if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))
74 if (nlocked + npages > cache_limit_pages)
80 int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, argument
86 ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
96 size_t npages, bool dirty)
98 unpin_user_pages_dirty_lock(p, npages, dirty);
101 atomic64_sub(npages, &mm->pinned_vm);
29 hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm, u32 nlocked, u32 npages) argument
95 hfi1_release_user_pages(struct mm_struct *mm, struct page **p, size_t npages, bool dirty) argument
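
hfi1_can_pin_pages() checks a prospective pin against the per-mm pinned_vm counter and the memlock ulimit before anything is pinned. A hedged userspace sketch of the same check-then-account pattern, using the optimistic add-then-roll-back idiom that ib_umem_get() (further down in these results) applies with atomic64_add_return(); the limit value and names here are made up:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_ulong pinned_vm;                  /* pages currently pinned */
    static const unsigned long ulimit_pages = 1024; /* stand-in for RLIMIT_MEMLOCK */

    static bool try_account_pin(unsigned long npages)
    {
        /* Optimistically add, then roll back on overshoot. */
        unsigned long new_pinned = atomic_fetch_add(&pinned_vm, npages) + npages;
        if (new_pinned > ulimit_pages) {
            atomic_fetch_sub(&pinned_vm, npages);
            return false;
        }
        return true;
    }

    static void unaccount_pin(unsigned long npages)
    {
        atomic_fetch_sub(&pinned_vm, npages);
    }

    int main(void)
    {
        printf("%d\n", try_account_pin(1000)); /* 1: fits under the limit */
        printf("%d\n", try_account_pin(100));  /* 0: would exceed the limit */
        unaccount_pin(1000);
        return 0;
    }
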
pin_system.c
20 unsigned int npages; member in struct:sdma_mmu_node
55 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) argument
60 evict_data.target = npages;
66 unsigned int start, unsigned int npages)
68 hfi1_release_user_pages(mm, pages + start, npages, false);
79 if (node->npages) {
81 node->npages);
82 atomic_sub(node->npages, &node->pq->n_locked);
116 struct sdma_mmu_node *node, int npages)
122 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
65 unpin_vector_pages(struct mm_struct *mm, struct page **pages, unsigned int start, unsigned int npages) argument
114 pin_system_pages(struct user_sdma_request *req, uintptr_t start_address, size_t length, struct sdma_mmu_node *node, int npages) argument
[all...]
user_exp_rcv.h
23 unsigned int npages; member in struct:tid_user_buf
38 unsigned int npages; member in struct:tid_rb_node
39 struct page *pages[] __counted_by(npages);
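
tid_rb_node ends with a flexible array member annotated __counted_by(npages), which lets the compiler and fortify checks bound accesses by the npages field. A small sketch of the underlying allocation pattern in plain C, with the attribute elided so it builds on older toolchains; the struct here is an illustrative stand-in, not the kernel's:

    #include <stdlib.h>
    #include <string.h>

    struct tid_rb_node_like {
        unsigned int npages;
        /* In the kernel: struct page *pages[] __counted_by(npages); */
        void *pages[];
    };

    static struct tid_rb_node_like *node_alloc(unsigned int npages)
    {
        /* One allocation covers the header plus the trailing array;
         * kernel code usually sizes this with struct_size(). */
        struct tid_rb_node_like *node =
            malloc(sizeof(*node) + npages * sizeof(node->pages[0]));
        if (node) {
            node->npages = npages;
            memset(node->pages, 0, npages * sizeof(node->pages[0]));
        }
        return node;
    }

    int main(void)
    {
        struct tid_rb_node_like *n = node_alloc(8);
        free(n);
        return 0;
    }
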
/linux-master/drivers/gpu/drm/i915/gem/selftests/
mock_dmabuf.h
13 int npages; member in struct:mock_dmabuf
mock_dmabuf.c
22 err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
27 for (i = 0; i < mock->npages; i++) {
59 for (i = 0; i < mock->npages; i++)
70 vaddr = vm_map_ram(mock->pages, mock->npages, 0);
82 vm_unmap_ram(map->vaddr, mock->npages);
99 static struct dma_buf *mock_dmabuf(int npages) argument
106 mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
111 mock->npages = npages;
112 for (i = 0; i < npages; i++) {
[all...]
huge_gem_object.c
32 unsigned int npages; /* restricted by sg_alloc_table */ local
37 if (overflows_type(obj->base.size / PAGE_SIZE, npages))
40 npages = obj->base.size / PAGE_SIZE;
45 if (sg_alloc_table(pages, npages, GFP)) {
63 if (nreal < npages) {
/linux-master/drivers/gpu/drm/i915/selftests/
scatterlist.c
53 unsigned int npages = npages_fn(n, pt->st.nents, rnd); local
61 if (sg->length != npages * PAGE_SIZE) {
63 __func__, who, npages * PAGE_SIZE, sg->length);
70 pfn += npages;
209 unsigned long npages)
211 return first + npages == last;
242 unsigned long npages = npages_fn(n, count, rnd); local
246 pfn_to_page(pfn + npages),
247 npages)) {
254 sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE,
207 page_contiguous(struct page *first, struct page *last, unsigned long npages) argument
292 const npages_fn_t *npages; local
334 const npages_fn_t *npages; local
[all...]
/linux-master/tools/testing/selftests/mm/
hmm-tests.c
181 unsigned long npages)
189 cmd.npages = npages;
270 unsigned long npages)
272 return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
277 unsigned long npages)
279 return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
295 unsigned long npages; local
302 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
303 ASSERT_NE(npages, 0);
178 hmm_dmirror_cmd(int fd, unsigned long request, struct hmm_buffer *buffer, unsigned long npages) argument
268 hmm_migrate_sys_to_dev(int fd, struct hmm_buffer *buffer, unsigned long npages) argument
275 hmm_migrate_dev_to_sys(int fd, struct hmm_buffer *buffer, unsigned long npages) argument
359 unsigned long npages; local
418 unsigned long npages; local
466 unsigned long npages; local
532 unsigned long npages; local
610 unsigned long npages; local
687 unsigned long npages; local
791 unsigned long npages; local
848 unsigned long npages; local
903 unsigned long npages; local
961 unsigned long npages; local
1009 unsigned long npages; local
1065 unsigned long npages; local
1117 unsigned long npages; local
1152 unsigned long npages; local
1244 unsigned long npages; local
1303 unsigned long npages; local
1365 unsigned long npages; local
1427 unsigned long npages; local
1469 unsigned long npages; local
1571 unsigned long npages; local
1646 unsigned long npages; local
1721 unsigned long npages; local
1775 unsigned long npages; local
1828 unsigned long npages; local
1873 gup_test_exec(int gup_fd, unsigned long addr, int cmd, int npages, int size, int flags) argument
1902 unsigned long npages; local
1990 unsigned long npages; local
[all...]
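
The hmm selftests repeatedly derive npages as ALIGN(size, page_size) >> page_shift. A runnable sketch of that rounding, with the page size queried at run time; ALIGN_UP is a local reimplementation since ALIGN() is a kernel macro:

    #include <stdio.h>
    #include <unistd.h>

    /* Round x up to power-of-two boundary a, as the kernel's ALIGN() does. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
        unsigned int page_shift = __builtin_ctzl(page_size);
        unsigned long size = page_size * 3 + 1;  /* deliberately unaligned */
        unsigned long npages = ALIGN_UP(size, page_size) >> page_shift;

        printf("size=%lu -> npages=%lu\n", size, npages); /* 4 */
        return 0;
    }
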
/linux-master/drivers/gpu/drm/etnaviv/
etnaviv_gem_prime.c
20 int npages = obj->size >> PAGE_SHIFT; local
25 return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
117 int ret, npages; local
126 npages = size / PAGE_SIZE;
129 etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
135 ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
/linux-master/lib/
kunit_iov_iter.c
49 size_t npages)
55 pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
59 got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
60 if (got != npages) {
62 KUNIT_ASSERT_EQ(test, got, npages);
65 buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
105 size_t bufsize, npages, size, copied; local
109 npages = bufsize / PAGE_SIZE;
111 scratch = iov_kunit_create_buffer(test, &spages, npages);
115 buffer = iov_kunit_create_buffer(test, &bpages, npages);
47 iov_kunit_create_buffer(struct kunit *test, struct page ***ppages, size_t npages) argument
155 size_t bufsize, npages, size, copied; local
216 iov_kunit_load_bvec(struct kunit *test, struct iov_iter *iter, int dir, struct bio_vec *bvec, unsigned int bvmax, struct page **pages, size_t npages, size_t bufsize, const struct bvec_test_range *pr) argument
264 size_t bufsize, npages, size, copied; local
318 size_t bufsize, npages, size, copied; local
373 iov_kunit_load_xarray(struct kunit *test, struct iov_iter *iter, int dir, struct xarray *xarray, struct page **pages, size_t npages) argument
411 size_t bufsize, npages, size, copied; local
469 size_t bufsize, npages, size, copied; local
533 size_t bufsize, size = 0, npages; local
612 size_t bufsize, size = 0, npages; local
690 size_t bufsize, size = 0, npages; local
[all...]
/linux-master/arch/sparc/include/asm/
iommu-common.h
44 unsigned long npages,
50 u64 dma_addr, unsigned long npages,
/linux-master/arch/powerpc/include/asm/
ultravisor.h
34 static inline int uv_share_page(u64 pfn, u64 npages) argument
36 return ucall_norets(UV_SHARE_PAGE, pfn, npages);
39 static inline int uv_unshare_page(u64 pfn, u64 npages) argument
41 return ucall_norets(UV_UNSHARE_PAGE, pfn, npages);
/linux-master/drivers/iommu/iommufd/
iova_bitmap.c
45 unsigned long npages; member in struct:iova_bitmap_map
167 unsigned long npages; local
177 npages = DIV_ROUND_UP((bitmap->mapped_total_index -
191 npages = min(npages + !!offset_in_page(addr),
194 ret = pin_user_pages_fast((unsigned long)addr, npages,
199 mapped->npages = (unsigned long)ret;
213 * Unpins the bitmap user pages and clears @npages
221 if (mapped->npages) {
222 unpin_user_pages(mapped->pages, mapped->npages);
[all...]
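
iova_bitmap sizes its pin with DIV_ROUND_UP() and adds one extra page when the user address is not page-aligned, since an unaligned start can spill the mapping into one more page. The arithmetic in standalone C, with the kernel macros reimplemented and an illustrative clamp value:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define offset_in_page(p)  ((unsigned long)(p) & (PAGE_SIZE - 1))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long addr = 0x1080;         /* not page-aligned */
        unsigned long bytes = 3 * PAGE_SIZE; /* bitmap bytes left to cover */
        unsigned long max_pin = 512;         /* illustrative clamp */

        unsigned long npages = DIV_ROUND_UP(bytes, PAGE_SIZE);
        /* Unaligned start: one more page may be touched. */
        npages = MIN(npages + !!offset_in_page(addr), max_pin);

        printf("npages=%lu\n", npages);      /* 4 */
        return 0;
    }
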
/linux-master/drivers/gpu/drm/xe/
xe_hmm.c
34 u64 i, npages; local
36 npages = xe_npages_in_range(range->start, range->end);
37 for (i = 0; i < npages; i++) {
85 u64 i, npages; local
88 npages = xe_npages_in_range(range->start, range->end);
89 pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
93 for (i = 0; i < npages; i++) {
98 ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
178 u64 npages; local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
52 s32 npages; member in struct:mlx5_pages_req
199 s32 *npages, int boot)
215 *npages = MLX5_GET(query_pages_out, out, num_pages);
353 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, argument
366 inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
374 for (i = 0; i < npages; i++) {
381 dev->priv.fw_pages_alloc_failed += (npages - i);
393 MLX5_SET(manage_pages_in, in, input_num_entries, npages);
407 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
408 func_id, npages, err);
198 mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot) argument
440 int npages = 0; local
464 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index, u32 npages) argument
490 u32 npages; local
517 reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int *nclaimed, bool event, bool ec_function) argument
621 s32 npages; local
653 s32 npages; local
[all...]
/linux-master/drivers/infiniband/hw/mthca/
mthca_allocator.c
195 int npages, shift; local
202 npages = 1;
214 npages *= 2;
217 dma_list = kmalloc_array(npages, sizeof(*dma_list),
222 for (i = 0; i < npages; ++i)
226 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
229 dma_list = kmalloc_array(npages, sizeof(*dma_list),
234 buf->page_list = kmalloc_array(npages,
240 for (i = 0; i < npages; ++i)
243 for (i = 0; i < npages; ++i)
[all...]
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_internal.c
38 unsigned int npages; /* restricted by sg_alloc_table */ local
43 if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))
46 npages = obj->base.size >> PAGE_SHIFT;
62 if (sg_alloc_table(st, npages, GFP_KERNEL)) {
71 int order = min(fls(npages) - 1, max_order);
89 npages -= 1 << order;
90 if (!npages) {
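
i915_gem_internal allocates backing store greedily: each pass takes the largest power-of-two chunk, capped by max_order, that still fits in the remaining npages. The chunking logic in isolation, with the allocation step stubbed out and an illustrative max_order:

    #include <stdio.h>

    /* fls(): index of the highest set bit, 1-based, as in the kernel. */
    static int fls_u32(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        unsigned int npages = 37; /* e.g. a 148 KiB object with 4 KiB pages */
        int max_order = 4;        /* illustrative cap: 16 pages per chunk */

        while (npages) {
            int order = fls_u32(npages) - 1;
            if (order > max_order)
                order = max_order;
            printf("chunk of %u pages\n", 1u << order); /* 16, 16, 4, 1 */
            npages -= 1u << order;
        }
        return 0;
    }
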
/linux-master/arch/sparc/kernel/
iommu.c
158 unsigned long npages)
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
204 int npages, nid; local
233 npages = size >> IO_PAGE_SHIFT;
235 while (npages--) {
251 unsigned long order, npages; local
253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
271 unsigned long flags, npages, oaddr; local
283 npages
156 alloc_npages(struct device *dev, struct iommu *iommu, unsigned long npages) argument
320 strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, enum dma_data_direction direction) argument
393 unsigned long flags, npages, ctx, i; local
478 unsigned long paddr, npages, entry, out_entry = 0, slen; local
557 unsigned long vaddr, npages, entry, j; local
627 unsigned long npages, entry; local
663 unsigned long flags, ctx, npages; local
701 unsigned long flags, ctx, npages, i; local
[all...]
pci_sun4v.c
61 unsigned long npages; /* Number of pages in list. */ member in struct:iommu_batch
75 p->npages = 0;
92 unsigned long npages = p->npages; local
101 while (npages != 0) {
105 npages,
113 npages, prot, __pa(pglist),
118 index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
136 npages -= num;
141 p->npages
186 unsigned long flags, order, first_page, npages, n; local
296 dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, dma_addr_t dvma, unsigned long iotsb_num, unsigned long entry, unsigned long npages) argument
330 unsigned long order, npages, entry; local
364 unsigned long flags, npages, oaddr; local
437 unsigned long npages; local
524 unsigned long paddr, npages, entry, out_entry = 0, slen; local
606 unsigned long vaddr, npages; local
649 unsigned long npages; local
[all...]
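
pci_sun4v accumulates pages to map in an iommu_batch and flushes them to the hypervisor in bulk, looping while the hypervisor maps fewer entries than requested (the npages -= num at line 136 above). A generic batch-and-flush sketch of that control flow; the fake flush maps at most 3 entries per call purely to exercise the partial-completion retry:

    #include <stdio.h>

    #define BATCH_MAX 8

    struct batch {
        unsigned long entries[BATCH_MAX];
        unsigned long npages;  /* number of queued entries */
    };

    /* Stand-in for the hypervisor call: consumes at most 3 entries
     * and returns how many it actually mapped. */
    static unsigned long fake_map(const unsigned long *e, unsigned long n)
    {
        (void)e;
        return n < 3 ? n : 3;
    }

    static void batch_flush(struct batch *b)
    {
        unsigned long done = 0, npages = b->npages;

        while (npages != 0) {
            unsigned long num = fake_map(b->entries + done, npages);
            printf("mapped %lu of %lu\n", num, npages);
            done += num;
            npages -= num;
        }
        b->npages = 0;
    }

    static void batch_add(struct batch *b, unsigned long entry)
    {
        b->entries[b->npages++] = entry;
        if (b->npages == BATCH_MAX)
            batch_flush(b);   /* full: push to the "hypervisor" */
    }

    int main(void)
    {
        struct batch b = { .npages = 0 };
        for (unsigned long i = 0; i < 10; i++)
            batch_add(&b, i);
        batch_flush(&b);      /* drain the remainder */
        return 0;
    }
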
/linux-master/drivers/infiniband/core/
ib_core_uverbs.c
140 ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
141 pgoff, entry->npages);
171 if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
191 for (i = 0; i < entry->npages; i++)
195 ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
196 entry->start_pgoff, entry->npages);
269 u32 xa_first, xa_last, npages; local
290 npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
291 entry->npages = npages;
[all...]
umem.c
152 unsigned long npages; local
191 npages = ib_umem_num_pages(umem);
192 if (npages == 0 || npages > UINT_MAX) {
199 new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
201 atomic64_sub(npages, &mm->pinned_vm);
211 while (npages) {
214 min_t(unsigned long, npages,
224 npages -= pinned;
228 npages, GFP_KERNEL);
[all...]
/linux-master/arch/x86/include/asm/
sev.h
218 unsigned long npages);
220 unsigned long npages);
221 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
222 void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
243 early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { } argument
245 early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { } argument
246 static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { } argument
247 static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { } argument
270 void snp_leak_pages(u64 pfn, unsigned int npages);
284 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {} argument
[all...]
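
sev.h pairs its real declarations with empty static inline stubs for the configured-out case, so callers compile unchanged and the calls fold away. The idiom reduced to a standalone example; the config symbol and function are hypothetical:

    #include <stdio.h>

    /* Defining CONFIG_MY_FEATURE would select the real declaration,
     * which would then need an out-of-line definition to link. */

    #ifdef CONFIG_MY_FEATURE
    void my_feature_mark_pages(unsigned long vaddr, unsigned long npages);
    #else
    /* Stub: same signature, no body, optimized out entirely. */
    static inline void my_feature_mark_pages(unsigned long vaddr,
                                             unsigned long npages) { }
    #endif

    int main(void)
    {
        my_feature_mark_pages(0x1000, 4); /* no-op in this build */
        printf("ok\n");
        return 0;
    }
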
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_migrate.c
49 svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, argument
65 num_bytes = npages * 8;
93 amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
106 * @npages: number of pages to copy
111 * vram address uses direct mapping of vram pages, which must have npages
125 uint64_t *vram, uint64_t npages,
138 while (npages) {
139 size = min(GTT_MAX_PAGES, npages);
164 npages -= size;
165 if (npages) {
124 svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys, uint64_t *vram, uint64_t npages, enum MIGRATION_COPY_DIR direction, struct dma_fence **mfence) argument
281 uint64_t npages = migrate->cpages; local
386 uint64_t npages = (end - start) >> PAGE_SHIFT; local
576 svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, dma_addr_t *scratch, uint64_t npages) argument
685 uint64_t npages = (end - start) >> PAGE_SHIFT; local
[all...]
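
svm_migrate_copy_memory_gart() copies in chunks of at most GTT_MAX_PAGES, advancing until npages is exhausted (lines 138-165 above). The chunking skeleton on its own; the chunk constant and the copy step are placeholders:

    #include <stdio.h>

    #define CHUNK_MAX 512 /* placeholder for GTT_MAX_PAGES */

    int main(void)
    {
        unsigned long npages = 1200;
        unsigned long offset = 0;

        while (npages) {
            unsigned long size = npages < CHUNK_MAX ? npages : CHUNK_MAX;

            /* copy pages [offset, offset + size) here */
            printf("copy %lu pages at %lu\n", size, offset);

            offset += size;
            npages -= size;
        }
        return 0;
    }
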

Completed in 578 milliseconds
