Searched refs:max_pages (Results 26 - 50 of 62) sorted by relevance

/linux-master/net/rds/
ib_frmr.c
79 pool->max_pages);
243 if (frmr->dma_npages > ibmr->pool->max_pages) {
/linux-master/fs/ceph/
addr.c
170 unsigned long max_pages = inode->i_sb->s_bdi->ra_pages; local
179 max_pages = 0;
181 max_pages = priv->file_ra_pages;
186 if (!max_pages)
189 max_len = max_pages << PAGE_SHIFT;
1018 unsigned i, nr_folios, max_pages, locked_pages = 0; local
1025 max_pages = wsize >> PAGE_SHIFT;
1033 for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
1119 max_pages = calc_pages_for(0, (u64)len);
1120 pages = kmalloc_array(max_pages,
[all...]
/linux-master/drivers/gpu/drm/i915/selftests/
i915_vma.c
577 const unsigned int max_pages = 64; local
584 obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);
601 GEM_BUG_ON(max_offset > max_pages);
602 max_offset = max_pages - max_offset;
i915_gem_gtt.c
416 const unsigned long max_pages = local
418 const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
431 for (npages = 1; npages <= max_pages; npages *= prime) {
638 const unsigned long max_pages = local
652 for_each_prime_number_from(size, 1, max_pages) {
intel_memory_region.c
60 unsigned long max_pages; local
66 max_pages = div64_u64(total, page_size);
69 for_each_prime_number_from(page_num, 1, max_pages) {
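Note: the i915_gem_gtt.c and intel_memory_region.c loops above step through object sizes using primes, so many distinct sizes up to max_pages get exercised without iterating every value. A minimal, hedged sketch of that loop shape, assuming only the for_each_prime_number_from() helper from <linux/prime_numbers.h> (the body is illustrative):

    unsigned long page_num;

    /* visit 1 and then successive primes up to max_pages */
    for_each_prime_number_from(page_num, 1, max_pages) {
            /* create and exercise an object of page_num pages here */
    }
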
/linux-master/drivers/infiniband/core/
rw.c
63 u32 max_pages; local
66 max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
68 max_pages = dev->attrs.max_fast_reg_page_list_len;
71 return min_t(u32, max_pages, 256);
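Note: rdma_rw derives a per-MR page budget from the device's fast-registration limit and caps it at 256. A hedged sketch of how a ULP might size its registration MRs from the same attribute; dev, pd and nr_io_pages are placeholders, not part of rw.c:

    u32 pages_per_mr = min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
    u32 nr_mrs = DIV_ROUND_UP(nr_io_pages, pages_per_mr);  /* MRs per I/O */
    struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, pages_per_mr);

    if (IS_ERR(mr))
            return PTR_ERR(mr);
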
/linux-master/include/uapi/linux/
fs.h
375 * @max_pages: Optional limit for number of returned pages (0 = disabled)
389 __u64 max_pages; member in struct:pm_scan_arg
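Note: struct pm_scan_arg is the argument of the PAGEMAP_SCAN ioctl on /proc/<pid>/pagemap, and max_pages bounds how many matching pages one call may report (0 means no limit; see task_mmu.c further down). A minimal user-space sketch, assuming only the definitions in this header; buf and len are caller-supplied:

    #include <fcntl.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Report regions of populated pages in [buf, buf + len), at most 64 pages. */
    static int scan_present_pages(void *buf, size_t len)
    {
            struct page_region regions[16];
            struct pm_scan_arg arg = {
                    .size          = sizeof(arg),
                    .start         = (__u64)(uintptr_t)buf,
                    .end           = (__u64)(uintptr_t)buf + len,
                    .vec           = (__u64)(uintptr_t)regions,
                    .vec_len       = 16,
                    .max_pages     = 64,              /* 0 would disable the cap */
                    .category_mask = PAGE_IS_PRESENT,
                    .return_mask   = PAGE_IS_PRESENT,
            };
            int fd = open("/proc/self/pagemap", O_RDONLY);
            int n;

            if (fd < 0)
                    return -1;
            n = ioctl(fd, PAGEMAP_SCAN, &arg);        /* >= 0: entries in regions[] */
            close(fd);
            return n;
    }
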
fuse.h
161 * - add FUSE_MAX_PAGES, add max_pages to init_out
398 * FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages
892 uint16_t max_pages; member in struct:fuse_init_out
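Note: FUSE_MAX_PAGES lets a server raise the per-request page count above the old default by filling init_out.max_pages. A hedged sketch of the INIT reply in a raw /dev/fuse server (libfuse normally handles this; in_flags stands for the flags the kernel offered in fuse_init_in):

    #include <linux/fuse.h>

    struct fuse_init_out out = {
            .major     = FUSE_KERNEL_VERSION,
            .minor     = FUSE_KERNEL_MINOR_VERSION,
            .max_write = 1 << 20,                      /* 1 MiB writes */
            .flags     = in_flags & FUSE_MAX_PAGES,    /* echo only if offered */
            .max_pages = (1 << 20) / 4096,             /* pages backing max_write */
    };
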
/linux-master/drivers/base/
memory.c
1103 * @max_pages: The maximum number of pages we'll have in this static memory
1112 * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
1115 int memory_group_register_static(int nid, unsigned long max_pages) argument
1120 .max_pages = max_pages,
1124 if (!max_pages)
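Note: memory_group_register_static() returns a memory-group id (or a negative errno) that a driver then hands to the hotplug core. A hedged sketch of the calling pattern, modelled loosely on dax/kmem; everything apart from memory_group_register_static() itself is an assumption here:

    int mgid = memory_group_register_static(nid, PFN_UP(total_len));

    if (mgid < 0)
            return mgid;
    /* hot-add ranges into the group, e.g. via add_memory_driver_managed(),
     * and unregister the group id again on teardown. */
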
/linux-master/drivers/infiniband/sw/rxe/
rxe_loc.h
64 int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
rxe_mr.c
188 int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr) argument
195 err = rxe_mr_alloc(mr, max_pages);
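Note: rxe_mr_init_fast() sizes the page table of a fast-registration MR and is reached from the verbs layer when a ULP asks for an MR able to map up to max_pages pages. A hedged sketch of that verbs-level call; pd and nr_pages are placeholders:

    struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nr_pages);

    if (IS_ERR(mr))
            return PTR_ERR(mr);
    /* mr can later be posted with an IB_WR_REG_MR work request */
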
/linux-master/tools/include/uapi/linux/
fs.h
375 * @max_pages: Optional limit for number of returned pages (0 = disabled)
389 __u64 max_pages; member in struct:pm_scan_arg
/linux-master/tools/testing/selftests/mm/
pagemap_ioctl.c
44 int max_pages, long required_mask, long anyof_mask, long excluded_mask,
55 arg.max_pages = max_pages;
65 int max_pages, long required_mask, long anyof_mask, long excluded_mask,
77 arg.max_pages = max_pages;
351 /* 5. Repeated pattern of written and non-written pages max_pages */
369 "%s Repeated pattern of written and non-written pages max_pages\n",
480 ksft_test_result(total_pages == mem_size/(page_size*2), "%s Smaller max_pages\n", __func__);
1319 arg.max_pages
43 pagemap_ioctl(void *start, int len, void *vec, int vec_len, int flag, int max_pages, long required_mask, long anyof_mask, long excluded_mask, long return_mask) argument
64 pagemap_ioc(void *start, int len, void *vec, int vec_len, int flag, int max_pages, long required_mask, long anyof_mask, long excluded_mask, long return_mask, long *walk_end) argument
[all...]
vm_util.c
42 arg.max_pages = 0;
/linux-master/drivers/iommu/iommufd/
pages.c
315 static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup, argument
319 size_t size = max_pages * elmsz;
332 static int batch_init(struct pfn_batch *batch, size_t max_pages) argument
334 return __batch_init(batch, max_pages, NULL, 0);
337 static void batch_init_backup(struct pfn_batch *batch, size_t max_pages, argument
340 __batch_init(batch, max_pages, backup, backup_len);
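Note: __batch_init() sizes a PFN batch for max_pages entries and, in the backup variant, falls back to caller-provided storage when the allocation cannot be made. A self-contained, hedged sketch of that allocate-or-fall-back pattern (not the iommufd code itself; the struct and helper names are illustrative):

    struct pfn_buf {
            unsigned long *pfns;
            size_t capacity;
    };

    static int pfn_buf_init(struct pfn_buf *b, size_t max_pages,
                            unsigned long *backup, size_t backup_len)
    {
            size_t size = max_pages * sizeof(*b->pfns);

            b->pfns = kmalloc(size, GFP_KERNEL);
            if (!b->pfns) {
                    if (!backup)
                            return -ENOMEM;
                    b->pfns = backup;        /* preallocated emergency storage */
                    size = backup_len;
            }
            b->capacity = size / sizeof(*b->pfns);
            return 0;
    }
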
/linux-master/mm/
memory_hotplug.c
968 unsigned long online_pages = 0, max_pages, end_pfn; local
975 max_pages = group->s.max_pages;
982 max_pages = nr_pages;
984 max_pages = group->d.unit_pages;
1009 nr_pages = max_pages - online_pages;
/linux-master/drivers/md/
dm-writecache.c
1835 unsigned int max_pages; local
1842 max_pages = e->wc_list_contiguous;
1844 bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE,
1851 if (unlikely(max_pages > WB_LIST_INLINE))
1852 wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
1856 if (likely(max_pages <= WB_LIST_INLINE) || unlikely(!wb->wc_list)) {
1858 max_pages = WB_LIST_INLINE;
1866 while (wbl->size && wb->wc_list_n < max_pages) {
/linux-master/kernel/module/
internal.h
79 unsigned int max_pages; member in struct:load_info
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma.h
147 u32 max_pages; member in struct:pvrdma_user_mr
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_svm.c
1989 uint64_t max_pages; local
2006 max_pages = READ_ONCE(max_svm_range_pages);
2007 _pages = min_not_zero(max_pages, min_pages);
2008 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2013 uint64_t max_pages, struct list_head *insert_list,
2020 max_pages, start, last);
2023 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2012 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, uint64_t max_pages, struct list_head *insert_list, struct list_head *update_list) argument
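Note: the loop above shrinks the global max_svm_range_pages without taking a lock; it recomputes the clamp and retries if another thread changed the value in between. A hedged, generic sketch of that cmpxchg pattern (names mirror the snippet; min_pages is the new bound being applied):

    uint64_t old_pages, new_pages;

    do {
            old_pages = READ_ONCE(max_svm_range_pages);
            /* keep the smaller non-zero limit */
            new_pages = min_not_zero(old_pages, min_pages);
    } while (cmpxchg(&max_svm_range_pages, old_pages, new_pages) != old_pages);
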
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
440 * @max_pages: maximum pages allowed
445 int max_pages)
451 if (num_q_pages > max_pages) {
444 alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, int max_pages) argument
/linux-master/drivers/infiniband/hw/mlx5/
umr.c
733 size_t max_pages = ib_umem_odp_num_pages(odp) - idx; local
735 pages_to_map = min_t(size_t, pages_to_map, max_pages);
/linux-master/fs/proc/
task_mmu.c
2078 total_pages > p->arg.max_pages) {
2079 size_t n_too_much = total_pages - p->arg.max_pages;
2370 if (!arg->max_pages)
2371 arg->max_pages = ULONG_MAX;
2487 if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
/linux-master/fs/nfs/
pnfs.c
1091 size_t max_pages = max_response_pages(server); local
1102 if (npages < max_pages)
1103 max_pages = npages;
1106 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
1111 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
1143 size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE; local
1145 nfs4_free_pages(lgp->args.layout.pages, max_pages);
/linux-master/fs/smb/client/
file.c
2724 int max_pages,
2761 if (nr_pages > max_pages) {
2786 max_pages -= nr_pages;
2793 if (max_pages <= 0 || *_len >= max_len || *_count <= 0)
2899 int max_pages = INT_MAX; local
2903 max_pages = server->smbd_conn->max_frmr_depth;
2905 max_pages -= folio_nr_pages(folio);
2907 if (max_pages > 0)
2909 max_pages, max_len, &len);
2720 cifs_extend_writeback(struct address_space *mapping, struct xa_state *xas, long *_count, loff_t start, int max_pages, loff_t max_len, size_t *_len) argument
