Cross-reference hits for the identifier "order" in linux-master, grouped by directory and file. Each hit is shown as "line: matching source text", with the symbol kind in parentheses where the indexer reported one; "…" marks entries the original listing truncated.

/linux-master/mm/
slub.c:
  52: * Lock order:
  143: * around the slab_lock operation, in order to make the slab allocator safe
  298: * disabled when slab_debug=O is used and a cache's min order increases with
  586: static inline unsigned int order_objects(unsigned int order, unsigned int size)  (argument)
  588: return ((unsigned int)PAGE_SIZE << order) / size;
  591: static inline struct kmem_cache_order_objects oo_make(unsigned int order,  (argument)
  595: (order << OO_SHIFT) + order_objects(order, size)
  620: * slabs on the per cpu partial list, in order to limit excessive
  1666: * order would …
  2178: unsigned int order = oo_order(oo);  (local)
  2302: account_slab(struct slab *slab, int order, struct kmem_cache *s, gfp_t gfp)  (argument)
  2309: PAGE_SIZE << order);  (local)
  2312: unaccount_slab(struct slab *slab, int order, struct kmem_cache *s)  (argument)
  2403: int order = folio_order(folio);  (local)
  3911: unsigned int order = get_order(size);  (local)
  4356: unsigned int order = folio_order(folio);  (local)
  4706: unsigned int order;  (local)
  4724: unsigned int order;  (local)
  4958: unsigned int order;  (local)
  5856: int order;  (local)
  6154: SLAB_ATTR_RO(order);  (variable)
  …

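The order_objects()/oo_make() hits at lines 586-595 show how SLUB packs a slab's page order and per-slab object count into a single kmem_cache_order_objects word. A minimal userspace sketch of that encoding, assuming OO_SHIFT is 16 and 4 KiB pages (plausible values, but assumptions here):

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define OO_SHIFT  16                    /* assumed; slub.c defines the real value */
    #define OO_MASK   ((1u << OO_SHIFT) - 1)

    /* objects that fit in a slab of 2^order pages (line 588's formula) */
    static unsigned int order_objects(unsigned int order, unsigned int size)
    {
        return (PAGE_SIZE << order) / size;
    }

    /* pack order and object count into one word (line 595's formula) */
    static unsigned int oo_make(unsigned int order, unsigned int size)
    {
        return (order << OO_SHIFT) + order_objects(order, size);
    }

    int main(void)
    {
        unsigned int oo = oo_make(3, 256);  /* order-3 slab of 256-byte objects */
        printf("order=%u objects=%u\n", oo >> OO_SHIFT, oo & OO_MASK);
        return 0;                           /* prints: order=3 objects=128 */
    }
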
hugetlb.c:
  59: static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)  (argument)
  62: 1 << order);
  65: static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)  (argument)
  481: * reference. In order to ensure that one file_region must hold
  1512: __destroy_compound_gigantic_folio(struct folio *folio, unsigned int order, bool demote)  (argument)
  1516: int nr_pages = 1 << order;
  1535: destroy_compound_hugetlb_folio_for_demote(struct folio *folio, unsigned int order)  (argument)
  1538: __destroy_compound_gigantic_folio(folio, order, true);
  1542: destroy_compound_gigantic_folio(struct folio *folio, unsigned int order)  (argument)
  1545: __destroy_compound_gigantic_folio(folio, order, false);
  1548: free_gigantic_folio(struct folio *folio, unsigned int order)  (argument)
  1616: free_gigantic_folio(struct folio *folio, unsigned int order)  (argument)
  1618: destroy_compound_gigantic_folio(struct folio *folio, unsigned int order)  (argument)
  2073: __prep_compound_gigantic_folio(struct folio *folio, unsigned int order, bool demote)  (argument)
  2150: prep_compound_gigantic_folio(struct folio *folio, unsigned int order)  (argument)
  2156: prep_compound_gigantic_folio_for_demote(struct folio *folio, unsigned int order)  (argument)
  2186: int order = huge_page_order(h);  (local)
  2489: unsigned int order;  (local)
  4650: hugetlb_add_hstate(unsigned int order)  (argument)
  7787: hugetlb_cma_reserve(int order)  (argument)
  …

shmem.c:
  191: * pages are allocated, in order to allow large sparse files.
  1566: pgoff_t index, unsigned int order, pgoff_t *ilx);
  2370: * but this interface is independent of which page order is used, so
  2372: * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
  2379: shmem_get_pgoff_policy(struct shmem_inode_info *info, pgoff_t index, unsigned int order, pgoff_t *ilx)  (argument)
  2385: *ilx = info->vfs_inode.i_ino + (index >> order);
  2391: shmem_get_pgoff_policy(struct shmem_inode_info *info, pgoff_t index, unsigned int order, pgoff_t *ilx)  (argument)

page_owner.c:
  25: unsigned short order;  (member of struct page_owner)
  63: * sure to signal it in order to avoid recursion.
  237: __update_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask, short last_migrate_reason, u64 ts_nsec, pid_t pid, pid_t tgid, char *comm)  (argument)
  247: for (i = 0; i < (1 << order); i++) {
  250: page_owner->order = order;
  264: __update_page_owner_free_handle(struct page_ext *page_ext, depot_stack_handle_t handle, unsigned short order, pid_t pid, pid_t tgid, u64 free_ts_nsec)  (argument)
  273: for (i = 0; i < (1 << order); i++) {
  287: void __reset_page_owner(struct page *page, unsigned short order)  (argument)
  303: __update_page_owner_free_handle(page_ext, handle, order, current…
  318: __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask)  (argument)
  802: unsigned long order = buddy_order_unsafe(page);  (local)
  …

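The loops at lines 247-250 and 273 stamp the recorded order (and owner handle) on each of the 2^order tracking slots of an allocation, so a later query on any page of the block can recover who allocated it and at what order. Shape of that bookkeeping, with a simplified, hypothetical two-field struct standing in for the real page_ext data:

    #include <stdio.h>

    #define ORDER 2                         /* the block spans 2^ORDER pages */

    struct page_owner { unsigned short order; int pid; };  /* simplified stand-in */

    int main(void)
    {
        struct page_owner ext[1 << ORDER];  /* one tracking slot per page */

        for (int i = 0; i < (1 << ORDER); i++) {
            ext[i].order = ORDER;           /* every page records the block order */
            ext[i].pid = 42;                /* stand-in for current->pid */
        }
        printf("page 3 is part of an order-%d block from pid %d\n",
               ext[3].order, ext[3].pid);
        return 0;
    }
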
internal.h:
  218: unsigned int order);
  357: * This function returns the order of a free page in the buddy system. In
  359: * page from being allocated in parallel and returning garbage as the order.
  388: * (c) a page and its buddy have the same order &&
  394: * For recording page's order, we use page_private(page).
  396: page_is_buddy(struct page *page, struct page *buddy, unsigned int order)  (argument)
  402: if (buddy_order(buddy) != order)
  421: * 1) Any buddy B1 will have an order O twin B2 which satisfies
  424: * For example, if the starting buddy (buddy2) is #8 its order
  428: * 2) Any buddy B will have an order …
  435: __find_buddy_pfn(unsigned long page_pfn, unsigned int order)  (argument)
  454: find_buddy_page_pfn(struct page *page, unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)  (argument)
  499: folio_set_order(struct folio *folio, unsigned int order)  (argument)
  520: prep_compound_head(struct page *page, unsigned int order)  (argument)
  598: int order; /* order a direct compactor needs */  (member of struct compact_control)
  936: node_reclaim(struct pglist_data *pgdat, gfp_t mask, unsigned int order)  (argument)
  …

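The comments at lines 421-428 state the buddy invariant that __find_buddy_pfn() (line 435) exploits: the order-O buddy of a block differs from it in exactly bit O of the pfn, so the lookup reduces to a single XOR. Standalone demo of that relation:

    #include <stdio.h>

    /* mirrors __find_buddy_pfn(): flip bit 'order' of the pfn */
    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
        return pfn ^ (1UL << order);
    }

    int main(void)
    {
        /* the comment's example: the buddy of pfn 8 at increasing orders */
        for (unsigned int order = 0; order < 3; order++)
            printf("order %u buddy of pfn 8 -> %lu\n",
                   order, find_buddy_pfn(8, order));
        return 0;   /* prints 9, 10, 12 */
    }
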
huge_memory.c:
  51: * By default, transparent hugepage support is disabled in order to avoid
  130: int order = highest_order(orders);  (local)
  134: addr = vma->vm_end - (PAGE_SIZE << order);
  135: if (thp_vma_suitable_order(vma, addr, order))
  137: order = next_order(&orders, order);
  451: int order;  (member of struct thpsize)
  459: int order = to_thpsize(kobj)->order;  (local)
  462: if (test_bit(order, …
  478: int order = to_thpsize(kobj)->order;  (local)
  528: thpsize_create(int order, struct kobject *parent)  (argument)
  565: int order;  (local)
  2899: int order = folio_order(folio);  (local)
  …

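Lines 130-137 scan a bitmask of candidate THP orders from largest to smallest: highest_order() finds the top set bit and next_order() clears the bit just tried and returns the next one down. The helper bodies below are illustrative stand-ins (the kernel builds them from fls_long()), but the walk is the same:

    #include <stdio.h>

    static int highest_order(unsigned long orders)
    {
        return 63 - __builtin_clzl(orders);     /* top set bit; assumes 64-bit
                                                   long and orders != 0 */
    }

    static int next_order(unsigned long *orders, int prev)
    {
        *orders &= ~(1UL << prev);              /* drop the order just tried */
        return *orders ? highest_order(*orders) : -1;
    }

    int main(void)
    {
        unsigned long orders = (1UL << 9) | (1UL << 4) | (1UL << 0);

        for (int order = highest_order(orders); order >= 0;
             order = next_order(&orders, order))
            printf("try order %d\n", order);    /* 9, then 4, then 0 */
        return 0;
    }
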
/linux-master/kernel/dma/
swiotlb.c:
  437: unsigned int order, area_order;  (local)
  464: order = get_order(nslabs << IO_TLB_SHIFT);
  465: nslabs = SLABS_PER_PAGE << order;
  467: while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
  469: order);
  472: order--;
  473: nslabs = SLABS_PER_PAGE << order;
  483: free_pages((unsigned long)vstart, order);
  494: (PAGE_SIZE << order) >> 20);
  521: free_pages((unsigned long)vstart, order);
  576: unsigned int order = get_order(bytes);  (local)
  …

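Lines 464-473 show swiotlb's fallback strategy: ask for the whole buffer as one high-order block and halve the request until the allocation succeeds or a floor is reached. A hedged userspace analogue, with malloc standing in for the page allocator and made-up constants:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define MIN_ORDER  1        /* stand-in for the IO_TLB_MIN_SLABS floor */

    int main(void)
    {
        unsigned int order = 8;             /* first try 2^8 pages = 1 MiB */
        void *vstart = NULL;

        while (order >= MIN_ORDER) {
            vstart = malloc((size_t)1 << (PAGE_SHIFT + order));
            if (vstart)
                break;
            order--;                        /* back off to half the size */
        }
        if (!vstart)
            return 1;
        printf("got an order-%u block\n", order);
        free(vstart);
        return 0;
    }
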
/linux-master/
Makefile:
  1096: export MODORDER := $(extmod_prefix)modules.order
  1446: # the built-in objects during the descend as well, in order to
  1733: # We `grep` afterwards in order to remove the directory entry itself.
  1939: -o -name '*.symtypes' -o -name 'modules.order' \

/linux-master/net/core/
skbuff.c:
  229: /* specialized page frag allocator using a single order 0 page
  590: /* The following cast might truncate high-order bits of obj_size, this
  1016: * in order to preserve any existing bits, such as bit 0 for the
  1139: * bit is only set on the head though, so in order to avoid races
  1951: int i, order, psize, new_frags;  (local)
  1960: /* We might have to allocate high order pages, so compute what minimum
  1961: * page order is needed.
  1963: order = 0;
  1964: while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
  1965: order++;
  6499: alloc_skb_with_frags(unsigned long header_len, unsigned long data_len, int order, int *errcode, gfp_t gfp_mask)  (argument)
  …

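Lines 1963-1965 compute the smallest page order at which MAX_SKB_FRAGS fragments can still cover the skb's paged data; larger payloads therefore force higher-order pages rather than more fragments. Recomputed standalone (MAX_SKB_FRAGS is 17 on typical configs; both constants are assumptions):

    #include <stdio.h>

    #define PAGE_SIZE     4096ul
    #define MAX_SKB_FRAGS 17

    int main(void)
    {
        unsigned long len = 1ul << 20;          /* 1 MiB of paged data */
        int order = 0;

        while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < len)
            order++;
        printf("need order-%d pages\n", order); /* order 4: 17 * 64 KiB >= 1 MiB */
        return 0;
    }
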
/linux-master/lib/
scatterlist.c:
  158: * kmalloc (tracked by kmemleak), in order to for that last …
  603: * @order: Second argument for alloc_pages()
  611: sgl_alloc_order(unsigned long long length, unsigned int order, bool chainable, gfp_t gfp, unsigned int *nent_p)  (argument)
  620: nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
  622: if (length > (nent << (PAGE_SHIFT + order)))
  639: elem_len = min_t(u64, length, PAGE_SIZE << order);
  640: page = alloc_pages(gfp, order);
  642: sgl_free_order(sgl, order);
  676: * @order …
  685: sgl_free_n_order(struct scatterlist *sgl, int nents, int order)  (argument)
  707: sgl_free_order(struct scatterlist *sgl, int order)  (argument)
  …

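Line 620 sizes the scatterlist: the entry count is the total length divided by the chunk size (PAGE_SIZE << order), rounded up; line 622 then rejects lengths for which that computation overflowed. The same arithmetic in isolation:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ull << PAGE_SHIFT)

    int main(void)
    {
        unsigned long long length = 3 * PAGE_SIZE + 100;    /* not chunk-aligned */
        unsigned int order = 1;                             /* 8 KiB chunks */
        unsigned long long chunk = PAGE_SIZE << order;
        unsigned long long nent = (length + chunk - 1) / chunk;  /* round_up */

        printf("%llu bytes -> %llu order-%u entries\n", length, nent, order);
        return 0;   /* 12388 bytes -> 2 order-1 entries */
    }
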
/linux-master/scripts/
Makefile.build:
  73: subdir-modorder := $(sort $(filter %/modules.order, $(obj-m)))
  90: targets-for-modules += $(obj)/modules.order
  191: # The empty.o file is created in the make process in order to determine
  320: # or a file that it includes, in order to get versioned symbols. We build a
  391: $(subdir-modorder): $(obj)/%/modules.order: $(obj)/% ;
  409: # Rule to create modules.order and dtbs-list
  419: $(obj)/modules.order: $(obj-m) FORCE
  487: need-modorder=$(if $(filter $@/modules.order, $(subdir-modorder)),1) \

/linux-master/drivers/irqchip/
irq-gic-v3-its.c:
  81: u32 order;  (member of struct its_baser)
  141: /* Convert page order to size in bytes */
  1335: * receive all VMOVP commands in the same order. The only way
  2328: its_setup_baser(struct its_node *its, struct its_baser *baser, u64 cache, u64 shr, u32 order, bool indirect)  (argument)
  2340: alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
  2346: order = get_order(GITS_BASER_PAGES_MAX * psz);
  2349: page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
  2362: free_pages((unsigned long)base, order);
  2394: gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
  2418: free_pages((unsigned long)base, order);
  2437: its_parse_indirect_baser(struct its_node *its, struct its_baser *baser, u32 *order, u32 ids)  (argument)
  2632: u32 order;  (local)
  …

/linux-master/arch/riscv/include/asm/
pgtable.h:
  300: static inline pte_t pte_mknapot(pte_t pte, unsigned int order)  (argument)
  302: int pos = order - 1 + _PAGE_PFN_SHIFT;

/linux-master/tools/testing/selftests/mm/
split_huge_page_test.c:
  362: void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_loc)  (argument)
  380: write_debugfs(PID_FMT, getpid(), (uint64_t)addr, (uint64_t)addr + fd_size, order);
  400: ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order);
  401: ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order);

/linux-master/include/linux/
mm.h:
  589: vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
  882: * Use only when the order of set/clear operations is unimportant, otherwise
  1086: * set before the order is initialised, or this may be a tail page.
  1099: * folio_order - The allocation order of a folio.
  1102: * A folio is composed of 2^order pages. See get_order() for the definition
  1103: * of order.
  1105: * Return: The order of the folio.
  1315: void split_page(struct page *page, unsigned int order);
  1387: * Pages are allocated by the slab allocator in order to provide memory
  1591: * variants) must be used in order to …
  2862: pagetable_alloc(gfp_t gfp, unsigned int order)  (argument)
  3792: set_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype)  (argument)
  3802: clear_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype)  (argument)
  3816: set_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype)  (argument)
  3818: clear_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype)  (argument)
  …

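The folio_order() kernel-doc at lines 1099-1105 defines a folio as 2^order pages and points at get_order() for the inverse mapping from a byte size to the smallest covering order. A loop-based userspace equivalent (the kernel's get_order() is fls-based and has special semantics for size 0; this sketch assumes size > 0 and 4 KiB pages):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned int get_order(unsigned long size)
    {
        unsigned int order = 0;

        while ((1UL << (PAGE_SHIFT + order)) < size)
            order++;                /* grow until 2^order pages cover size */
        return order;
    }

    int main(void)
    {
        printf("%u %u %u\n", get_order(4096), get_order(4097),
               get_order(2ul << 20));
        return 0;                   /* prints: 0 1 9 */
    }
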
/linux-master/drivers/net/ethernet/wangxun/libwx/
wx_lib.c:
  595: * order to populate the hash, checksum, protocol, and
  1524: * in order to meet this minimum size requirement.
  2400: .order = 0,
  2707: /* Setup new Tx resources and free the old Tx resources in that order.

/linux-master/arch/x86/kvm/svm/
svm.c:
  868: unsigned int order = get_order(MSRPM_SIZE);  (local)
  869: struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
  876: memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
  4334: * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that
  5171: unsigned int order = get_order(IOPM_SIZE);  (local)
  5183: iopm_pages = alloc_pages(GFP_KERNEL, order);
  5189: memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));

/linux-master/arch/x86/kvm/mmu/
mmu.c:
  797: * The lower order bits are used to refcount other cases where a hugepage is
  1716: * aggregate version in order to make the slab shrinker
  2148: * order to read guest page tables. Direct shadow pages are never
  2202: * in order to rebuild it.
  3387: * order to eliminate unnecessary PML logging. See comments in
  4295: static inline u8 kvm_max_level_for_order(int order)  (argument)
  4299: KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
  4300: order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
  4301: order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
  4303: if (order > …

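The kvm_max_level_for_order() hit at line 4295 (its body is cut off in the listing) maps a backing allocation's page order to the largest guest mapping level it can support. A sketch of that mapping under the usual 4 KiB base-page thresholds, which are assumptions here:

    #include <stdio.h>

    enum { PG_LEVEL_4K = 1, PG_LEVEL_2M, PG_LEVEL_1G };

    static int max_level_for_order(int order)
    {
        if (order >= 18)            /* 2^18 x 4 KiB = 1 GiB */
            return PG_LEVEL_1G;
        if (order >= 9)             /* 2^9  x 4 KiB = 2 MiB */
            return PG_LEVEL_2M;
        return PG_LEVEL_4K;
    }

    int main(void)
    {
        printf("%d %d %d\n", max_level_for_order(18),
               max_level_for_order(9), max_level_for_order(0));
        return 0;                   /* prints: 3 2 1 */
    }
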
/linux-master/arch/x86/events/intel/
lbr.c:
  936: * The enabled order may be different from the counter order.
  937: * Update the lbr_counters with the enabled order.
  942: int i, j, pos = 0, order[X86_PMC_IDX_MAX];  (local)
  948: order[pos++] = leader->hw.idx;
  953: order[pos++] = sibling->hw.idx;
  962: cnt = (src >> (order[j] * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK;

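Line 962 pulls one small bit-field per branch counter out of a packed word, using the order[] remap built at lines 948-953 because the enabled order can differ from the hardware counter order. Field extraction of that shape, with a 2-bit width assumed to match LBR_INFO_BR_CNTR_BITS:

    #include <stdio.h>

    #define CNTR_BITS 2
    #define CNTR_MASK 0x3ull

    int main(void)
    {
        unsigned long long src = 0xE4;      /* fields 0,1,2,3 packed low-to-high */
        int order[] = { 2, 0, 3, 1 };       /* enabled order != counter order */

        for (int j = 0; j < 4; j++)
            printf("slot %d -> %llu\n", j,
                   (src >> (order[j] * CNTR_BITS)) & CNTR_MASK);
        return 0;                           /* prints 2, 0, 3, 1 */
    }
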
/linux-master/arch/powerpc/kernel/
iommu.c:
  916: unsigned int order;  (local)
  922: order = get_order(size);
  929: if (order >= IOMAP_MAX_ORDER) {
  939: page = alloc_pages_node(node, flag, order);
  952: free_pages((unsigned long)ret, order);

/linux-master/drivers/net/ethernet/stmicro/stmmac/
stmmac_main.c:
  2003: * reception, for example, it pre-allocated the RX socket buffer in order to
  2024: pp_params.order = ilog2(num_pages);
  2109: * reception, for example, it pre-allocated the RX socket buffer in order to
  2186: * reception, for example, it pre-allocated the RX socket buffer in order to
  2348: * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
  2822: * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
  5818: * in order to transmit a new packet.

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_main.c:
  900: pp_params.order = 0;
  5018: /* The supported periods are organized in ascending order */

/linux-master/drivers/gpu/drm/ttm/
ttm_pool.c:
  54: * @vaddr: original vaddr return for the mapping and order in the lower bits
  79: /* Allocate pages of size 1 << order with the given gfp_flags */
  80: ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, unsigned int order)  (argument)
  88: /* Don't set the __GFP_COMP flag for higher order allocations.
  92: if (order)
  97: p = alloc_pages_node(pool->nid, gfp_flags, order);
  99: p->private = order;
  107: if (order)
  110: vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
  123: dma->vaddr = (unsigned long)vaddr | order;
  133: ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching, unsigned int order, struct page *p)  (argument)
  186: ttm_pool_map(struct ttm_pool *pool, unsigned int order, struct page *p, dma_addr_t **dma_addr)  (argument)
  259: ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, enum ttm_caching caching, unsigned int order)  (argument)
  287: ttm_pool_select_type(struct ttm_pool *pool, enum ttm_caching caching, unsigned int order)  (argument)
  358: ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, struct page *p, dma_addr_t **dma_addr, unsigned long *num_pages, struct page ***pages)  (argument)
  397: unsigned int order;  (local)
  438: unsigned int order;  (local)
  …

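Line 123 (with the @vaddr note at line 54) stores the allocation order in the low bits of the mapping's virtual address: the mapping is page-aligned, so its bottom PAGE_SHIFT bits are guaranteed zero and can carry the tag. Round-trip of that trick in plain C11:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096ul

    int main(void)
    {
        unsigned int order = 3;
        void *vaddr = aligned_alloc(PAGE_SIZE, PAGE_SIZE << order);
        unsigned long tagged;

        if (!vaddr)
            return 1;
        tagged = (unsigned long)vaddr | order;  /* order rides in the low bits */

        /* recover both fields from the single word */
        printf("order=%lu vaddr=%p\n", tagged & (PAGE_SIZE - 1),
               (void *)(tagged & ~(PAGE_SIZE - 1)));
        free(vaddr);
        return 0;
    }
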
/linux-master/arch/arm64/mm/
hugetlbpage.c:
  41: int order;  (local)
  44: order = PUD_SHIFT - PAGE_SHIFT;
  46: order = CONT_PMD_SHIFT - PAGE_SHIFT;
  48: hugetlb_cma_reserve(order);

/linux-master/fs/ceph/
mds_client.c:
  36: * in order to balance load.
  2543: int order;  (local)
  2551: order = get_order(size * num_entries);
  2552: while (order >= 0) {
  2556: order);
  2559: order--;
  2564: num_entries = (PAGE_SIZE << order) / size;
  2567: rinfo->dir_buf_size = PAGE_SIZE << order;
  4724: * If an MDS fails and recovers, clients need to reconnect in order to
