/linux-master/drivers/gpu/drm/lib/drm_random.c
    16  void drm_random_reorder(unsigned int *order, unsigned int count,
    24          swap(order[i], order[j]);
    31          unsigned int *order, i;
    33          order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
    34          if (!order)
    35                  return order;
    38                  order[i] = i;
    40          drm_random_reorder(order, coun
    [all ...]
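The drm_random_order() snippet above follows a common pattern: allocate an index array, fill it with the identity permutation, then shuffle it. A minimal userspace sketch of the same idea (hypothetical name, plain Fisher-Yates with rand(), not the kernel's PRNG):

#include <stdlib.h>

/* Build the identity permutation 0..count-1, then shuffle it in
 * place with Fisher-Yates. Returning NULL on allocation failure
 * mirrors the kernel pattern shown above. */
static unsigned int *random_order(unsigned int count)
{
	unsigned int *order, i;

	order = calloc(count, sizeof(*order));
	if (!order)
		return NULL;

	for (i = 0; i < count; i++)
		order[i] = i;

	for (i = count; i-- > 1; ) {
		unsigned int j = rand() % (i + 1);
		unsigned int tmp = order[i];

		order[i] = order[j];
		order[j] = tmp;
	}

	return order;
}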
/linux-master/arch/s390/mm/page-states.c
    17  void arch_free_page(struct page *page, int order)
    21          __set_page_unused(page_to_virt(page), 1UL << order);
    24  void arch_alloc_page(struct page *page, int order)
    29          __set_page_stable_dat(page_to_virt(page), 1UL << order);
    31          __set_page_stable_nodat(page_to_virt(page), 1UL << order);
/linux-master/arch/x86/include/asm/xen/swiotlb-xen.h
     6  int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
     9  void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
/linux-master/include/linux/compaction.h
    61   * Number of free order-0 pages that should be available above given watermark
    65  static inline unsigned long compact_gap(unsigned int order)
    69   * free scanner may have up to 1 << order pages on its list and then
    70   * try to split an (order - 1) free page. At that point, a gap of
    71   * 1 << order might not be enough, so it's safer to require twice that
    80          return 2UL << order;
    85  extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
    86  extern int fragmentation_index(struct zone *zone, unsigned int order);
    88                  unsigned int order, unsigned int alloc_flags,
    92  extern bool compaction_suitable(struct zone *zone, int order,
   110  compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
   123  wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
    [all ...]
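The comment above compact_gap() carries the reasoning for the factor of two: the free scanner may hold up to 1 << order pages on its list and then try to split an (order - 1) buddy, so twice the request is demanded as headroom. The same arithmetic, restated as a standalone sketch:

#include <stdio.h>

/* Same arithmetic as compact_gap(): require twice 1 << order
 * order-0 pages of headroom above the watermark. */
static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;
}

int main(void)
{
	/* For an order-9 request (2 MiB with 4 KiB pages) the gap is
	 * 1024 order-0 pages, i.e. twice the request itself. */
	printf("order 9 -> gap of %lu pages\n", compact_gap(9));
	return 0;
}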
/linux-master/include/linux/page_owner.h
    11  extern void __reset_page_owner(struct page *page, unsigned short order);
    13                  unsigned short order, gfp_t gfp_mask);
    22  static inline void reset_page_owner(struct page *page, unsigned short order)
    25          __reset_page_owner(page, order);
    28  set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask)
    32          __set_page_owner(page, order, gfp_mask);
    57  static inline void reset_page_owner(struct page *page, unsigned short order)
    60  set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask)
/linux-master/include/linux/gfp.h
    64   * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
   172  static inline void arch_free_page(struct page *page, int order) { }
   175  static inline void arch_alloc_page(struct page *page, int order) { }
   178  struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
   180  struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
   192  /* Bulk allocate order-0 pages */
   233  __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
   238          return __alloc_pages(gfp_mask, order, nid, NULL);
   242  struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
   247          return __folio_alloc(gfp, order, ni
   255  alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
   272  alloc_pages(gfp_t gfp_mask, unsigned int order)
   276  alloc_pages_mpol(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid)
   281  folio_alloc(gfp_t gfp, unsigned int order)
    [all ...]
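As a usage note for the allocators listed above, here is a minimal sketch of a high-order allocation and its release (assumes kernel context; helper names are hypothetical, error handling trimmed to the essential NULL check):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate 1 << order contiguous pages and return their kernel
 * virtual address. The NULL check matters: high-order allocations
 * fail more readily than order-0 ones as memory fragments. */
static void *grab_buffer(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;
	return page_address(page);
}

static void drop_buffer(void *buf, unsigned int order)
{
	if (buf)
		__free_pages(virt_to_page(buf), order);
}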
/linux-master/scripts/atomic/gen-atomic-fallback.sh
     8  #gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
    16          local order="$1"; shift
    28  #gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
    35          local order="$1"; shift
    37          local tmpl_order=${order#_}
    39          gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
    42  #gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
    49          local order="$1"; shift
    51          local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
    52          gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "
    [all ...]
/linux-master/scripts/atomic/gen-atomic-instrumented.sh
    37          local order="$1"; shift
    39          if [ "${order}" = "_release" ]; then
    41          elif [ -z "${order}" ] && ! meta_in "$meta" "slv"; then
    52  #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
    59          local order="$1"; shift
    63          local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
    67          local checks="$(gen_params_checks "${meta}" "${order}" "$@")"
    71          gen_kerneldoc "" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
    88          local order="$1"; shift
    92          case "$order" i
    [all ...]
/linux-master/drivers/gpu/drm/i915/selftests/i915_random.c
    70  void i915_random_reorder(unsigned int *order, unsigned int count,
    73          i915_prandom_shuffle(order, sizeof(*order), count, state);
    78          unsigned int *order, i;
    80          order = kmalloc_array(count, sizeof(*order),
    82          if (!order)
    83                  return order;
    86                  order[i] = i;
    88          i915_random_reorder(order, coun
    [all ...]
/linux-master/arch/arm64/kvm/hyp/nvhe/page_alloc.c
    28   * __find_buddy_nocheck(pool, page 0, order 0) => page 1
    29   * __find_buddy_nocheck(pool, page 0, order 1) => page 2
    30   * __find_buddy_nocheck(pool, page 1, order 0) => page 0
    31   * __find_buddy_nocheck(pool, page 2, order 0) => page 3
    33  __find_buddy_nocheck(struct hyp_pool *pool, struct hyp_page *p, unsigned short order)
    39          addr ^= (PAGE_SIZE << order);
    52  __find_buddy_avail(struct hyp_pool *pool, struct hyp_page *p, unsigned short order)
    56          struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);
    58          if (!buddy || buddy->order != order || budd
    97          unsigned short order = p->order;
   130  __hyp_extract_page(struct hyp_pool *pool, struct hyp_page *p, unsigned short order)
   186          unsigned short order = p->order;
   198  hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
    [all ...]
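The example table in the comment is the classic buddy identity: flipping bit (PAGE_SHIFT + order) of a block's address maps it to its buddy and back again. A standalone sketch, assuming 4 KiB pages (buddy_addr is a hypothetical name, not the hyp allocator itself):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* One XOR both finds a buddy and returns from it, which is why
 * (page 0, order 0) => page 1 and (page 1, order 0) => page 0 in the
 * table above. */
static uintptr_t buddy_addr(uintptr_t addr, unsigned short order)
{
	return addr ^ (PAGE_SIZE << order);
}

int main(void)
{
	printf("%#lx\n", (unsigned long)buddy_addr(0 * PAGE_SIZE, 0)); /* page 1 */
	printf("%#lx\n", (unsigned long)buddy_addr(0 * PAGE_SIZE, 1)); /* page 2 */
	printf("%#lx\n", (unsigned long)buddy_addr(2 * PAGE_SIZE, 0)); /* page 3 */
	return 0;
}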
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
    34          const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
    38          for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
    39                  if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
    42                  nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
    50          for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
    51                  if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
    54                  nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
    63          const struct gk104_clkgate_engine_info *order
    [all ...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
    29          /* Allocating max_order bitmaps, one for each order */
    39          /* In the beginning, we have only one order that is available for
    72  dr_buddy_find_free_seg(struct mlx5dr_icm_buddy_mem *buddy, unsigned int start_order, unsigned int *segment, unsigned int *order)
    88                          "ICM Buddy: failed finding free mem for order %d\n",
    99          *order = order_iter;
   106   * @order: Order of the buddy to update.
   110   * It uses the data structures of the buddy system in order to find the first
   111   * area of free place, starting from the current order till the maximum order
   119  mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy, unsigned int order, unsigned int *segment)
   144          seg <<= order;
   150  mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy, unsigned int seg, unsigned int order)
   153          seg >>= order;
    [all ...]
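The kernel-doc fragment describes the allocation walk: search the free lists from the requested order up to the maximum, then split the found block back down. A compact sketch of that search-and-split loop (all names hypothetical; free-block counts stand in for the real per-order bitmaps):

#include <stdbool.h>

#define MAX_ORDER 8

/* num_free[k] counts free blocks of order k; a real buddy allocator
 * keeps per-order bitmaps, but counting is enough to show the walk. */
static unsigned int num_free[MAX_ORDER + 1];

/* Search from the requested order up to the maximum, as the dr_buddy
 * comment describes, then split larger blocks back down, leaving one
 * free buddy behind at each intermediate order. */
static bool buddy_alloc(unsigned int order)
{
	unsigned int k;

	for (k = order; k <= MAX_ORDER; k++)
		if (num_free[k])
			break;
	if (k > MAX_ORDER)
		return false;	/* no free area large enough */

	num_free[k]--;
	while (k > order) {
		k--;
		num_free[k]++;	/* the split-off buddy stays free */
	}
	return true;
}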
/linux-master/include/trace/events/compaction.h
   168                  int order,
   172          TP_ARGS(order, gfp_mask, prio),
   175                  __field(int, order)
   181                  __entry->order = order;
   186          TP_printk("order=%d gfp_mask=%s priority=%d",
   187                  __entry->order,
   195                  int order,
   198          TP_ARGS(zone, order, ret),
   203                  __field(int, order)
    [all ...]
/linux-master/include/trace/events/vmscan.h
    68          TP_PROTO(int nid, int zid, int order),
    70          TP_ARGS(nid, zid, order),
    75                  __field( int, order )
    81                  __entry->order = order;
    84          TP_printk("nid=%d order=%d",
    86                  __entry->order)
    91          TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
    93          TP_ARGS(nid, zid, order, gfp_flags),
    98                  __field( int, order )
    [all ...]
/linux-master/mm/page_reporting.h
    33  static inline void page_reporting_notify_free(unsigned int order)
    40          if (order < page_reporting_order)
    49  static inline void page_reporting_notify_free(unsigned int order)
/linux-master/mm/shuffle.h
    28  static inline bool is_shuffle_order(int order)
    32          return order >= SHUFFLE_ORDER;
    48  static inline bool is_shuffle_order(int order)
/linux-master/mm/debug_page_alloc.c
    35  bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
    38          if (order >= debug_guardpage_minorder())
    43          set_page_private(page, order);
    46          __mod_zone_freepage_state(zone, -(1 << order), migratetype);
    51  void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
    58          __mod_zone_freepage_state(zone, (1 << order), migratetype);
/linux-master/mm/page_alloc.c
   231  static void __free_pages_ok(struct page *page, unsigned int order,
   305  static bool page_contains_unaccepted(struct page *page, unsigned int order);
   306  static void accept_page(struct page *page, unsigned int order);
   307  static bool try_to_accept_memory(struct zone *zone, unsigned int order);
   333  _deferred_grow_zone(struct zone *zone, unsigned int order)
   335          return deferred_grow_zone(zone, order);
   522  static inline unsigned int order_to_pindex(int migratetype, int order)
   525          if (order > PAGE_ALLOC_COSTLY_ORDER) {
   526                  VM_BUG_ON(order != pageblock_order);
   530          VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDE
   538          int order = pindex / MIGRATE_PCPTYPES;
   550  pcp_allowed_order(unsigned int order)
   561  free_the_page(struct page *page, unsigned int order)
   581  prep_compound_page(struct page *page, unsigned int order)
   607  set_buddy_order(struct page *page, unsigned int order)
   625  compaction_capture(struct capture_control *capc, struct page *page, int order, int migratetype)
   656  compaction_capture(struct capture_control *capc, struct page *page, int order, int migratetype)
   664  add_to_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype)
   674  add_to_free_list_tail(struct page *page, struct zone *zone, unsigned int order, int migratetype)
   688  move_to_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype)
   696  del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order)
   725  buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, struct page *page, unsigned int order)
   765  __free_one_page(struct page *page, unsigned long pfn, struct zone *zone, unsigned int order, int migratetype, fpi_t fpi_flags)
   859  split_free_page(struct page *free_page, unsigned int order, unsigned long split_pfn_offset)
  1084  free_pages_prepare(struct page *page, unsigned int order)
  1146                          PAGE_SIZE << order);
  1148                          PAGE_SIZE << order);
  1193          unsigned int order;
  1247  free_one_page(struct zone *zone, struct page *page, unsigned long pfn, unsigned int order, int migratetype, fpi_t fpi_flags)
  1263  __free_pages_ok(struct page *page, unsigned int order, fpi_t fpi_flags)
  1285  __free_pages_core(struct page *page, unsigned int order)
  1436  check_new_pages(struct page *page, unsigned int order)
  1478  post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags)
  1538  prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags)
  1563  __rmqueue_smallest(struct zone *zone, unsigned int order, int migratetype)
  1602  __rmqueue_cma_fallback(struct zone *zone, unsigned int order)
  1608  __rmqueue_cma_fallback(struct zone *zone, unsigned int order)
  1623          unsigned int order;
  1699  can_steal_fallback(unsigned int order, int start_mt)
  1846  find_suitable_fallback(struct free_area *area, unsigned int order, int migratetype, bool only_stealable, bool *can_steal)
  1931          int order;
  2006  __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, unsigned int alloc_flags)
  2087  __rmqueue(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags)
  2124  rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, unsigned int alloc_flags)
  2342  free_unref_page_prepare(struct page *page, unsigned long pfn, unsigned int order)
  2428  free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, struct page *page, int migratetype, unsigned int order)
  2479  free_unref_page(struct page *page, unsigned int order)
  2500          free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
  2532          unsigned int order = folio_order(folio);
  2547                          order, migratetype, FPI_NONE);
  2560          unsigned int order = (unsigned long)folio->private;
  2616  split_page(struct page *page, unsigned int order)
  2630  __isolate_free_page(struct page *page, unsigned int order)
  2682  __putback_isolated_page(struct page *page, unsigned int order, int mt)
  2721  rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, unsigned int order, unsigned int alloc_flags, int migratetype)
  2761  nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
  2813  __rmqueue_pcplist(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags, struct per_cpu_pages *pcp, struct list_head *list)
  2844  rmqueue_pcplist(struct zone *preferred_zone, struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags)
  2891  rmqueue(struct zone *preferred_zone, struct zone *zone, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags, int migratetype)
  2926  should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  2932  __zone_watermark_unusable_free(struct zone *z, unsigned int order, unsigned int alloc_flags)
  2963  __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, long free_pages)
  3041  zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags)
  3048  zone_watermark_fast(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, gfp_t gfp_mask)
  3093  zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx)
  3176  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac)
  3400  __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac)
  3420  __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress)
  3515  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)
  3574  should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, enum compact_result compact_result, enum compact_priority *compact_priority, int *compaction_retries)
  3640  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)
  3649  should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, enum compact_result compact_result, enum compact_priority *compact_priority, int *compaction_retries)
  3761  __perform_reclaim(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac)
  3787  __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress)
  3820  wake_all_kswapds(unsigned int order, gfp_t gfp_mask, const struct alloc_context *ac)
  3840  gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
  3941  should_reclaim_retry(gfp_t gfp_mask, unsigned order, struct alloc_context *ac, int alloc_flags, bool did_some_progress, int *no_progress_loops)
  4046  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, struct alloc_context *ac)
  4324  prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags)
  4539  __alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask)
  4604  __folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask)
  4618  __get_free_pages(gfp_t gfp_mask, unsigned int order)
  4655  __free_pages(struct page *page, unsigned int order)
  4668  free_pages(unsigned long addr, unsigned int order)
  4672          __free_pages(virt_to_page((void *)addr), order);
  4814  make_alloc_exact(unsigned long addr, unsigned int order, size_t size)
  4851          unsigned int order = get_order(size);
  4876          unsigned int order = get_order(size);
  6343          int order;
  6629          unsigned int order;
  6672          unsigned int order;
  6725          unsigned int order;
  6815  page_contains_unaccepted(struct page *page, unsigned int order)
  6823  accept_page(struct page *page, unsigned int order)
  6864  try_to_accept_memory(struct zone *zone, unsigned int order)
  6914  page_contains_unaccepted(struct page *page, unsigned int order)
  6919  accept_page(struct page *page, unsigned int order)
  6923  try_to_accept_memory(struct zone *zone, unsigned int order)
    [all ...]
/linux-master/arch/xtensa/include/uapi/asm/byteorder.h
    10  # error processor byte order undefined!
/linux-master/arch/riscv/kvm/tlb.c
    20  kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, gpa_t gpa, gpa_t gpsz, unsigned long order)
    26          if (PTRS_PER_PTE < (gpsz >> order)) {
    33          for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
    38          for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
    49  kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz, unsigned long order)
    54          if (PTRS_PER_PTE < (gpsz >> order)) {
    61          for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
    66          for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
    77  kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid, unsigned long asid, unsigned long gva, unsigned long gvsz, unsigned long order)
    85          if (PTRS_PER_PTE < (gvsz >> order)) {
   119  kvm_riscv_local_hfence_vvma_gva(unsigned long vmid, unsigned long gva, unsigned long gvsz, unsigned long order)
   332  kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm, unsigned long hbase, unsigned long hmask, gpa_t gpa, gpa_t gpsz, unsigned long order)
   355  kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long gva, unsigned long gvsz, unsigned long order, unsigned long asid)
   384  kvm_riscv_hfence_vvma_gva(struct kvm *kvm, unsigned long hbase, unsigned long hmask, unsigned long gva, unsigned long gvsz, unsigned long order)
    [all ...]
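Each of these helpers applies the same heuristic: if a range covers more than PTRS_PER_PTE blocks of size 1 << order, one global fence is cheaper than per-block fences. A generic sketch of that shape (stub functions stand in for the HFENCE instructions; names hypothetical):

#include <stdint.h>

#define PTRS_PER_PTE 512	/* entries per page table */

static void flush_all(void) { /* stand-in for a global fence */ }
static void flush_one(uint64_t addr) { (void)addr; /* per-block fence */ }

/* Same shape as the tlb.c loops: fall back to a full flush when the
 * range spans more than PTRS_PER_PTE order-sized blocks, otherwise
 * fence each block of 1 << order bytes individually. */
static void flush_range(uint64_t base, uint64_t size, unsigned long order)
{
	uint64_t pos;

	if (PTRS_PER_PTE < (size >> order)) {
		flush_all();
		return;
	}

	for (pos = base; pos < base + size; pos += (1ULL << order))
		flush_one(pos);
}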
/linux-master/tools/testing/radix-tree/multiorder.c
     3   * multiorder.c: Multi-order radix tree entry testing
    15  item_insert_order(struct xarray *xa, unsigned long index, unsigned order)
    18          XA_STATE_ORDER(xas, xa, index, order);
    19          struct item *item = item_create(index, order);
    42          int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};
    47                  err = item_insert_order(xa, index[i], order[i]);
    53                  if (j <= (index[i] | ((1 << order[i]) - 1)))
    58                  int height = order[i] / XA_CHUNK_SHIFT;
    60                  unsigned long mask = (1UL << order[i]) - 1;
    66                  assert(item->order
    82          int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};
   167          unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
   218          unsigned int order;
    [all ...]
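An order-k entry in the XArray covers 2^k consecutive indices, which is what the test's check against (index[i] | ((1 << order[i]) - 1)) exercises: any lookup inside that span returns the same item. A minimal sketch of storing one such entry with the XA_STATE_ORDER pattern shown above (a sketch assuming CONFIG_XARRAY_MULTI; the function name is hypothetical):

#include <linux/xarray.h>

/* Store one multi-order entry: an order-2 entry rooted at index 4
 * covers indices 4..7, so xa_load() on any of them returns it. */
static int store_order2(struct xarray *xa, void *entry)
{
	XA_STATE_ORDER(xas, xa, 4, 2);	/* index 4, order 2 */

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}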
/linux-master/drivers/gpu/drm/ttm/ttm_pool.c
    54   * @vaddr: original vaddr return for the mapping and order in the lower bits
    79  /* Allocate pages of size 1 << order with the given gfp_flags */
    80  ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, unsigned int order)
    88          /* Don't set the __GFP_COMP flag for higher order allocations.
    92          if (order)
    97          p = alloc_pages_node(pool->nid, gfp_flags, order);
    99          p->private = order;
   107          if (order)
   110          vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
   123          dma->vaddr = (unsigned long)vaddr | order;
   133  ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching, unsigned int order, struct page *p)
   186  ttm_pool_map(struct ttm_pool *pool, unsigned int order, struct page *p, dma_addr_t **dma_addr)
   259  ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, enum ttm_caching caching, unsigned int order)
   287  ttm_pool_select_type(struct ttm_pool *pool, enum ttm_caching caching, unsigned int order)
   358  ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, struct page *p, dma_addr_t **dma_addr, unsigned long *num_pages, struct page ***pages)
   397          unsigned int order;
   438          unsigned int order;
    [all ...]
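The @vaddr comment documents a packing trick: a page-aligned mapping address has its low PAGE_SHIFT bits clear, so a small allocation order can be stored there instead of in a separate field (line 123 above). A standalone sketch, assuming 4 KiB pages (helper names hypothetical):

#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~(((uintptr_t)1 << PAGE_SHIFT) - 1))

/* Stash the order in the low bits of a page-aligned address. */
static uintptr_t pack_vaddr(void *vaddr, unsigned int order)
{
	return (uintptr_t)vaddr | order;
}

static void *unpack_vaddr(uintptr_t packed)
{
	return (void *)(packed & PAGE_MASK);
}

static unsigned int unpack_order(uintptr_t packed)
{
	return packed & ~PAGE_MASK;
}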
/linux-master/kernel/bpf/cgroup_iter.c
    13   * 1. Walk the descendants of a cgroup in pre-order.
    14   * 2. Walk the descendants of a cgroup in post-order.
    18   * For walking descendants, cgroup_iter can walk in either pre-order or
    19   * post-order. For walking ancestors, the iter walks up from a cgroup to
    40   * EOPNOTSUPP. In order to work around, the user may have to update their
    54          int order;      /* member of struct cgroup_iter_priv */
    77          if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE)
    79          else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST)
   110          if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE)
   112          else if (p->order
   200          int order = linfo->cgroup.order;
    [all ...]
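Pre-order visits a node before its descendants, post-order after them, so a parent appears before its children in the first mode and after them in the second. A generic tree-walk sketch of the two BPF_CGROUP_ITER_DESCENDANTS_* modes (plain C with a hypothetical node type, not the cgroup iterator itself):

#include <stdio.h>

struct node {
	const char *name;
	struct node *child, *sibling;
};

/* post == 0 gives pre-order (visit, then recurse), post == 1 gives
 * post-order (recurse, then visit). */
static void walk(struct node *n, int post)
{
	struct node *c;

	if (!n)
		return;
	if (!post)
		printf("%s ", n->name);	/* pre-order visit */
	for (c = n->child; c; c = c->sibling)
		walk(c, post);
	if (post)
		printf("%s ", n->name);	/* post-order visit */
}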
/linux-master/arch/s390/include/asm/sigp.h
     5  /* SIGP order codes */
    41  static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
    48                  "       sigp    %[r1],%[addr],0(%[order])\n"
    52                  : [addr] "d" (addr), [order] "a" (order)
    58  static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
    64          cc = ____pcpu_sigp(addr, order, parm, &_status);
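A usage sketch for the helper above: SIGP sets a condition code, and a busy code means the addressed CPU has not accepted the order yet, so callers commonly retry until another code comes back. This is a sketch assuming the SIGP_CC_BUSY condition-code constant from this header, not the header's own API:

/* Retry a SIGP order while the target CPU reports busy. */
static int pcpu_sigp_retry(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	do {
		cc = __pcpu_sigp(addr, order, parm, NULL);
	} while (cc == SIGP_CC_BUSY);

	return cc;
}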
/linux-master/mm/kmsan/init.c
   105   * by their order: when kmsan_memblock_free_pages() is called for the first
   106   * time with a certain order, it is reserved as a shadow block, for the second
   109   * after which held_back[order] can be used again.
   114  bool kmsan_memblock_free_pages(struct page *page, unsigned int order)
   118          if (!held_back[order].shadow) {
   119                  held_back[order].shadow = page;
   122          if (!held_back[order].origin) {
   123                  held_back[order].origin = page;
   126          shadow = held_back[order].shadow;
   127          origin = held_back[order]
   139          int order;      /* member of struct smallstack */
    [all ...]
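The comment spells out the held-back protocol: per order, the first freed block is parked as shadow, the second as origin, and only from the third call on can a block really be released, with the stashed pair serving as its metadata. A minimal sketch of that state machine (hypothetical names, userspace C, metadata wiring omitted):

#include <stdbool.h>
#include <stddef.h>

#define NR_ORDERS 11

/* Per-order stash: first freed block becomes shadow, second origin. */
static struct {
	void *shadow, *origin;
} held_back[NR_ORDERS];

/* Returns true once @block can really be freed, i.e. when shadow and
 * origin for this order were already captured by earlier calls. */
static bool hold_or_release(void *block, unsigned int order)
{
	if (!held_back[order].shadow) {
		held_back[order].shadow = block;
		return false;
	}
	if (!held_back[order].origin) {
		held_back[order].origin = block;
		return false;
	}
	/* Third call: the pair is complete, so release this block and
	 * let held_back[order] be used again for the next pairing. */
	held_back[order].shadow = NULL;
	held_back[order].origin = NULL;
	return true;
}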