Searched refs:order (Results 351 - 375 of 526) sorted by relevance


/linux-master/arch/powerpc/perf/
imc-pmu.c
1601 int i, order = get_order(thread_imc_mem_size); local
1605 free_pages((u64)per_cpu(thread_imc_mem, i), order);
1612 int i, order = get_order(trace_imc_mem_size); local
1616 free_pages((u64)per_cpu(trace_imc_mem, i), order);
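The imc-pmu.c hits pair get_order() with free_pages(): the reservation size is rounded up to a power-of-two number of pages, and the same order is passed back when the per-CPU buffers are freed. A minimal userspace sketch of that size-to-order mapping, assuming 4 KiB pages and re-implementing fls64() with a GCC builtin:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* 64 minus leading zeros, 0 for 0 -- mirrors the kernel's fls64() */
    static int fls64(unsigned long long x)
    {
        return x ? 64 - __builtin_clzll(x) : 0;
    }

    /* smallest order such that (PAGE_SIZE << order) >= size */
    static int get_order(unsigned long size)
    {
        size--;
        size >>= PAGE_SHIFT;
        return fls64(size);
    }

    int main(void)
    {
        unsigned long sizes[] = { 4096, 8192, 3 * 4096, 1UL << 20 };

        for (int i = 0; i < 4; i++)
            printf("size %8lu -> order %d -> %lu bytes\n",
                   sizes[i], get_order(sizes[i]),
                   PAGE_SIZE << get_order(sizes[i]));
        return 0;
    }

Note that a 3-page request costs a full order-2 (4-page) block, which is why callers keep the order around for the matching free.
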
/linux-master/drivers/gpu/drm/tegra/
drm.c
1106 * is another catch: in order to perform cache maintenance on pages
1110 * (typically on the order of a few MiB) for framebuffers (many MiB
1201 unsigned long order; local
1211 order = __ffs(tegra->domain->pgsize_bitmap);
1212 init_iova_domain(&tegra->carveout.domain, 1UL << order,
1213 carveout_start >> order);
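In the drm.c hit, __ffs() of the IOMMU domain's pgsize_bitmap gives the order of the smallest supported page size; 1UL << order is then the IOVA granule and carveout_start >> order is the carveout's starting frame in granule units. A hedged sketch of that arithmetic; the bitmap and carveout address below are made-up values, and __ffs() is re-implemented locally:

    #include <stdio.h>

    /* index of the least significant set bit, like the kernel's __ffs() */
    static unsigned long my_ffs(unsigned long x)
    {
        return __builtin_ctzl(x);
    }

    int main(void)
    {
        unsigned long pgsize_bitmap = 0x20010000UL;   /* e.g. 64 KiB and 512 MiB page sizes supported */
        unsigned long carveout_start = 0x80000000UL;  /* illustrative carveout base address */

        unsigned long order = my_ffs(pgsize_bitmap);  /* order of the smallest supported page size */
        unsigned long granule = 1UL << order;
        unsigned long start_frame = carveout_start >> order;

        printf("granule %lu bytes, carveout starts at granule frame %#lx\n",
               granule, start_frame);
        return 0;
    }
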
/linux-master/arch/powerpc/kernel/
rtas.c
1295 int order; local
1302 order = status - RTAS_EXTENDED_DELAY_MIN;
1303 for (ms = 1; order > 0; order--)
1392 * The delay hint is an order-of-magnitude suggestion, not
1724 * after resuming from a partition hibernation or migration in order
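The rtas.c hit converts an extended-delay status into a wait time by treating status - RTAS_EXTENDED_DELAY_MIN as a power of ten of milliseconds, which is what the "order-of-magnitude" comment refers to. A small sketch of that conversion, assuming the usual 9900..9905 encoding (the constant name below is illustrative):

    #include <stdio.h>

    #define EXTENDED_DELAY_MIN 9900   /* assumed 990x encoding of the delay hint */

    static unsigned int extended_delay_ms(int status)
    {
        int order = status - EXTENDED_DELAY_MIN;   /* 0..5 */
        unsigned int ms = 1;

        while (order-- > 0)
            ms *= 10;                              /* 1 ms, 10 ms, ..., 100 s */
        return ms;
    }

    int main(void)
    {
        for (int status = 9900; status <= 9905; status++)
            printf("status %d -> wait about %u ms\n",
                   status, extended_delay_ms(status));
        return 0;
    }
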
/linux-master/tools/perf/util/
unwind-libdw.c
307 * Display what we got based on the order setup.
312 if (callchain_param.order == ORDER_CALLER)
/linux-master/arch/x86/events/intel/
lbr.c
937 * The enabled order may be different from the counter order.
938 * Update the lbr_counters with the enabled order.
943 int i, j, pos = 0, order[X86_PMC_IDX_MAX]; local
949 order[pos++] = leader->hw.idx;
954 order[pos++] = sibling->hw.idx;
963 cnt = (src >> (order[j] * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK;
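The lbr.c hit builds an order[] array holding each event's hardware counter index in the order the events were enabled, then extracts fixed-width per-counter fields from a packed info word. A standalone sketch of that unpacking idiom; the 2-bit field width, the mask, and the sample values are illustrative rather than Intel's actual layout:

    #include <stdio.h>
    #include <stdint.h>

    #define CNTR_BITS 2
    #define CNTR_MASK ((1u << CNTR_BITS) - 1)

    int main(void)
    {
        /* events were scheduled on counters 3, 0 and 2, in that enable order */
        int order[] = { 3, 0, 2 };
        uint64_t src = 0x2d;   /* packed 2-bit fields: c0=1, c1=3, c2=2, c3=0 */

        for (int j = 0; j < 3; j++) {
            unsigned int cnt = (src >> (order[j] * CNTR_BITS)) & CNTR_MASK;
            printf("counter %d -> branch count field %u\n", order[j], cnt);
        }
        return 0;
    }
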
/linux-master/tools/bpf/bpftool/
link.c
176 static const char *cgroup_order_string(__u32 order) argument
178 switch (order) {
218 jsonw_string_field(wtr, "order",
219 cgroup_order_string(info->iter.cgroup.order));
648 printf("order %s ",
649 cgroup_order_string(info->iter.cgroup.order));
/linux-master/arch/powerpc/platforms/pseries/
lpar.c
1764 static void pSeries_set_page_state(struct page *page, int order, argument
1773 for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
1779 void arch_free_page(struct page *page, int order) argument
1786 pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
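arch_free_page() in lpar.c shows the usual way of visiting every base page inside an order-N block: (1 << order) iterations with a PAGE_SIZE stride. A minimal sketch, with the per-page hypervisor call replaced by a printf stand-in:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* stand-in for the per-page state-change call made by pSeries_set_page_state() */
    static void set_page_unused(unsigned long addr)
    {
        printf("  page at %#lx -> unused\n", addr);
    }

    int main(void)
    {
        unsigned long addr = 0x100000;   /* illustrative block address */
        int order = 2;                   /* an order-2 block spans 4 base pages */

        for (int i = 0; i < (1 << order); i++, addr += PAGE_SIZE)
            set_page_unused(addr);
        return 0;
    }
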
/linux-master/drivers/ps3/
ps3av_cmd.c
21 u32 order; member in struct:video_fmt
400 video_mode->video_order = ps3av_video_fmt_table[video_fmt].order;
402 pr_debug("%s: video_mode:vid:%x width:%d height:%d pitch:%d out_format:%d format:%x order:%x\n",
421 video_format.video_order = ps3av_video_fmt_table[video_fmt].order;
/linux-master/sound/hda/
hdmi_chmap.c
347 int order; local
350 order = get_channel_allocation_order(ca);
351 ch_alloc = &channel_allocations[order];
355 /* fill actual channel mappings in ALSA channel (i) order */
/linux-master/drivers/mfd/
twl4030-power.c
163 * It seems that type1 and type2 is just the resource init order
469 static int order; local
496 order = 1;
504 if (!order)
505 pr_warn("TWL4030: Bad order of scripts (sleep script before wakeup) Leads to boot failure on some boards\n");
782 * Note that the type1 and type2 seem to be just the init order number
/linux-master/fs/ext4/
file.c
86 * flag needs to be cleared here in order to ensure that the
702 static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order) argument
718 * unset for order != 0 (i.e. only in do_cow_fault); for
742 result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);
751 result = dax_finish_sync_fault(vmf, order, pfn);
/linux-master/arch/alpha/kernel/
pci_iommu.c
422 long order = get_order(size);
427 cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
440 free_pages((unsigned long)cpu_addr, order);
421 long order = get_order(size); local
/linux-master/mm/
oom_kill.c
154 * order == -1 means the oom kill is required by sysrq, otherwise only
159 return oc->order == -1;
456 pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
457 current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
459 if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
944 * in order to prevent the OOM victim from depleting the memory
memory.c
330 * masks at different levels, in order to test whether a table
430 * seen in-order. See the alpha page table accessors for the
573 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
581 * PFNMAP mappings in order to support COWable mappings.
1865 * The VMA list must be sorted in ascending virtual address order.
3373 * The critical issue is to order this
4338 int order; local
4364 * Find the highest order where the aligned range is completely
4368 order = highest_order(orders);
4370 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
[all...]
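The memory.c hit picks the highest candidate folio order and aligns the faulting address down to that order's natural boundary before checking that the whole range fits. A sketch of that selection and alignment step, with highest_order() and ALIGN_DOWN() re-implemented locally for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long)(a) - 1))

    /* highest set bit index, a stand-in for the kernel's highest_order() helper */
    static int highest_order(unsigned long orders)
    {
        return 63 - __builtin_clzl(orders);
    }

    int main(void)
    {
        unsigned long orders = (1UL << 4) | (1UL << 2) | (1UL << 0); /* candidate orders 4, 2, 0 */
        unsigned long fault_addr = 0x7f12345678d000UL;               /* illustrative fault address */

        int order = highest_order(orders);
        unsigned long start = ALIGN_DOWN(fault_addr, PAGE_SIZE << order);

        printf("try order %d: map [%#lx, %#lx)\n",
               order, start, start + (PAGE_SIZE << order));
        return 0;
    }
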
memcontrol.c
1183 * Check events in order.
1954 int order)
1961 .order = order,
1968 if (mem_cgroup_margin(memcg) >= (1 << order))
2160 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) argument
2164 if (order > PAGE_ALLOC_COSTLY_ORDER)
2203 ret = mem_cgroup_out_of_memory(memcg, mask, order);
2670 * proposed penalty in order to reduce to a reasonable number of jiffies, and
3431 * @order
1953 mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, int order) argument
3435 __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) argument
3458 __memcg_kmem_uncharge_page(struct page *page, int order) argument
3926 mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned) argument
[all...]
/linux-master/fs/xfs/
xfs_log_cil.c
561 int order; local
661 * Now update the order of everything modified in the transaction
666 order = atomic_inc_return(&ctx->order_id);
672 lip->li_order_id = order;
823 * same iclog order their IO completion callbacks in the same order that
844 * Ensure that the order of log writes follows checkpoint sequence order. This
904 * sequence order so that log recovery will always use in-order star
[all...]
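The xfs_log_cil.c hit stamps every log item modified in a transaction with a strictly increasing value taken from an atomic counter, so the items can later be processed in commit order. A minimal C11 sketch of that stamping step; the structs are trimmed-down stand-ins, not the XFS types:

    #include <stdio.h>
    #include <stdatomic.h>

    struct ctx  { atomic_int order_id; };                 /* stand-in for the CIL context */
    struct item { int li_order_id; const char *name; };   /* stand-in for a log item */

    int main(void)
    {
        struct ctx ctx = { .order_id = 0 };
        struct item items[] = { { 0, "inode" }, { 0, "buffer" }, { 0, "dquot" } };

        /* stamp each item with the next ordering value, as the CIL commit path does */
        for (int i = 0; i < 3; i++) {
            items[i].li_order_id = atomic_fetch_add(&ctx.order_id, 1) + 1;
            printf("%-6s -> order %d\n", items[i].name, items[i].li_order_id);
        }
        return 0;
    }
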
/linux-master/include/linux/
pagemap.h
352 * limit the maximum allocation order to PMD size. I'm not aware of any
353 * assumptions about maximum order if THP are disabled, but 8 seems like
354 * a good order (that's 1MB if you're using 4kB pages)
555 struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
557 static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) argument
559 return folio_alloc_noprof(gfp, order);
842 * Context: The caller should have the page locked in order to prevent
1010 * when the locks are being taken in the wrong order, or if making
1012 * them in order). Usually folio_lock() is the correct function to call.
1047 * more folios, they must be in order o
[all...]
/linux-master/kernel/trace/
ftrace.c
1119 int order; member in struct:ftrace_page
3239 int order; local
3248 order = fls(pages) - 1;
3251 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3255 if (!order)
3257 order--;
3261 ftrace_number_of_pages += 1 << order;
3264 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3265 pg->order = order;
[all...]
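The ftrace.c hit computes the largest order that does not overshoot the requested page count with fls(pages) - 1 and then falls back to smaller orders whenever a big contiguous allocation fails. A userspace sketch of that back-off loop, where try_alloc() fakes the allocator with an arbitrary 8-page contiguity limit:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    /* highest set bit position (1-based), like the kernel's fls() */
    static int fls(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    /* pretend the system cannot hand out more than 8 contiguous pages */
    static void *try_alloc(int order)
    {
        if ((1 << order) > 8)
            return NULL;
        return malloc((size_t)1 << (PAGE_SHIFT + order));
    }

    int main(void)
    {
        int pages = 53;                 /* room wanted for 53 pages of records */
        int order = fls(pages) - 1;     /* largest order not exceeding the request: 5 */
        void *buf;

        while (!(buf = try_alloc(order))) {
            if (!order) {
                fprintf(stderr, "out of memory\n");
                return 1;
            }
            order--;                    /* fall back to a smaller contiguous chunk */
        }
        printf("got an order-%d block (%d pages)\n", order, 1 << order);
        free(buf);
        return 0;
    }
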
/linux-master/drivers/scsi/
sg.c
1876 int blk_size = buff_size, order; local
1903 order = get_order(num);
1905 ret_sz = 1 << (PAGE_SHIFT + order);
1913 schp->pages[k] = alloc_pages(gfp_mask, order);
1929 schp->page_order = order;
1941 __free_pages(schp->pages[i], order);
1943 if (--order >= 0)
st.c
9 order) Klaus Ehrenfried, Eugene Exarevsky, Eric Lee Green, Wolfgang Denk,
148 /* Bit reversed order to get same names for same minors with all
3914 int segs, max_segs, b_size, order, got;
3931 order = STbuffer->reserved_page_order;
3932 b_size = PAGE_SIZE << order;
3934 for (b_size = PAGE_SIZE, order = 0;
3935 order < ST_MAX_ORDER &&
3936 max_segs * (PAGE_SIZE << order) < new_size;
3937 order++, b_size *= 2)
3939 STbuffer->reserved_page_order = order;
3899 int segs, max_segs, b_size, order, got; local
3971 int i, order = STbuffer->reserved_page_order; local
[all...]
/linux-master/drivers/infiniband/hw/mlx5/
mr.c
575 * order to free CPU resources to other tasks.
666 * Find the smallest ent with order >= requested_order.
794 int order = order_base_2(ent->rb_key.ndescs); local
801 order = MLX5_IMR_KSM_CACHE_ENTRY + 2;
803 sprintf(ent->name, "%d", order);
860 int order; local
882 order = MLX5_IMR_KSM_CACHE_ENTRY;
884 order = order_base_2(rb_key.ndescs) - 2;
889 ent->limit = dev->mdev->profile.mr_cache[order].limit;
2481 * In order t
[all...]
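In mr.c the MR cache bucket is keyed by order_base_2() of the descriptor count, i.e. the count is rounded up to the next power of two and its log2 is used as the bucket order. A simple sketch of that rounding, using a plain loop instead of the kernel's bit tricks:

    #include <stdio.h>

    /* order_base_2(n): smallest k with (1 << k) >= n; n == 0 and n == 1 both give 0 here */
    static int order_base_2(unsigned long n)
    {
        int k = 0;

        while ((1UL << k) < n)
            k++;
        return k;
    }

    int main(void)
    {
        unsigned long ndescs[] = { 1, 2, 3, 8, 500 };

        for (int i = 0; i < 5; i++)
            printf("%4lu descriptors -> cache bucket order %d\n",
                   ndescs[i], order_base_2(ndescs[i]));
        return 0;
    }
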
/linux-master/arch/sparc/kernel/
traps_64.c
262 * Must do a little instruction decoding here in order to
855 unsigned long largest_size, smallest_linesize, order, ver; local
901 for (order = 0; order < NR_PAGE_ORDERS; order++) {
902 if ((PAGE_SIZE << order) >= sz)
906 __get_free_pages(GFP_KERNEL, order);
912 memset(cheetah_error_log, 0, PAGE_SIZE << order);
1042 /* In order to make the even parity correct we must do two things.
/linux-master/drivers/md/
dm-crypt.c
1668 * In order to avoid this scenario we allocate the pages under a mutex.
1670 * In order to not degrade performance with excessive locking, we try
1674 * In order to reduce allocation overhead, we try to allocate compound pages in
1684 unsigned int order = MAX_PAGE_ORDER; local
1702 order = min(order, remaining_order);
1704 while (order > 0) {
1706 (1 << order) > dm_crypt_pages_per_client))
1710 order);
1712 percpu_counter_add(&cc->n_allocated_pages, 1 << order);
[all...]
/linux-master/fs/quota/
dquot.c
1643 /* Filesystem must explicitly define it's own method in order to use
3007 unsigned long nr_hash, order; local
3020 order = 0;
3021 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
3031 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
3039 pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
3040 " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
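dquot_init() sizes its hash table in whole pages: it allocates 2^order pages and derives the bucket count from how many list heads fit in them. A compact userspace sketch of that arithmetic, with struct hlist_head reduced to a single pointer:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct hlist_head { void *first; };   /* stand-in for the kernel type */

    int main(void)
    {
        unsigned long order = 0;          /* one page's worth of buckets to start with */
        struct hlist_head *hash = malloc(PAGE_SIZE << order);
        unsigned long nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);

        printf("order %lu: %lu hash buckets in %lu bytes\n",
               order, nr_hash, PAGE_SIZE << order);
        free(hash);
        return 0;
    }
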
/linux-master/drivers/infiniband/hw/hfi1/
trace_tid.h
137 u32 index, u32 type, unsigned long pa, u16 order),
138 TP_ARGS(dd, index, type, pa, order),
144 __field(u16, order)
151 __entry->order = order;
153 TP_printk("[%s] type %s pa %lx index %u order %u",
158 __entry->order

Completed in 426 milliseconds
