Searched refs:order (Results 176 - 200 of 526) sorted by relevance


/linux-master/drivers/iommu/
io-pgtable-dart.c
112 int order = get_order(size); local
115 return iommu_alloc_pages(gfp, order);
437 int order = get_order(DART_GRANULE(data)); local
449 iommu_free_pages(iopte_deref(pte, data), order); local
451 iommu_free_pages(data->pgd[i], order);
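The DART hits show the common size-to-order round trip: get_order() turns a byte count into a page order, and the allocator hands back PAGE_SIZE << order bytes. A minimal sketch of that pattern, using the generic page allocator instead of the internal iommu_alloc_pages()/iommu_free_pages() helpers (table_alloc/table_free are illustrative names):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* get_order() rounds up: 4 KiB -> 0, 12 KiB -> 2, assuming 4 KiB pages. */
    static void *table_alloc(size_t size, gfp_t gfp)
    {
            int order = get_order(size);
            struct page *p = alloc_pages(gfp, order);

            return p ? page_address(p) : NULL;
    }

    static void table_free(void *table, size_t size)
    {
            free_pages((unsigned long)table, get_order(size));
    }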
/linux-master/mm/
huge_memory.c
52 * By default, transparent hugepage support is disabled in order to avoid
134 int order = highest_order(orders); local
138 addr = vma->vm_end - (PAGE_SIZE << order);
139 if (thp_vma_suitable_order(vma, addr, order))
141 order = next_order(&orders, order);
455 int order; member in struct:thpsize
463 int order = to_thpsize(kobj)->order; local
466 if (test_bit(order,
482 int order = to_thpsize(kobj)->order; local
534 sum_mthp_stat(int order, enum mthp_stat_item item) argument
578 thpsize_create(int order, struct kobject *parent) argument
621 int order; local
2834 int order = folio_order(folio); local
[all...]
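The huge_memory.c hits walk a bitmask of candidate THP orders from largest to smallest (highest_order()/next_order() in huge_mm.h are thin wrappers around this bit arithmetic). A minimal sketch, where fits() is a hypothetical stand-in for thp_vma_suitable_order():

    #include <linux/bitops.h>

    static bool fits(int order)
    {
            return order <= 2;      /* placeholder policy for this sketch */
    }

    /* Bit n set in @orders means "order n is a candidate". */
    static int pick_order(unsigned long orders)
    {
            while (orders) {
                    int order = fls_long(orders) - 1;       /* highest set bit */

                    if (fits(order))
                            return order;
                    orders &= ~(1UL << order);              /* try the next lower order */
            }
            return 0;       /* fall back to a single page */
    }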
mempolicy.c
582 * intersperse PTEs of other, order 0, folios). This is
1215 unsigned int order; local
1219 order = folio_order(src);
1220 ilx += src->index >> order;
1238 page = alloc_pages_mpol(gfp, order, pol, ilx, nid);
1360 unsigned int order; local
1377 order = folio_order(folio);
1379 mpol_cond_put(get_vma_policy(vma, addr, order,
1382 mmpol.ilx -= folio->index >> order;
1804 * get_vma_policy(@vma, @addr, @order,
1818 get_vma_policy(struct vm_area_struct *vma, unsigned long addr, int order, pgoff_t *ilx) argument
2190 alloc_pages_preferred_many(gfp_t gfp, unsigned int order, int nid, nodemask_t *nodemask) argument
2221 alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *pol, pgoff_t ilx, int nid) argument
2296 vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage) argument
2325 alloc_pages_noprof(gfp_t gfp, unsigned int order) argument
2341 folio_alloc_noprof(gfp_t gfp, unsigned int order) argument
[all...]
hugetlb.c
59 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order) argument
62 1 << order);
65 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order) argument
481 * reference. In order to ensure that one file_region must hold
1513 unsigned int order, bool demote)
1516 int nr_pages = 1 << order;
1536 unsigned int order)
1538 __destroy_compound_gigantic_folio(folio, order, true);
1543 unsigned int order)
1545 __destroy_compound_gigantic_folio(folio, order, false);
1512 __destroy_compound_gigantic_folio(struct folio *folio, unsigned int order, bool demote) argument
1535 destroy_compound_hugetlb_folio_for_demote(struct folio *folio, unsigned int order) argument
1542 destroy_compound_gigantic_folio(struct folio *folio, unsigned int order) argument
1548 free_gigantic_folio(struct folio *folio, unsigned int order) argument
1616 free_gigantic_folio(struct folio *folio, unsigned int order) argument
1618 destroy_compound_gigantic_folio(struct folio *folio, unsigned int order) argument
2066 __prep_compound_gigantic_folio(struct folio *folio, unsigned int order, bool demote) argument
2143 prep_compound_gigantic_folio(struct folio *folio, unsigned int order) argument
2149 prep_compound_gigantic_folio_for_demote(struct folio *folio, unsigned int order) argument
2179 int order = huge_page_order(h); local
2480 unsigned int order; local
4645 hugetlb_add_hstate(unsigned int order) argument
7733 hugetlb_cma_reserve(int order) argument
[all...]
vmscan.c
145 /* Allocation order */
146 s8 order; member in struct:scan_control
699 * Must be careful with the order of the tests. When someone has
714 * Reversing the order of the tests ensures such a situation cannot
745 * order to detect refaults, thus thrashing, later on.
1217 int __maybe_unused order = folio_order(folio); local
1230 count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
1699 * this disrupts the LRU order when reclaiming for lower zones but
1717 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
3961 /* check the order t
6430 try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask) argument
6609 pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) argument
6661 prepare_kswapd_sleep(pg_data_t *pgdat, int order, int highest_zoneidx) argument
6780 balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) argument
7198 wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, enum zone_type highest_zoneidx) argument
7406 __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) argument
7459 node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) argument
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx4/
alloc.c
485 * allocated from. This is done in order to handle
685 struct mlx4_db *db, int order)
690 for (o = order; o <= 1; ++o) {
703 if (o > order)
704 set_bit(i ^ 1, pgdir->bits[order]);
710 db->order = order;
715 int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) argument
724 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
736 WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
684 mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, struct mlx4_db *db, int order) argument
[all...]
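The mlx4 hits show a two-level buddy scheme for doorbell slots: when no order-0 slot is free, an order-1 pair is split and set_bit(i ^ 1, ...) returns the sibling half to the lower-order free set. A minimal sketch of that split, with SLOTS and the bitmaps as illustrative stand-ins for the driver's pgdir state:

    #include <linux/bitmap.h>
    #include <linux/errno.h>

    #define SLOTS 64

    static DECLARE_BITMAP(free_o0, SLOTS);          /* order-0 free slots */
    static DECLARE_BITMAP(free_o1, SLOTS / 2);      /* order-1 free pairs */

    static int alloc_slot_order0(void)
    {
            unsigned long i = find_first_bit(free_o0, SLOTS);

            if (i < SLOTS) {                        /* exact-order hit */
                    clear_bit(i, free_o0);
                    return i;
            }
            i = find_first_bit(free_o1, SLOTS / 2); /* otherwise split a pair */
            if (i >= SLOTS / 2)
                    return -ENOMEM;
            clear_bit(i, free_o1);
            i <<= 1;                                /* first slot of the pair */
            set_bit(i ^ 1, free_o0);                /* buddy becomes free at order 0 */
            return i;
    }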
/linux-master/drivers/media/platform/samsung/exynos4-is/
fimc-is-param.c
701 isp->otf_input.order = OTF_INPUT_ORDER_BAYER_GR_BG;
712 isp->dma1_input.order = 0;
724 isp->dma2_input.order = 0;
777 isp->otf_output.order = 0;
787 isp->dma1_output.order = 0;
803 isp->dma2_output.order = 0;
830 drc->otf_input.order = 0;
839 drc->dma_input.order = 0;
853 drc->otf_output.order = 0;
869 fd->otf_input.order
[all...]
/linux-master/lib/
scatterlist.c
158 * kmalloc (tracked by kmemleak), in order to for that last
603 * @order: Second argument for alloc_pages()
612 unsigned int order, bool chainable,
620 nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
622 if (length > (nent << (PAGE_SHIFT + order)))
639 elem_len = min_t(u64, length, PAGE_SIZE << order);
640 page = alloc_pages(gfp, order);
642 sgl_free_order(sgl, order);
676 * @order
611 sgl_alloc_order(unsigned long long length, unsigned int order, bool chainable, gfp_t gfp, unsigned int *nent_p) argument
685 sgl_free_n_order(struct scatterlist *sgl, int nents, int order) argument
707 sgl_free_order(struct scatterlist *sgl, int order) argument
[all...]
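The kerneldoc above belongs to sgl_alloc_order()/sgl_free_order(), which build and tear down a scatterlist out of same-order page allocations. A minimal usage sketch, assuming 4 KiB pages (the 1 MiB length and order 2 are arbitrary for illustration):

    #include <linux/scatterlist.h>
    #include <linux/sizes.h>
    #include <linux/gfp.h>

    static struct scatterlist *make_sgl(unsigned int *nents)
    {
            /* 1 MiB in order-2 (16 KiB) chunks -> 64 entries on success */
            return sgl_alloc_order(SZ_1M, 2, false, GFP_KERNEL, nents);
    }

    static void drop_sgl(struct scatterlist *sgl)
    {
            sgl_free_order(sgl, 2);         /* order must match the allocation */
    }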
xarray.c
410 * in order to add the entry described by @xas. Because we cannot store a
647 unsigned int order = xas->xa_shift; local
675 while (shift > order) {
1008 * @order: Current entry order.
1013 * to prepare for the upcoming split of an entry of @order size into
1014 * entries of the order stored in the @xas.
1018 void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, argument
1021 unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1025 if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
1069 xas_split(struct xa_state *xas, void *entry, unsigned int order) argument
1745 unsigned int order = BITS_PER_LONG; local
1778 int order = 0; local
1808 int order = 0; local
[all...]
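xas_split_alloc()/xas_split() form a two-phase API: node allocation happens before the lock is taken, the split itself under xa_lock. A minimal sketch of the calling convention (the function and its parameters are illustrative, and CONFIG_XARRAY_MULTI is assumed):

    #include <linux/xarray.h>

    static int split_entry(struct xarray *xa, unsigned long index, void *entry,
                           unsigned int old_order, unsigned int new_order)
    {
            XA_STATE_ORDER(xas, xa, index, new_order);      /* target order */

            xas_split_alloc(&xas, entry, old_order, GFP_KERNEL);
            if (xas_error(&xas))
                    return xas_error(&xas);

            xas_lock(&xas);
            xas_split(&xas, entry, old_order);      /* entry now spans new_order slots */
            xas_unlock(&xas);
            return 0;
    }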
test_bitmap.c
335 int pos, order; local
341 for (order = 0; order < 10; order++) {
342 pos = bitmap_find_free_region(bmap, 1000, order);
343 if (order == 0)
346 expect_eq_uint(pos, order < 9 ? BIT(order) : -ENOMEM);
350 for (order = 1; order <
[all...]
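The test above exercises bitmap_find_free_region(), which claims a naturally aligned run of 1 << order bits and returns its offset, or -ENOMEM when no such run remains. A minimal sketch of the claim/release pairing:

    #include <linux/bitmap.h>

    static int claim_and_release(unsigned long *bmap, unsigned int nbits, int order)
    {
            int pos = bitmap_find_free_region(bmap, nbits, order);

            if (pos < 0)
                    return pos;     /* -ENOMEM: no aligned free run of that size */
            /* bits pos .. pos + (1 << order) - 1 now belong to the caller */
            bitmap_release_region(bmap, pos, order);
            return pos;
    }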
/linux-master/drivers/net/ethernet/amd/xgbe/
xgbe-desc.c
292 int order; local
295 order = alloc_order;
297 /* Try to obtain pages, decreasing order if necessary */
299 while (order >= 0) {
300 pages = alloc_pages_node(node, gfp, order);
304 order--;
318 PAGE_SIZE << order, DMA_FROM_DEVICE);
325 pa->pages_len = PAGE_SIZE << order;
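The xgbe loop is the classic decreasing-order fallback: ask for the largest contiguous buffer first and step down under fragmentation rather than fail outright. A minimal sketch (alloc_rx_pages is an illustrative name; __GFP_NOWARN suppresses warnings for the expected high-order failures):

    #include <linux/gfp.h>

    static struct page *alloc_rx_pages(gfp_t gfp, int max_order, int *got_order)
    {
            int order;

            for (order = max_order; order >= 0; order--) {
                    struct page *pages = alloc_pages(gfp | __GFP_NOWARN, order);

                    if (pages) {
                            *got_order = order;     /* buffer is PAGE_SIZE << order bytes */
                            return pages;
                    }
            }
            return NULL;
    }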
/linux-master/kernel/dma/
swiotlb.c
437 unsigned int order, area_order; local
464 order = get_order(nslabs << IO_TLB_SHIFT);
465 nslabs = SLABS_PER_PAGE << order;
467 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
469 order);
472 order--;
473 nslabs = SLABS_PER_PAGE << order;
483 free_pages((unsigned long)vstart, order);
494 (PAGE_SIZE << order) >> 20);
521 free_pages((unsigned long)vstart, order);
576 unsigned int order = get_order(bytes); local
[all...]
/linux-master/fs/
dax.c
112 * true if the entry that was found is of a smaller order than the entry
203 * if it did. The entry returned may have a larger order than @order.
204 * If @order is larger than the order of the entry found in i_pages, this
209 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) argument
222 if (dax_entry_order(entry) < order)
555 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
574 struct address_space *mapping, unsigned int order)
583 entry = get_unlocked_entry(xas, order);
573 grab_mapping_entry(struct xa_state *xas, struct address_space *mapping, unsigned int order) argument
1907 dax_iomap_fault(struct vm_fault *vmf, unsigned int order, pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) argument
1929 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) argument
1973 dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, pfn_t pfn) argument
[all...]
/linux-master/drivers/net/ethernet/aquantia/atlantic/
aq_ring.c
43 unsigned int len = PAGE_SIZE << rxpage->order;
48 __free_pages(rxpage->page, rxpage->order);
55 unsigned int order = rx_ring->page_order; local
60 page = dev_alloc_pages(order);
64 daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
72 rxpage->order = order;
78 __free_pages(page, order);
86 unsigned int order = self->page_order; local
99 (PAGE_SIZE << order)) {
[all...]
/linux-master/fs/ext4/
mballoc.c
135 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
140 * largest free order in the buddy bitmap of the participating group infos of
156 * structures to decide the order in which groups are to be traversed for
160 * >= the order of the request. We directly look at the largest free order list
161 * in the data structure (1) above where largest_free_order = order of the
163 * order of largest_free_order. This allows us to perform CR_POWER2_ALIGNED
182 * linear order which requires O(N) search time for each CR_POWER2_ALIGNED and
193 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
527 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, in argument
684 int order = e4b->bd_blkbits + 1; local
823 int order; local
917 ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order) argument
998 int i, order, min_order; local
1242 int order = 1; local
1747 int order = 1, max; local
1844 int order = 1; local
1990 int max, order, next; local
4788 int order, i; local
5844 ext4_mb_discard_lg_preallocations(struct super_block *sb, struct ext4_locality_group *lg, int order, int total_entries) argument
5929 int order, added = 0, lg_prealloc_count = 1; local
[all...]
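The mballoc comments describe the CR_POWER2_ALIGNED fast path: block groups hang off per-order lists keyed by the order of their largest free extent, so a request of order req scans only the lists for orders >= req instead of all groups. A minimal sketch, with NUM_ORDERS and struct group as illustrative stand-ins for the mballoc structures:

    #include <linux/list.h>

    #define NUM_ORDERS 14   /* stand-in for MB_NUM_ORDERS(sb) */

    struct group {
            struct list_head node;
            int largest_free_order; /* order of this group's biggest free extent */
    };

    static struct list_head order_lists[NUM_ORDERS];

    static struct group *find_group(int req)
    {
            int o;

            for (o = req; o < NUM_ORDERS; o++)      /* smallest order that fits */
                    if (!list_empty(&order_lists[o]))
                            return list_first_entry(&order_lists[o],
                                                    struct group, node);

            return NULL;    /* caller falls back to the slower criteria */
    }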
/linux-master/mm/kasan/
kasan_test.c
280 size_t order = 4; local
281 size_t size = (1UL << (PAGE_SHIFT + order));
290 pages = alloc_pages(GFP_KERNEL, order);
295 free_pages((unsigned long)ptr, order);
302 size_t order = 4; local
304 pages = alloc_pages(GFP_KERNEL, order);
307 free_pages((unsigned long)ptr, order);
1107 static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order) argument
1114 ret = mempool_init_page_pool(pool, pool_size, order);
1250 int order local
1304 int order = 2; local
1803 int i, size, order; local
[all...]
/linux-master/arch/powerpc/sysdev/xive/
spapr.c
479 __be32 *qpage, u32 order)
487 if (order) {
496 q->msk = order ? ((1u << (order - 2)) - 1) : 0;
515 rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
524 1 << xive_alloc_order(order));
478 xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, __be32 *qpage, u32 order) argument
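The q->msk line above encodes the queue geometry: XIVE queue entries are 4 bytes, so a page of 1 << order bytes holds 1 << (order - 2) entries and the ring-index mask is one less than that (order 16, i.e. 64 KiB, gives 16384 entries and a mask of 0x3fff). A one-line sketch of the same arithmetic:

    #include <linux/types.h>

    static u32 queue_index_mask(u32 order)
    {
            return order ? (1u << (order - 2)) - 1 : 0;
    }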
/linux-master/drivers/i2c/busses/
i2c-pnx.c
38 int order; /* RX Bytes to order via TX */ member in struct:i2c_pnx_mif
316 if (alg_data->mif.order) {
321 if (alg_data->mif.order == 1) {
341 alg_data->mif.order--;
542 alg_data->mif.order = pmsg->len;
595 alg_data->mif.order = 0;
/linux-master/net/9p/
trans_xen.c
321 unsigned int order)
339 bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
345 for (; i < (1 << order); i++) {
352 ring->intf->ring_order = order;
354 ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);
371 free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
404 "max-ring-page-order", 0);
319 xen_9pfs_front_alloc_dataring(struct xenbus_device *dev, struct xen_9pfs_dataring *ring, unsigned int order) argument
/linux-master/arch/s390/mm/
vmem.c
30 static void __ref *vmem_alloc_pages(unsigned int order) argument
32 unsigned long size = PAGE_SIZE << order;
35 return (void *)__get_free_pages(GFP_KERNEL, order);
39 static void vmem_free_pages(unsigned long addr, int order, struct vmem_altmap *altmap) argument
42 vmem_altmap_free(altmap, 1 << order);
49 free_pages(addr, order);
/linux-master/include/linux/
scatterlist.h
112 * In order for the low bit stealing approach to work, pages
483 unsigned int order, bool chainable,
487 void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
488 void sgl_free_order(struct scatterlist *sgl, int order);
516 * single page, to avoid a higher order allocation. We could define this
517 * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
/linux-master/drivers/gpu/drm/arm/display/komeda/
komeda_kms.c
162 int order = 0, err; local
187 plane_st->normalized_zpos = order++;
195 order++;
/linux-master/sound/soc/loongson/
loongson_dma.c
33 u32 order; /* Next descriptor address register */ member in struct:loongson_dma_desc
171 desc->order = lower_32_bits(order_addr | BIT(0));
189 desc->order = lower_32_bits(prtd->dma_desc_arr_phy | BIT(0));
/linux-master/drivers/atm/
eni.h
38 int order; member in struct:eni_free
/linux-master/include/uapi/misc/
xilinx_sdfec.h
48 * @XSDFEC_MAINTAIN_ORDER: Maintain order execution of blocks.
49 * @XSDFEC_OUT_OF_ORDER: Out-of-order execution of blocks.
51 * This enum is used to indicate whether the order of blocks can change from
215 * @order: Order of Operation
226 __u32 order; member in struct:xsdfec_config
380 * ioctl that sets order, if order of blocks can change from input to output

