Searched refs:order (Results 251 - 275 of 523) sorted by last modified time

<< 11 12 13 14 15 16 17 18 19 20 >>

/linux-master/include/xen/interface/io/
ring.h    317 * from the overall order.
353 #define XEN_FLEX_RING_SIZE(order) \
354 (1UL << ((order) + XEN_PAGE_SHIFT - 1))
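
As an aside on the ring.h hit above: XEN_FLEX_RING_SIZE turns an overall allocation order into the byte size of one ring direction. A minimal userspace sketch of that arithmetic, assuming XEN_PAGE_SHIFT is 12 (4 KiB Xen ABI pages); the "- 1" reflects that the in and out rings each take half of the 2^order pages. This is a sketch of the formula, not the kernel header itself:

    #include <stdio.h>

    #define XEN_PAGE_SHIFT 12   /* assumption: 4 KiB Xen ABI pages */
    /* Mirrors the kernel macro: one ring direction gets half of the 2^order pages. */
    #define XEN_FLEX_RING_SIZE(order) (1UL << ((order) + XEN_PAGE_SHIFT - 1))

    int main(void)
    {
        for (int order = 1; order <= 4; order++)
            printf("order %d -> per-direction ring size %lu KiB\n",
                   order, XEN_FLEX_RING_SIZE(order) >> 10);
        return 0;
    }
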
/linux-master/include/linux/
dma-map-ops.h    129 unsigned int order, bool no_warn);
151 size_t count, unsigned int order, bool no_warn)
179 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
190 #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
191 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
198 int dma_release_from_global_coherent(int order, void *vaddr);
208 static inline int dma_release_from_global_coherent(int order, void *vaddr) argument
150 dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order, bool no_warn) argument
/linux-master/drivers/virtio/
virtio_balloon.c    35 /* The order of free page blocks to report to host */
300 * is true, we *have* to do it in this order
793 * In order to avoid lock contention while migrating pages concurrently
1009 * The default page reporting order is @pageblock_order, which
1013 * So we specify the page reporting order to 5, corresponding
1017 * Ideally, the page reporting order is selected based on the
1019 * that value. The hard-coded order would be fine currently.
1022 vb->pr_dev_info.order = 5;
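
The comment above pins the balloon's free-page reporting order at 5; a tiny sketch of what that corresponds to, assuming 4 KiB base pages (order 5 = 32 pages = 128 KiB per reported block). The PAGE_SHIFT value is an assumption here, not taken from the driver:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB base pages */

    int main(void)
    {
        unsigned int order = 5;                          /* value hard-coded by virtio_balloon */
        unsigned long pages = 1UL << order;              /* 32 pages per block */
        unsigned long kib = pages << (PAGE_SHIFT - 10);  /* 128 KiB per block */

        printf("reporting order %u: %lu pages, %lu KiB per block\n", order, pages, kib);
        return 0;
    }
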
/linux-master/drivers/virt/vboxguest/
vboxguest_utils.c    71 int order = get_order(PAGE_ALIGN(len)); local
73 req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
105 * the outl and any reads of the req happen in the correct order.
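
The vboxguest hit pairs get_order() with __get_free_pages(): the request length is rounded up to whole pages and then to a power-of-two block. A userspace re-creation of that rounding, assuming 4 KiB pages; the kernel's get_order() uses bit tricks rather than a loop, so treat this only as an illustration of the result:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Smallest order such that (1 << order) pages cover 'size'. */
    static int my_get_order(unsigned long size)
    {
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int order = 0;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        /* 9000 bytes -> 3 pages -> rounded up to 4 pages, i.e. order 2. */
        printf("%d %d %d\n", my_get_order(100), my_get_order(4096), my_get_order(9000));
        return 0;
    }
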
/linux-master/drivers/tty/
sysrq.c    393 .order = -1,
/linux-master/drivers/s390/char/
con3270.c    150 static char *tty3270_add_ba(struct tty3270 *tp, char *cp, char order, int x, int y) argument
152 *cp++ = order;
/linux-master/drivers/pci/
setup-bus.c    957 int order; local
959 for (order = 0; order <= max_order; order++) {
962 align1 <<= (order + 20);
968 align += aligns[order];
1002 int order, max_order; local
1048 * keep "order" from being negative for smaller
1052 order = __ffs(align) - 20;
1053 if (order <
[all...]
/linux-master/drivers/iommu/
io-pgtable-arm.c    180 /* Rotate the packed high-order bits back to the top */
196 int order = get_order(size);
205 pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
233 iommu_free_pages(pages, order);
932 * Concatenate PGDs at level 1 if possible in order to reduce
195 int order = get_order(size); local
/linux-master/arch/x86/kernel/
aperture_64.c    166 static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order) argument
184 old_order = *order;
191 *order = 7 - nbits;
192 if ((int)*order < 0) /* < 32MB */
193 *order = 0;
201 * so let double check that order, and lets trust AMD NB settings:
206 if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) {
208 bus, slot, func, 32 << *order, apsizereg);
209 *order = old_order;
213 bus, slot, func, aper, aper + (32ULL << (*order
234 search_agp_bridge(u32 *order, int *valid_agp) argument
[all...]
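
In the read_agp() hits above, the AGP aperture order encodes a size of 32 MB << order, and the code rejects apertures that would spill past 4 GB. A small sketch of that size check, using a hypothetical aperture base address (the base value below is made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t base = 0xe0000000ULL;   /* hypothetical aperture base */

        /* order 0 = 32 MB; each step doubles the aperture. */
        for (int order = 0; order <= 5; order++) {
            uint64_t size = 32ULL << (20 + order);
            int ok = (base + size <= 0x100000000ULL);

            printf("order %d: %4llu MB aperture, below 4G: %s\n",
                   order, (unsigned long long)(size >> 20), ok ? "yes" : "no");
        }
        return 0;
    }
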
/linux-master/arch/um/kernel/
process.c    62 void free_stack(unsigned long stack, int order) argument
64 free_pages(stack, order);
67 unsigned long alloc_stack(int order, int atomic) argument
74 page = __get_free_pages(flags, order);
/linux-master/arch/um/include/shared/
kern_util.h    22 extern unsigned long alloc_stack(int order, int atomic);
23 extern void free_stack(unsigned long stack, int order);
/linux-master/arch/riscv/lib/
clear_page.S    14 #define CBOZ_ALT(order, old, new) \
16 ((order) << 16) | RISCV_ISA_EXT_ZICBOZ, \
/linux-master/arch/powerpc/include/asm/
kvm_host.h    264 /* Guest HPT size is 2**(order) bytes */
265 u32 order; member in struct:kvm_hpt_info
/linux-master/arch/mips/mm/
c-r4k.c    210 #define JUMP_TO_ALIGN(order) \
213 ".align\t" #order "\n\t" \
/linux-master/arch/loongarch/kvm/
main.c    315 int cpu, order; local
340 order = get_order(kvm_exception_size + kvm_enter_guest_size);
341 addr = (void *)__get_free_pages(GFP_KERNEL, order);
355 kvm_loongarch_ops->page_order = order;
/linux-master/arch/arm/mm/
dma-mapping.c    144 unsigned long order = get_order(size); local
147 page = alloc_pages(gfp, order);
154 split_page(page, order);
155 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
396 unsigned long order = get_order(size); local
401 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
759 unsigned int order = get_order(size); local
767 if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
768 order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
771 align = (1 << order)
868 unsigned long order = get_order(size); local
894 int j, order; local
[all...]
/linux-master/tools/testing/selftests/bpf/prog_tests/
cgrp_local_storage.c    196 linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
/linux-master/kernel/dma/
coherent.c    145 int order = get_order(size); local
155 pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
200 int order, void *vaddr)
208 bitmap_release_region(mem->bitmap, page, order);
218 * @order: the order of pages allocated
227 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) argument
231 return __dma_release_from_coherent(mem, order, vaddr);
292 int dma_release_from_global_coherent(int order, void *vaddr) argument
297 return __dma_release_from_coherent(dma_coherent_default_memory, order,
199 __dma_release_from_coherent(struct dma_coherent_mem *mem, int order, void *vaddr) argument
[all...]
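
The coherent-pool hits above hand an allocation order to bitmap_find_free_region()/bitmap_release_region(), which carve naturally aligned runs of 2^order pages out of a bitmap. A minimal userspace stand-in for that behaviour, using one byte per page rather than one bit to keep the sketch short; this is the idea, not the kernel bitmap API:

    #include <stdio.h>
    #include <string.h>

    #define POOL_BITS 64   /* hypothetical pool of 64 pages */

    static unsigned char used[POOL_BITS];

    /* Stand-in for bitmap_find_free_region(): find a naturally aligned
     * run of (1 << order) free pages, mark it used, return its first index. */
    static int find_free_region(int order)
    {
        int run = 1 << order;

        for (int start = 0; start + run <= POOL_BITS; start += run) {
            int free = 1;

            for (int i = start; i < start + run; i++)
                if (used[i]) { free = 0; break; }
            if (free) {
                memset(&used[start], 1, run);
                return start;
            }
        }
        return -1;
    }

    /* Counterpart of bitmap_release_region(). */
    static void release_region(int start, int order)
    {
        memset(&used[start], 0, 1 << order);
    }

    int main(void)
    {
        int a = find_free_region(2);  /* 4 pages */
        int b = find_free_region(3);  /* 8 pages */

        printf("a=%d b=%d\n", a, b);  /* a=0 b=8: regions stay order-aligned */
        release_region(a, 2);
        release_region(b, 3);
        return 0;
    }
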
/linux-master/drivers/net/ethernet/socionext/
netsec.c    1296 .order = 0,
/linux-master/drivers/net/ethernet/chelsio/cxgb3/
adapter.h    112 unsigned int order; /* order of page allocations */ member in struct:sge_fl
/linux-master/drivers/mmc/core/
mmc_test.c    45 * @order: order of the number of pages allocated
49 unsigned int order; member in struct:mmc_test_pages
323 mem->arr[mem->cnt].order);
367 unsigned int order; local
371 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
373 page = alloc_pages(flags, order);
374 if (page || !order)
376 order -= 1;
384 mem->arr[mem->cnt].order
[all...]
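
The mmc_test.c allocation loop shown above (hits 371-376) asks for a large order first and shrinks the request one order at a time until the allocation succeeds or order reaches zero. A sketch of that fallback pattern with a hypothetical allocator that only satisfies small orders; fake_alloc_pages() is invented here purely to make the loop observable:

    #include <stdio.h>

    /* Hypothetical allocator that only succeeds up to order 3, standing in
     * for alloc_pages() failing on large contiguous requests. */
    static void *fake_alloc_pages(unsigned int order)
    {
        static char dummy;

        return (order <= 3) ? (void *)&dummy : NULL;
    }

    int main(void)
    {
        unsigned int order = 6;   /* start by asking for 64 contiguous pages */
        void *page;

        for (;;) {
            page = fake_alloc_pages(order);
            if (page || !order)
                break;
            order -= 1;           /* same shrink step as the mmc_test loop */
        }
        printf("got %s at order %u\n", page ? "pages" : "nothing", order);
        return 0;
    }
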
/linux-master/drivers/memory/
omap-gpmc.c    935 int order; local
938 order = GPMC_CHUNK_SHIFT - 1;
941 order++;
943 size = 1 << order;
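
The omap-gpmc hit grows "order" until 1 << order covers the requested chip-select size, i.e. it rounds the size up to the next power-of-two chunk. A sketch of that rounding; the GPMC_CHUNK_SHIFT value of 24 (16 MB minimum chunk) is an assumption here, not taken from the driver:

    #include <stdio.h>

    #define GPMC_CHUNK_SHIFT 24   /* assumption: 16 MB minimum GPMC chunk */

    /* Round a requested size up to the next power-of-two chunk, mirroring
     * the order loop in the omap-gpmc.c hit above. */
    static unsigned long round_to_chunk(unsigned long want)
    {
        int order = GPMC_CHUNK_SHIFT - 1;

        do {
            order++;
        } while ((1UL << order) < want);
        return 1UL << order;
    }

    int main(void)
    {
        printf("%lu MB\n", round_to_chunk(5UL << 20) >> 20);    /* 5 MB  -> 16 MB */
        printf("%lu MB\n", round_to_chunk(20UL << 20) >> 20);   /* 20 MB -> 32 MB */
        return 0;
    }
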
/linux-master/drivers/media/test-drivers/vimc/
vimc-debayer.c    26 enum vimc_debayer_rgb_colors order[2][2]; member in struct:vimc_debayer_pix_map
71 .order = { { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN },
76 .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE },
81 .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED },
86 .order = { { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN },
91 .order = { { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN },
96 .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE },
101 .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED },
106 .order = { { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN },
111 .order
[all...]
vimc-sensor.c    207 const char *order = tpg_g_color_order(&vsensor->tpg); local
210 16, order);
/linux-master/drivers/media/platform/ti/omap3isp/
H A Disppreview.c229 * hardware expects blocks to follow the Bayer order of the input data, while
230 * the driver stores the table in GRBG order in memory. The blocks need to be
242 const unsigned int *order = cfa_coef_order[prev->params.cfa_order]; local
257 const __u32 *block = cfa->table[order[i]];
1004 * hardware expects blocks to follow the Bayer order of the input data, while
1005 * the driver stores the table in GRBG order in memory. The blocks need to be

