Searched refs:start (Results 101 - 125 of 6361) sorted by relevance


/linux-master/tools/testing/selftests/mm/
mlock2.h
7 static int mlock2_(void *start, size_t len, int flags) argument
9 return syscall(__NR_mlock2, start, len, flags);
17 unsigned long start, end; local
30 &start, &end, perms, &offset, dev, &inode, path) < 6)
33 if (start <= addr && addr < end)
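The mlock2.h hit above is the selftest's thin wrapper around the mlock2 syscall plus a /proc/self/maps scan for the locked VMA. A minimal userspace sketch of how such a wrapper is typically exercised, assuming a libc without an mlock2() wrapper; the one-page buffer and the MLOCK_ONFAULT flag are illustrative and not taken from the selftest:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01	/* uapi value, in case the libc headers lack it */
#endif

/* Same shape as the selftest helper: raw syscall, no libc wrapper assumed. */
static int mlock2_(void *start, size_t len, int flags)
{
	return syscall(__NR_mlock2, start, len, flags);
}

int main(void)
{
	size_t len = 4096;		/* illustrative: one page */
	void *buf = malloc(len);

	if (!buf)
		return 1;
	memset(buf, 0, len);

	/* Lock the range but let pages be faulted in lazily. */
	if (mlock2_(buf, len, MLOCK_ONFAULT) < 0) {
		perror("mlock2");
		return 1;
	}
	puts("range locked");
	munlock(buf, len);
	free(buf);
	return 0;
}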
/linux-master/include/linux/
numa.h
32 int memory_add_physaddr_to_nid(u64 start);
36 int phys_to_target_node(u64 start);
39 int numa_fill_memblks(u64 start, u64 end);
47 static inline int memory_add_physaddr_to_nid(u64 start) argument
51 static inline int phys_to_target_node(u64 start) argument
/linux-master/drivers/firmware/efi/libstub/
relocate.c
49 u64 start, end; local
63 start = desc->phys_addr;
64 end = start + desc->num_pages * EFI_PAGE_SIZE;
66 if (start < min)
67 start = min;
69 start = round_up(start, align);
70 if ((start + size) > end)
74 EFI_LOADER_DATA, nr_pages, &start);
76 *addr = start;
[all...]
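The relocate.c matches are the descriptor-fitting loop: each EFI memory descriptor's start is clamped to a floor, rounded up to the requested alignment, and rejected when the image no longer fits before the descriptor's end. A self-contained sketch of that fit test; the helper and names are illustrative, and the real code then allocates the pages through the EFI boot services:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SIZE 4096ULL	/* 4 KiB EFI page */

/* align must be a power of two */
static uint64_t round_up_u64(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

/*
 * Fit test per memory descriptor: clamp to `min`, align up, then make sure
 * `size` bytes still fit before the descriptor ends.  Returns 1 and fills
 * *out on success, 0 otherwise.
 */
static int region_fits(uint64_t phys_addr, uint64_t num_pages,
		       uint64_t min, uint64_t align, uint64_t size,
		       uint64_t *out)
{
	uint64_t start = phys_addr;
	uint64_t end = start + num_pages * EFI_PAGE_SIZE;

	if (start < min)
		start = min;
	start = round_up_u64(start, align);
	if (start + size > end)
		return 0;
	*out = start;
	return 1;
}

int main(void)
{
	uint64_t addr;

	/* 16-page descriptor at 0x1000, floor 0x2000, 16 KiB alignment, 8 KiB needed. */
	if (region_fits(0x1000, 16, 0x2000, 0x4000, 0x2000, &addr))
		printf("fits at 0x%llx\n", (unsigned long long)addr);
	return 0;
}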
unaccepted_memory.c
91 * unaligned start/end addresses and either:
97 void process_unaccepted_memory(u64 start, u64 end) argument
120 if (end - start < 2 * unit_size) {
121 arch_accept_memory(start, end);
126 * No matter how the start and end are aligned, at least one unaccepted
130 /* Immediately accept a <unit_size piece at the start: */
131 if (start & unit_mask) {
132 arch_accept_memory(start, round_up(start, unit_size));
133 start
180 accept_memory(phys_addr_t start, phys_addr_t end) argument
[all...]
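process_unaccepted_memory() has to handle ranges that are not aligned to the acceptance unit: pieces shorter than one unit at an unaligned start or end are accepted immediately, and only the aligned middle is recorded for later acceptance. A sketch of that split, assuming unit_size is a power of two; accept_now() and mark_for_later() are hypothetical stand-ins for arch_accept_memory() and the unaccepted-memory bitmap update:

#include <stdint.h>
#include <stdio.h>

static void accept_now(uint64_t start, uint64_t end)		/* stand-in */
{
	printf("accept now [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

static void mark_for_later(uint64_t start, uint64_t end)	/* stand-in */
{
	printf("defer      [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

static void process_range(uint64_t start, uint64_t end, uint64_t unit_size)
{
	uint64_t unit_mask = unit_size - 1;

	/* Too small to be worth deferring: accept the whole thing now. */
	if (end - start < 2 * unit_size) {
		accept_now(start, end);
		return;
	}

	/* Sub-unit piece at an unaligned start. */
	if (start & unit_mask) {
		accept_now(start, (start | unit_mask) + 1);
		start = (start | unit_mask) + 1;
	}

	/* Sub-unit piece at an unaligned end. */
	if (end & unit_mask) {
		accept_now(end & ~unit_mask, end);
		end &= ~unit_mask;
	}

	/* Aligned middle: record it and accept it on first use. */
	mark_for_later(start, end);
}

int main(void)
{
	process_range(0x1800, 0xa400, 0x2000);	/* 8 KiB unit, unaligned both ends */
	return 0;
}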
/linux-master/arch/arm/mach-omap1/
mcbsp.c
95 .start = OMAP1510_MCBSP1_BASE,
101 .start = INT_McBSP1RX,
106 .start = INT_McBSP1TX,
111 .start = 9,
116 .start = 8,
122 .start = OMAP1510_MCBSP2_BASE,
128 .start = INT_1510_SPI_RX,
133 .start = INT_1510_SPI_TX,
138 .start = 17,
143 .start
[all...]
/linux-master/arch/powerpc/mm/
mem.c
57 int memory_add_physaddr_to_nid(u64 start) argument
59 return hot_add_scn_to_nid(start);
64 int __weak create_section_mapping(unsigned long start, unsigned long end, argument
70 int __weak remove_section_mapping(unsigned long start, unsigned long end) argument
75 int __ref arch_create_linear_mapping(int nid, u64 start, u64 size, argument
80 start = (unsigned long)__va(start);
82 rc = create_section_mapping(start, start + size, nid,
87 start, star
93 arch_remove_linear_mapping(u64 start, u64 size) argument
117 update_end_of_memory_vars(u64 start, u64 size) argument
144 arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) argument
160 arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) argument
359 phys_addr_t start, end; local
420 unsigned long start, end; local
[all...]
dma-noncoherent.c
24 unsigned long start = (unsigned long)vaddr; local
25 unsigned long end = start + size;
35 if ((start | end) & (L1_CACHE_BYTES - 1))
36 flush_dcache_range(start, end);
38 invalidate_dcache_range(start, end);
41 clean_dcache_range(start, end);
44 flush_dcache_range(start, end);
64 unsigned long flags, start, seg_offset = offset; local
71 start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
74 __dma_sync((void *)start, seg_siz
102 unsigned long start = (unsigned long)page_address(page) + offset; local
[all...]
/linux-master/tools/lib/
string.c
172 static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes) argument
175 if (*start != value)
176 return (void *)start;
177 start++;
185 * @start: The memory area
192 void *memchr_inv(const void *start, int c, size_t bytes) argument
199 return check_bytes8(start, value, bytes);
206 prefix = (unsigned long)start % 8;
211 r = check_bytes8(start, value, prefix);
214 start
[all...]
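memchr_inv() is the inverse of memchr(): it returns the address of the first byte that differs from c, or NULL when the whole area matches, and check_bytes8() above is the byte-at-a-time helper used for unaligned prefixes and suffixes. A stand-in with the same contract but without the word-at-a-time fast path, plus the typical "is this buffer all one value?" use (the helper name is made up for the example):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Same contract as memchr_inv(): first byte not equal to `c`, or NULL. */
static void *memchr_inv_simple(const void *start, int c, size_t bytes)
{
	const unsigned char *p = start;

	while (bytes--) {
		if (*p != (unsigned char)c)
			return (void *)p;
		p++;
	}
	return NULL;
}

int main(void)
{
	char buf[64];

	memset(buf, 0, sizeof(buf));
	printf("all zero: %s\n",
	       memchr_inv_simple(buf, 0, sizeof(buf)) ? "no" : "yes");

	buf[13] = 0x5a;
	printf("first nonzero byte at offset %td\n",
	       (char *)memchr_inv_simple(buf, 0, sizeof(buf)) - buf);
	return 0;
}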
/linux-master/arch/riscv/kernel/
hibernate.c
168 static int temp_pgtable_map_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start, argument
182 dst_ptep = pte_offset_kernel(dst_pmdp, start);
183 src_ptep = pte_offset_kernel(src_pmdp, start);
190 } while (dst_ptep++, src_ptep++, start += PAGE_SIZE, start < end);
195 static int temp_pgtable_map_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start, argument
211 dst_pmdp = pmd_offset(dst_pudp, start);
212 src_pmdp = pmd_offset(src_pudp, start);
217 next = pmd_addr_end(start, end);
225 ret = temp_pgtable_map_pte(dst_pmdp, src_pmdp, start, nex
234 temp_pgtable_map_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start, unsigned long end, pgprot_t prot) argument
273 temp_pgtable_map_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start, unsigned long end, pgprot_t prot) argument
312 temp_pgtable_mapping(pgd_t *pgdp, unsigned long start, unsigned long end, pgprot_t prot) argument
357 unsigned long start = PAGE_OFFSET; local
[all...]
/linux-master/arch/microblaze/kernel/cpu/
cache.c
92 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
95 if (start < UINT_MAX - cache_size) \
96 end = min(start + cache_size, end); \
97 start &= align; \
120 * start address is cache aligned
125 #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
131 count = end - start; \
137 : : "r" (start), "r" (count), \
142 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
147 WARN_ON(end < start); \
159 __flush_icache_range_msr_irq(unsigned long start, unsigned long end) argument
185 __flush_icache_range_nomsr_irq(unsigned long start, unsigned long end) argument
213 __flush_icache_range_noirq(unsigned long start, unsigned long end) argument
377 __invalidate_dcache_range_wb(unsigned long start, unsigned long end) argument
397 __invalidate_dcache_range_nomsr_wt(unsigned long start, unsigned long end) argument
417 __invalidate_dcache_range_msr_irq_wt(unsigned long start, unsigned long end) argument
444 __invalidate_dcache_range_nomsr_irq(unsigned long start, unsigned long end) argument
489 __flush_dcache_range_wb(unsigned long start, unsigned long end) argument
[all...]
/linux-master/fs/xfs/scrub/
bitmap.c
50 xbitmap64_tree_iter_first(struct rb_root_cached *root, uint64_t start,
54 xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
73 uint64_t start,
78 uint64_t last = start + len - 1;
80 while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last))) {
81 if (bn->bn_start < start && bn->bn_last > last) {
86 bn->bn_last = start - 1;
97 } else if (bn->bn_start < start) {
100 bn->bn_last = start - 1;
122 uint64_t start,
71 xbitmap64_clear( struct xbitmap64 *bitmap, uint64_t start, uint64_t len) argument
120 xbitmap64_set( struct xbitmap64 *bitmap, uint64_t start, uint64_t len) argument
277 xbitmap64_test( struct xbitmap64 *bitmap, uint64_t start, uint64_t *len) argument
346 xbitmap32_clear( struct xbitmap32 *bitmap, uint32_t start, uint32_t len) argument
395 xbitmap32_set( struct xbitmap32 *bitmap, uint32_t start, uint32_t len) argument
552 xbitmap32_test( struct xbitmap32 *bitmap, uint32_t start, uint32_t *len) argument
[all...]
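xbitmap64_clear() removes [start, start+len) from a set of disjoint intervals kept in an rbtree: a node that strictly covers the range is split in two, nodes overlapping only one edge are trimmed, and fully covered nodes are dropped. A compact sketch of the same case analysis over a plain array of intervals (no rbtree, no allocation-failure handling), purely to show the splitting logic:

#include <stdint.h>
#include <stdio.h>

struct ival { uint64_t start, last; };	/* inclusive endpoints, disjoint */

/* Clear [start, last] from the set; returns the new interval count. */
static int clear_range(struct ival *set, int n, int cap,
		       uint64_t start, uint64_t last)
{
	for (int i = 0; i < n; i++) {
		struct ival *iv = &set[i];

		if (iv->last < start || iv->start > last)
			continue;			/* no overlap */

		if (iv->start < start && iv->last > last) {
			/* Node strictly covers the range: split it in two. */
			if (n < cap) {
				set[n].start = last + 1;
				set[n].last = iv->last;
				n++;
			}
			iv->last = start - 1;
		} else if (iv->start < start) {
			iv->last = start - 1;		/* trim the right edge */
		} else if (iv->last > last) {
			iv->start = last + 1;		/* trim the left edge */
		} else {
			*iv = set[--n];			/* fully covered: drop it */
			i--;
		}
	}
	return n;
}

int main(void)
{
	struct ival set[8] = { { 0, 99 }, { 200, 299 } };
	int n = clear_range(set, 2, 8, 50, 249);

	for (int i = 0; i < n; i++)
		printf("[%llu, %llu]\n",
		       (unsigned long long)set[i].start,
		       (unsigned long long)set[i].last);
	return 0;
}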
/linux-master/arch/arm/mm/
cache-tauros2.c
66 static void tauros2_inv_range(unsigned long start, unsigned long end) argument
71 if (start & (CACHE_LINE_SIZE - 1)) {
72 tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
73 start = (start | (CACHE_LINE_SIZE - 1)) + 1;
85 * Invalidate all full cache lines between 'start' and 'end'.
87 while (start < end) {
88 tauros2_inv_pa(start);
89 start += CACHE_LINE_SIZE;
95 static void tauros2_clean_range(unsigned long start, unsigne argument
106 tauros2_flush_range(unsigned long start, unsigned long end) argument
[all...]
/linux-master/drivers/soc/fsl/qe/
qe_common.c
34 s32 start; member in struct:muram_block
83 ret = gen_pool_add(muram_pool, r.start - muram_pbase +
118 s32 start; local
123 start = gen_pool_alloc_algo(muram_pool, size, algo, data);
124 if (!start) {
128 start = start - GENPOOL_OFFSET;
129 memset_io(cpm_muram_addr(start), 0, size);
130 entry->start = start;
149 s32 start; local
201 s32 start; local
[all...]
/linux-master/arch/powerpc/include/asm/
rheap.h
21 unsigned long start; member in struct:_rh_block
41 unsigned long start; member in struct:_rh_stats
60 extern int rh_attach_region(rh_info_t * info, unsigned long start, int size);
63 extern unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size);
73 extern unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size,
77 extern int rh_free(rh_info_t * info, unsigned long start);
90 extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner);
/linux-master/fs/squashfs/
id.c
67 u64 start, end; local
79 * match the table start and end points
97 start = le64_to_cpu(table[n]);
100 if (start >= end || (end - start) >
107 start = le64_to_cpu(table[indexes - 1]);
108 if (start >= id_table_start || (id_table_start - start) >
/linux-master/drivers/infiniband/hw/usnic/
usnic_uiom_interval_tree.c
42 #define START(node) ((node)->start)
45 #define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \
47 node = usnic_uiom_interval_node_alloc(start, \
57 #define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \
60 MAKE_NODE(node, start, end, \
70 usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt, argument
78 interval->start = start;
95 if (node_a->start < node_b->start)
104 find_intervals_intersection_sorted(struct rb_root_cached *root, unsigned long start, unsigned long last, struct list_head *list) argument
120 usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last, int flags, int flag_mask, struct rb_root_cached *root, struct list_head *diff_set) argument
179 usnic_uiom_insert_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, int flags) argument
250 usnic_uiom_remove_interval(struct rb_root_cached *root, unsigned long start, unsigned long last, struct list_head *removed) argument
[all...]
/linux-master/arch/xtensa/kernel/
pci.c
45 resource_size_t start = res->start; local
54 if (start & 0x300)
55 start = (start + 0x3ff) & ~0x3ff;
58 return start;
85 vma->vm_pgoff += (ioaddr + pci_ctrl->io_space.start) >> PAGE_SHIFT;
/linux-master/mm/damon/
vaddr-test.h
52 * first identifies the start of mappings, end of mappings, and the two biggest
60 * mapped. To cover every mappings, the three regions should start with 10,
86 KUNIT_EXPECT_EQ(test, 10ul, regions[0].start);
88 KUNIT_EXPECT_EQ(test, 200ul, regions[1].start);
90 KUNIT_EXPECT_EQ(test, 300ul, regions[2].start);
111 * regions an array containing start/end addresses of current
115 * expected start/end addresses of monitoring target regions that
148 KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
168 (struct damon_addr_range){.start = 5, .end = 27},
169 (struct damon_addr_range){.start
249 damon_test_split_evenly_fail(struct kunit *test, unsigned long start, unsigned long end, unsigned int nr_pieces) argument
268 damon_test_split_evenly_succ(struct kunit *test, unsigned long start, unsigned long end, unsigned int nr_pieces) argument
[all...]
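The vaddr-test.h cases around damon_test_split_evenly exercise splitting one [start, end) range into nr_pieces sub-regions and expect failure for a zero piece count or a range too small to split. A standalone sketch of such an even split, with the remainder folded into the last piece; this is illustrative, not the kernel routine:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* [start, end) */

/* Split [start, end) into nr_pieces contiguous pieces.  Returns the number
 * of pieces written, or 0 when the split is impossible. */
static unsigned int split_evenly(uint64_t start, uint64_t end,
				 unsigned int nr_pieces, struct range *out)
{
	uint64_t piece;

	if (nr_pieces == 0 || end <= start)
		return 0;
	piece = (end - start) / nr_pieces;
	if (piece == 0)
		return 0;			/* range too small to split evenly */

	for (unsigned int i = 0; i < nr_pieces; i++) {
		out[i].start = start + (uint64_t)i * piece;
		out[i].end = out[i].start + piece;
	}
	out[nr_pieces - 1].end = end;		/* last piece absorbs the remainder */
	return nr_pieces;
}

int main(void)
{
	struct range r[3];
	unsigned int n = split_evenly(10, 27, 3, r);	/* illustrative range */

	for (unsigned int i = 0; i < n; i++)
		printf("[%llu, %llu)\n",
		       (unsigned long long)r[i].start,
		       (unsigned long long)r[i].end);
	return 0;
}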
/linux-master/arch/csky/abiv2/inc/abi/
cacheflush.h
16 #define flush_cache_range(vma, start, end) do { } while (0)
37 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
40 unsigned long start, unsigned long end);
43 #define flush_cache_vmap(start, end) do { } while (0)
44 #define flush_cache_vmap_early(start, end) do { } while (0)
45 #define flush_cache_vunmap(start, end) do { } while (0)
/linux-master/fs/btrfs/tests/
extent-map-tests.c
31 "em leak: em (start %llu len %llu block_start %llu block_len %llu) refs %d",
32 em->start, em->len, em->block_start,
65 u64 start = 0; local
77 em->start = 0;
82 ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
98 em->start = SZ_16K;
103 ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
119 em->start = start;
121 em->block_start = start;
248 __test_case_3(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode, u64 start) argument
355 __test_case_4(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode, u64 start) argument
483 add_compressed_extent(struct btrfs_inode *inode, u64 start, u64 len, u64 block_start) argument
514 u64 start; member in struct:extent_range
610 u64 start, end; local
[all...]
/linux-master/arch/sh/mm/
cache-sh2.c
18 static void sh2__flush_wback_region(void *start, int size) argument
23 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
24 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
39 static void sh2__flush_purge_region(void *start, int size) argument
44 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
45 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
53 static void sh2__flush_invalidate_region(void *start, int size) argument
75 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
76 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
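Both sh2 flush helpers turn an arbitrary (start, size) pair into cache-line-granular bounds: begin is start rounded down to an L1 line, end is start + size rounded up to the next line, and the loop then visits one line at a time. The same arithmetic as a standalone sketch (the 32-byte line size is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES 32UL	/* illustrative line size */

static void walk_cache_lines(uintptr_t start, size_t size)
{
	uintptr_t begin = start & ~(L1_CACHE_BYTES - 1);
	uintptr_t end = (start + size + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

	for (uintptr_t v = begin; v < end; v += L1_CACHE_BYTES)
		printf("line at %#lx\n", (unsigned long)v);	/* flush/invalidate here */
}

int main(void)
{
	walk_cache_lines(0x1007, 64);	/* unaligned start, odd size: 3 lines touched */
	return 0;
}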
tlbflush_32.c
39 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, argument
50 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
60 start &= PAGE_MASK;
67 while (start < end) {
68 local_flush_tlb_one(asid, start);
69 start += PAGE_SIZE;
78 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) argument
85 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
93 start &= PAGE_MASK;
97 while (start < en
[all...]
/linux-master/drivers/pnp/
system.c
30 resource_size_t start = r->start, end = r->end; local
39 res = request_region(start, end - start + 1, regionid);
41 res = request_mem_region(start, end - start + 1, regionid);
64 if (res->start == 0)
66 if (res->start < 0x100)
76 if (res->end < res->start)
/linux-master/tools/perf/util/
svghelper.c
85 void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end) argument
94 first_time = start;
153 void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges) argument
155 double w = time2pixels(end) - time2pixels(start);
164 time2pixels(start),
172 void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges) argument
174 double w = time2pixels(end) - time2pixels(start);
183 time2pixels(start),
191 void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges) argument
193 double w = time2pixels(end) - time2pixels(start);
210 svg_box(int Yslot, u64 start, u64 end, const char *type) argument
220 svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) argument
234 svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) argument
288 svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) argument
382 svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace) argument
416 svg_cstate(int cpu, u64 start, u64 end, int type) argument
473 svg_pstate(int cpu, u64 start, u64 end, u64 freq) argument
494 svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace) argument
552 svg_wakeline(u64 start, int row1, int row2, const char *backtrace) argument
581 svg_interrupt(u64 start, int row, const char *backtrace) argument
601 svg_text(int Yslot, u64 start, const char *text) argument
[all...]
/linux-master/lib/
iommu-helper.c
10 unsigned long start, unsigned int nr,
19 index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
22 start = ALIGN(shift + index, boundary_size) - shift;
9 iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, unsigned long shift, unsigned long boundary_size, unsigned long align_mask) argument

