Searched refs:end (Results 451 - 475 of 3764) sorted by path


/linux-master/arch/arm64/kvm/hyp/
pgtable.c
67 const u64 end; member in struct:kvm_pgtable_walk_data
95 if (granule > (ctx->end - ctx->addr))
224 .end = data->end,
290 if (data->addr >= data->end)
307 if (data->addr > limit || data->end > limit)
313 for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
330 .end = PAGE_ALIGN(walk_data.addr + size),
533 if (ctx->end - ctx->addr < granule)
1061 * a table. Accesses beyond 'end' tha
[all...]
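Note on the pgtable.c hits at 95 and 533 above: both are the same bounds test written from opposite ends, checking that the remaining range can still hold a whole block mapping of size granule. A minimal sketch of the check, with stand-in parameters for the walker context (the real walker also verifies level support):

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: addr/end stand in for ctx->addr/ctx->end. */
static bool block_mapping_fits(uint64_t addr, uint64_t end, uint64_t granule)
{
	if (granule > end - addr)           /* range too short for a block */
		return false;
	return (addr & (granule - 1)) == 0; /* block must also be aligned */
}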
/linux-master/arch/arm64/kvm/
hypercalls.c
174 u32 start, end; local
184 end = start + filter.nr_functions - 1;
186 if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
202 r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
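The hypercalls.c hits turn a (start, nr_functions) pair into the inclusive range [start, end] that mtree_insert_range() expects; end = start + nr - 1 wraps on overflow, which the end < start test at line 186 rejects. A sketch of that validation in isolation (names are illustrative; the explicit nr check covers the start == 0, nr == 0 corner):

#include <stdbool.h>
#include <stdint.h>

static bool valid_inclusive_range(uint32_t start, uint32_t nr, uint32_t *end)
{
	*end = start + nr - 1;

	/* wrap-around (or an empty range) is invalid */
	return nr != 0 && *end >= start;
}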
mmu.c
34 static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end, argument
39 return (boundary - 1 < end - 1) ? boundary : end;
42 static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end) argument
46 return __stage2_range_addr_end(addr, end, size);
57 phys_addr_t end,
70 next = stage2_range_addr_end(addr, end);
75 if (resched && next != end)
77 } while (addr = next, addr != end);
82 #define stage2_apply_range_resched(mmu, addr, end, f
56 stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end, int (*fn)(struct kvm_pgtable *, u64, u64), bool resched) argument
114 kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) argument
323 phys_addr_t end = start + size; local
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; local
518 phys_addr_t start, end, cur; local
550 phys_addr_t start, end, cur; local
579 unsigned long end = kern_hyp_va((unsigned long)to); local
1110 stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) argument
1132 phys_addr_t start, end; local
1159 phys_addr_t start, end; local
1191 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; local
[all...]
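__stage2_range_addr_end() (line 34) plus the do/while loop in stage2_apply_range() (hits 56-78) are the kernel's usual chunked-walk idiom: clamp the cursor to the next size-aligned boundary, hand one chunk to the callback, and optionally reschedule between chunks. The (boundary - 1 < end - 1) comparison keeps the clamp correct when the boundary wraps to 0 at the top of the address space. A self-contained sketch with illustrative names:

#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Clamp [addr, end) to the next size-aligned boundary (size a power of two). */
static phys_addr_t range_addr_end(phys_addr_t addr, phys_addr_t end,
				  phys_addr_t size)
{
	phys_addr_t boundary = (addr + size) & ~(size - 1);

	/* (boundary - 1 < end - 1) is an overflow-safe "boundary < end" */
	return (boundary - 1 < end - 1) ? boundary : end;
}

/* Walk the range one chunk at a time, as stage2_apply_range() does. */
static int apply_range(phys_addr_t addr, phys_addr_t end, phys_addr_t size,
		       int (*fn)(phys_addr_t start, phys_addr_t end))
{
	phys_addr_t next;
	int ret;

	do {
		next = range_addr_end(addr, end, size);
		ret = fn(addr, next);
		if (ret)
			break;
	} while (addr = next, addr != end);

	return ret;
}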
va_layout.c
93 int32_t *end = (int32_t *)__hyp_reloc_end; local
95 for (rel = begin; rel < end; ++rel) {
/linux-master/arch/arm64/kvm/vgic/
vgic-mmio-v3.c
305 gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE; local
312 if (iter->base == end && iter->free_index > 0)
865 /* cross the end of memory ? */
/linux-master/arch/arm64/lib/
copy_from_user.S
55 end .req x5
58 add end, x0, x2
70 9998: sub x0, end, dst // bytes not copied
copy_to_user.S
54 end .req x5
57 add end, x0, x2
70 9998: sub x0, end, dst // bytes not copied
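Both usercopy routines keep end = x0 + x2 (destination plus count) live in a register so the fault fixup at label 9998 can return end - dst, the number of bytes not copied, which is exactly what copy_{from,to}_user() must report. The same arithmetic in C:

/* Fault-fixup arithmetic from the two .S files above: if the copy
 * faults with the cursor at dst, the unfinished tail is end - dst. */
static unsigned long bytes_not_copied(unsigned long dst, unsigned long start,
				      unsigned long count)
{
	unsigned long end = start + count;

	return end - dst;
}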
delay.c
31 u64 end = start + cycles; local
37 wfit(end);
39 wfet(end);
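delay.c computes an absolute deadline once (end = start + cycles) and then parks the CPU with WFIT or WFET until the counter reaches it. A portable busy-wait sketch of the same deadline arithmetic; read_cycle_counter() is a hypothetical stand-in for the architected counter read:

#include <stdint.h>

extern uint64_t read_cycle_counter(void); /* hypothetical counter source */

static void delay_cycles(uint64_t cycles)
{
	uint64_t end = read_cycle_counter() + cycles;

	/*
	 * (end - now) stays positive as a signed value until the deadline
	 * passes, so the loop is correct across counter wrap-around.
	 */
	while ((int64_t)(end - read_cycle_counter()) > 0)
		;
}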
strcmp.S
86 L(end):
123 checking to make sure that we don't access beyond the end of SRC2. */
171 cbnz syndrome, L(end)
183 b L(end)
/linux-master/arch/arm64/mm/
contpte.c
33 * Unfold any partially covered contpte block at the beginning and end
244 unsigned long end; local
259 end = addr + (nr << PAGE_SHIFT);
264 next = pte_cont_addr_end(addr, end);
279 } while (addr != end);
379 unsigned long end = start + nr; local
382 end = ALIGN(end, CONT_PTE_SIZE);
389 __clear_young_dirty_ptes(vma, start, ptep, end - start, flags);
419 * and instead flush the whole range at the end
[all...]
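The contpte hits revolve around one constraint: hardware treats a contiguous block of PTEs as a single unit, so a range operation must either cover whole blocks or unfold the partial blocks at its edges (hence the ALIGN of end at line 382). A sketch of rounding a range outward to block boundaries, assuming the 4K-page arm64 figure of 16 PTEs per block:

#define PAGE_SHIFT     12
#define CONT_PTES      16UL
#define CONT_PTE_SIZE  (CONT_PTES << PAGE_SHIFT)

/* Widen [start, end) so it covers whole contpte blocks. */
static void widen_to_cont_blocks(unsigned long *start, unsigned long *end)
{
	*start &= ~(CONT_PTE_SIZE - 1);                           /* round down */
	*end = (*end + CONT_PTE_SIZE - 1) & ~(CONT_PTE_SIZE - 1); /* round up */
}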
fixmap.c
55 unsigned long end)
66 next = pmd_addr_end(addr, end);
68 } while (pmdp++, addr = next, addr != end);
73 unsigned long end)
81 * We only end up here if the kernel mapping and the fixmap
92 early_fixmap_init_pmd(pudp, addr, end);
104 unsigned long end = FIXADDR_TOP; local
109 early_fixmap_init_pud(p4dp, addr, end);
54 early_fixmap_init_pmd(pud_t *pudp, unsigned long addr, unsigned long end) argument
72 early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end) argument
flush.c
18 void sync_icache_aliases(unsigned long start, unsigned long end) argument
21 dcache_clean_pou(start, end);
28 caches_clean_inval_pou(start, end);
33 unsigned long end)
36 sync_icache_aliases(start, end);
32 flush_ptrace_access(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
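sync_icache_aliases() is the classic two-step for newly written code: clean the D-cache to the point of unification, then invalidate the I-cache over the same range. From userspace, GCC and Clang expose the equivalent as a builtin that emits the required maintenance (DC CVAU / IC IVAU on arm64) itself:

/* Userspace analogue of sync_icache_aliases() above. */
static void sync_icache(char *start, char *end)
{
	__builtin___clear_cache(start, end);
}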
init.c
264 * We can only add back the initrd memory if we don't end up
443 * entirely contains the interval [start, end - 1].
445 static u64 __init random_bounding_box(u64 size, u64 start, u64 end) argument
449 if ((end - start) >= size)
452 max_pgoff = (size - (end - start)) / PAGE_SIZE;
526 unsigned long start = 0, end = 0; local
536 end = module_direct_base + SZ_128M;
544 end = module_plt_base + SZ_2G;
551 .end = end,
[all...]
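random_bounding_box() (hits 443-453) picks the base of a size-byte box that still entirely contains [start, end - 1]: it computes how many whole pages the box can slide back by and backs off a random number of them. A sketch of that arithmetic; get_random_u64() stands in for the kernel's RNG helper, and 4K pages are assumed:

#include <stdint.h>

#define PAGE_SIZE 4096UL

extern uint64_t get_random_u64(void); /* stand-in for the kernel's RNG */

/* Return the base of a size-byte box containing [start, end - 1], or 0
 * if the interval cannot fit inside such a box at all. */
static uint64_t bounding_box_base(uint64_t size, uint64_t start, uint64_t end)
{
	uint64_t max_pgoff, pgoff;

	if ((end - start) >= size)
		return 0;

	/* How many whole pages the box can slide back by while still
	 * containing the interval. */
	max_pgoff = (size - (end - start)) / PAGE_SIZE;
	pgoff = get_random_u64() % (max_pgoff + 1);

	return start - pgoff * PAGE_SIZE;
}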
kasan_init.c
116 unsigned long end, int node, bool early)
129 } while (ptep++, addr = next, addr != end && pte_none(__ptep_get(ptep)));
133 unsigned long end, int node, bool early)
139 next = pmd_addr_end(addr, end);
141 } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
145 unsigned long end, int node, bool early)
151 next = pud_addr_end(addr, end);
153 } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
157 unsigned long end, int node, bool early)
163 next = p4d_addr_end(addr, end);
115 kasan_pte_populate(pmd_t *pmdp, unsigned long addr, unsigned long end, int node, bool early) argument
132 kasan_pmd_populate(pud_t *pudp, unsigned long addr, unsigned long end, int node, bool early) argument
144 kasan_pud_populate(p4d_t *p4dp, unsigned long addr, unsigned long end, int node, bool early) argument
156 kasan_p4d_populate(pgd_t *pgdp, unsigned long addr, unsigned long end, int node, bool early) argument
168 kasan_pgd_populate(unsigned long addr, unsigned long end, int node, bool early) argument
228 kasan_map_populate(unsigned long start, unsigned long end, int node) argument
281 clear_next_level(int pgd_idx, int start, int end) argument
289 clear_shadow(u64 start, u64 end) argument
354 void *end = (void *)__phys_to_virt(pa_end); local
[all...]
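Each kasan_*_populate() level above has the same shape: clamp to this level's next boundary with pXd_addr_end(), recurse one level down for that sub-range, then advance (the extra pXd_none() in the loop condition stops early once entries are already populated). A generic two-level sketch with simplified stand-in types; table allocation is elided:

#include <stdint.h>

#define PTE_SHIFT    12
#define PMD_SHIFT    21
#define PTRS_PER_PTE 512UL

typedef uint64_t pte_t;
typedef struct { pte_t *ptes; } pmd_t;

/* pmd_addr_end()-style clamp to the next 2MiB boundary. */
static uint64_t next_pmd_boundary(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + (1UL << PMD_SHIFT)) &
			    ~((1UL << PMD_SHIFT) - 1);

	return boundary - 1 < end - 1 ? boundary : end;
}

static void pte_populate(pte_t *ptep, uint64_t addr, uint64_t end)
{
	do {
		*ptep = addr | 1; /* illustrative "valid" encoding */
	} while (ptep++, addr += 1UL << PTE_SHIFT, addr != end);
}

static void pmd_populate(pmd_t *pmdp, uint64_t addr, uint64_t end)
{
	uint64_t next;

	do {
		pte_t *first = &pmdp->ptes[(addr >> PTE_SHIFT) &
					   (PTRS_PER_PTE - 1)];

		next = next_pmd_boundary(addr, end);
		pte_populate(first, addr, next);
	} while (pmdp++, addr = next, addr != end);
}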
mmu.c
167 static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end, argument
175 * are deferred to the end of alloc_init_cont_pte().
187 } while (ptep++, addr += PAGE_SIZE, addr != end);
191 unsigned long end, phys_addr_t phys,
221 next = pte_cont_addr_end(addr, end);
232 } while (addr = next, addr != end);
242 static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end, argument
251 next = pmd_addr_end(addr, end);
272 } while (pmdp++, addr = next, addr != end);
276 unsigned long end, phys_addr_
190 alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(int), int flags) argument
275 alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(int), int flags) argument
324 alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(int), int flags) argument
382 alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(int), int flags) argument
431 unsigned long addr, end, next; local
550 __map_memblock(pgd_t *pgdp, phys_addr_t start, phys_addr_t end, pgprot_t prot, int flags) argument
626 phys_addr_t start, end; local
784 u64 end = __pa_symbol(__idmap_text_end); local
833 pgtable_range_aligned(unsigned long start, unsigned long end, unsigned long floor, unsigned long ceiling, unsigned long mask) argument
852 unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, unsigned long end, bool free_mapped, struct vmem_altmap *altmap) argument
873 unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr, unsigned long end, bool free_mapped, struct vmem_altmap *altmap) argument
906 unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr, unsigned long end, bool free_mapped, struct vmem_altmap *altmap) argument
939 unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr, unsigned long end, bool free_mapped, struct vmem_altmap *altmap) argument
958 unmap_hotplug_range(unsigned long addr, unsigned long end, bool free_mapped, struct vmem_altmap *altmap) argument
984 free_empty_pte_table(pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
1021 free_empty_pmd_table(pud_t *pudp, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
1061 free_empty_pud_table(p4d_t *p4dp, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
1101 free_empty_p4d_table(pgd_t *pgdp, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
1141 free_empty_tables(unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
1173 vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) argument
1185 vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) argument
1269 unsigned long next, end; local
1295 unsigned long end = start + size; local
1396 unsigned long end = start + (1UL << PA_SECTION_SHIFT); local
1446 phys_addr_t start, end, addr; local
[all...]
pageattr.c
72 unsigned long end = start + size; local
78 end = start + size;
97 end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
ptdump.c
288 unsigned long end = ~0UL; local
292 end = TASK_SIZE_64;
302 {info->base_addr, end},
364 { PAGE_END, "Linear Mapping end" },
367 { KASAN_SHADOW_END, "Kasan shadow end" },
370 { MODULES_END, "Modules end" },
372 { VMALLOC_END, "vmalloc() end" },
374 { VMEMMAP_END, "vmemmap end" },
376 { PCI_IO_END, "PCI I/O end" },
378 { FIXADDR_TOP, "Fixmap end" },
[all...]
trans_pgd.c
63 pmd_t *src_pmdp, unsigned long start, unsigned long end)
78 } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
84 pud_t *src_pudp, unsigned long start, unsigned long end)
103 next = pmd_addr_end(addr, end);
113 } while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
120 unsigned long end)
139 next = pud_addr_end(addr, end);
149 } while (dst_pudp++, src_pudp++, addr = next, addr != end);
156 unsigned long end)
166 next = p4d_addr_end(addr, end);
62 copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start, unsigned long end) argument
83 copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp, pud_t *src_pudp, unsigned long start, unsigned long end) argument
118 copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start, unsigned long end) argument
154 copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start, unsigned long end) argument
176 copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp, unsigned long start, unsigned long end) argument
204 trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp, unsigned long start, unsigned long end) argument
[all...]
/linux-master/arch/arm64/net/
bpf_jit_comp.c
1615 * - offset[0] offset of the end of prologue,
1617 * - offset[1] - offset of the end of 1st instruction,
1620 * - offset[3] - offset of the end of 3rd instruction,
1642 * instruction (end of program)
1674 static inline void bpf_flush_icache(void *start, void *end) argument
1676 flush_icache_range((unsigned long)start, (unsigned long)end);
2371 * dummy_tramp is placed at the end:
2450 /* plt locates at the end of bpf prog */
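The comment quoted at 1615-1642 fixes the ctx->offset[] convention: entry i holds the arm64 offset of the end of BPF instruction i, with offset[0] the end of the prologue, so an instruction starts exactly where its predecessor's entry says it ended. In toy form (illustrative helpers, not the JIT's own):

/* offset[0] = end of prologue; offset[i] = end of the i-th instruction. */
static int insn_start(const int *offset, int i)
{
	return offset[i - 1]; /* i >= 1: an insn starts where i-1 ended */
}

static int insn_end(const int *offset, int i)
{
	return offset[i];
}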
/linux-master/arch/csky/abiv1/
cacheflush.c
69 unsigned long end)
68 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
/linux-master/arch/csky/abiv1/inc/abi/
cacheflush.h
41 * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
44 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
45 #define flush_cache_vmap(start, end) cache_wbinv_all()
46 #define flush_cache_vmap_early(start, end) do { } while (0)
47 #define flush_cache_vunmap(start, end) cache_wbinv_all()
49 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
50 #define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
/linux-master/arch/csky/abiv2/
cacheflush.c
56 unsigned long start, unsigned long end)
65 icache_inv_range(start, end);
55 flush_icache_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end) argument
/linux-master/arch/csky/abiv2/inc/abi/
cacheflush.h
16 #define flush_cache_range(vma, start, end) do { } while (0)
37 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
40 unsigned long start, unsigned long end);
43 #define flush_cache_vmap(start, end) do { } while (0)
44 #define flush_cache_vmap_early(start, end) do { } while (0)
45 #define flush_cache_vunmap(start, end) do { } while (0)
/linux-master/arch/csky/include/asm/
cache.h
17 void icache_inv_range(unsigned long start, unsigned long end);
21 void dcache_wb_range(unsigned long start, unsigned long end);
24 void cache_wbinv_range(unsigned long start, unsigned long end);
27 void dma_wbinv_range(unsigned long start, unsigned long end);
28 void dma_inv_range(unsigned long start, unsigned long end);
29 void dma_wb_range(unsigned long start, unsigned long end);
fixmap.h
29 extern void fixrange_init(unsigned long start, unsigned long end,

Completed in 418 milliseconds
