/linux-master/arch/arm64/include/asm/
trans_pgd.h
    31: unsigned long start, unsigned long end);

/linux-master/arch/arm64/kernel/
acpi.c
   289: u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);  (local)
   291: if (phys < md->phys_addr || phys >= end)
   294: if (phys + size > end) {
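The acpi.c hits form a containment check: [phys, phys + size) must lie entirely inside an EFI memory descriptor. A minimal userspace sketch of that two-step test, with hypothetical names (region_contains is not a kernel function):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12

    /* Does [phys, phys + size) fit inside the descriptor's pages?
     * Mirrors the hits: check the start first, then the tail. */
    static bool region_contains(uint64_t base, uint64_t num_pages,
                                uint64_t phys, uint64_t size)
    {
        uint64_t end = base + (num_pages << EFI_PAGE_SHIFT);

        if (phys < base || phys >= end)   /* start outside the region */
            return false;
        if (phys + size > end)            /* tail spills past the end */
            return false;
        return true;
    }

    int main(void)
    {
        /* one 4 KiB page at 0x1000, i.e. [0x1000, 0x2000) */
        printf("%d\n", region_contains(0x1000, 1, 0x1800, 0x800)); /* 1 */
        printf("%d\n", region_contains(0x1000, 1, 0x1800, 0x900)); /* 0 */
        return 0;
    }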
acpi_numa.c
    48: acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
    49:                     const unsigned long end)  (argument)
alternative.c
    37: struct alt_instr *end;  (member of struct alt_region)
   124: static noinstr void clean_dcache_range_nopatch(u64 start, u64 end)  (argument)
   139: } while (cur += d_size, cur < end);
   150: for (alt = region->begin; alt < region->end; alt++) {
   216: .end = (void *)hdr + alt->sh_offset + alt->sh_size,
   224: .end = (struct alt_instr *)__alt_instructions_end,
   284: .end = start + length,
elfcore.c
    23: /* Derived from dump_user_range(); start/end must be page-aligned */
   103: phdr.p_memsz = m->end - m->start;
machine_kexec.c
   294: if (!crashk_res.end)
   299: if ((addr < crashk_res.start) || (crashk_res.end < addr)) {
   300: if (!crashk_low_res.end)
   303: if ((addr < crashk_low_res.start) || (crashk_low_res.end < addr))
   320: void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)  (argument)
   325: for (addr = begin; addr < end; addr += PAGE_SIZE) {
machine_kexec_file.c
    49: phys_addr_t start, end;  (local)
    52: for_each_mem_range(i, &start, &end)
    61: for_each_mem_range(i, &start, &end) {
    63: cmem->ranges[cmem->nr_ranges].end = end - 1;
    68: ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
    72: if (crashk_low_res.end) {
    73: ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
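Note the inclusive-end convention at line 63 (end - 1); crash_exclude_mem_range() then carves the crashkernel windows out of that list. A simplified, hypothetical sketch of the exclusion step for one inclusive range (the kernel helper edits a whole crash_mem array in place):

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };          /* inclusive bounds */

    /* Remove [ex_start, ex_end] from *r; writes 0, 1 or 2 surviving
     * pieces to out[] and returns how many there are. */
    static int exclude_range(const struct range *r, struct range out[2],
                             uint64_t ex_start, uint64_t ex_end)
    {
        int n = 0;

        if (ex_end < r->start || ex_start > r->end) {   /* no overlap */
            out[n++] = *r;
            return n;
        }
        if (r->start < ex_start)                        /* piece below */
            out[n++] = (struct range){ r->start, ex_start - 1 };
        if (r->end > ex_end)                            /* piece above */
            out[n++] = (struct range){ ex_end + 1, r->end };
        return n;
    }

    int main(void)
    {
        struct range r = { 0x0, 0xffff }, out[2];
        int n = exclude_range(&r, out, 0x4000, 0x7fff);

        for (int i = 0; i < n; i++)    /* [0, 0x3fff], [0x8000, 0xffff] */
            printf("[%#lx, %#lx]\n", (unsigned long)out[i].start,
                   (unsigned long)out[i].end);
        return 0;
    }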
mte.c
   433: /* limit access to the end of the page */
   584: const char __user *end = uaddr + size;  (local)
   593: while (uaddr < end) {
   600: return end - uaddr;
psci.c
    83: unsigned long start, end;  (local)
    89: * potentially end up declaring this cpu undead
    94: end = start + msecs_to_jiffies(100);
   104: } while (time_before(jiffies, end));
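The psci.c loop polls until a 100 ms jiffies deadline via time_before(), which compares through a signed difference and therefore stays correct when the tick counter wraps around. A userspace sketch of the idiom (my_time_before is a stand-in, not the kernel macro):

    #include <stdio.h>

    typedef unsigned long jiffies_t;

    /* Equivalent in effect to the kernel's time_before(a, b):
     * true when a is earlier than b, even across wraparound. */
    static int my_time_before(jiffies_t a, jiffies_t b)
    {
        return (long)(a - b) < 0;
    }

    int main(void)
    {
        jiffies_t now = (jiffies_t)-5;   /* 5 ticks before wraparound */
        jiffies_t deadline = now + 100;  /* wraps past zero */

        printf("naive <    : %d\n", now < deadline);               /* 0, wrong */
        printf("signed diff: %d\n", my_time_before(now, deadline)); /* 1, right */
        return 0;
    }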
ptrace.c
   818: unsigned long start, end;  (local)
   838: end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
   839: membuf_write(&to, target->thread.sve_state, end - start);
   841: start = end;
   842: end = SVE_PT_SVE_FPSR_OFFSET(vq);
   843: membuf_zero(&to, end - start);
   849: start = end;
   850: end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
   852: end - start);
   854: start = end;
   882: unsigned long start, end;  (local)
  1054: unsigned long start, end;  (local)
  1105: unsigned long start, end;  (local)
  [all...]
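The ptrace.c hits walk the SVE regset with an offset cursor: end is advanced to each field boundary, membuf_write() emits payload for [start, end), membuf_zero() pads the gaps, and start = end moves the cursor on. A hypothetical userspace sketch of that cursor pattern, with buf_write()/buf_zero() standing in for the kernel's membuf helpers:

    #include <stdio.h>
    #include <string.h>

    struct membuf { unsigned char *p; size_t left; };   /* stand-in type */

    static void buf_write(struct membuf *b, const void *src, size_t n)
    {
        memcpy(b->p, src, n);  b->p += n;  b->left -= n;
    }

    static void buf_zero(struct membuf *b, size_t n)
    {
        memset(b->p, 0, n);  b->p += n;  b->left -= n;
    }

    int main(void)
    {
        unsigned char out[32], payload[8] = "REGDATA";
        struct membuf b = { out, sizeof(out) };
        size_t start, end;

        start = 0;  end = sizeof(payload);  /* real payload at [0, 8) */
        buf_write(&b, payload, end - start);

        start = end;  end = 16;             /* zero padding up to offset 16 */
        buf_zero(&b, end - start);

        printf("emitted %zu bytes\n", sizeof(out) - b.left);  /* 16 */
        return 0;
    }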
setup.c
    70: .end = 0,
    76: .end = 0,
   188: * or else we could end-up printing non-initialized data, etc.
   213: kernel_code.end = __pa_symbol(__init_begin - 1);
   215: kernel_data.end = __pa_symbol(_end - 1);
   231: res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
   236: res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
   255: resource_size_t start, end;  (local)
   258: end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
  [all...]
signal.c
   635: struct _aarch64_ctx const __user *end;  (local)
   751: end = (struct _aarch64_ctx const __user *)userp;
   754: __get_user_error(end_magic, &end->magic, err);
   755: __get_user_error(end_size, &end->size, err);
  1065: struct _aarch64_ctx __user *end;  (local)
  1072: end = (struct _aarch64_ctx __user *)userp;
  1089: __put_user_error(0, &end->magic, err);
  1090: __put_user_error(0, &end->size, err);
  1093: /* set the "end" magic */
  1095: struct _aarch64_ctx __user *end  (local)
  [all...]
smp.c
   583: acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
   584:                              const unsigned long end)  (argument)
   589: if (BAD_MADT_GICC_ENTRY(processor, end))
sys_compat.c
    24: __do_compat_cache_op(unsigned long start, unsigned long end)  (argument)
    29: unsigned long chunk = min(PAGE_SIZE, end - start);
    49: } while (start < end);
    55: do_compat_cache_op(unsigned long start, unsigned long end, int flags)  (argument)
    57: if (end < start || flags)
    60: if (!access_ok((const void __user *)start, end - start))
    63: return __do_compat_cache_op(start, end);
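__do_compat_cache_op() walks the user range in at-most-page-sized chunks so each step can fault or reschedule independently, and do_compat_cache_op() rejects inverted ranges (end < start) before starting. A userspace sketch of the loop, with process_chunk() as a hypothetical stand-in for the per-chunk cache maintenance:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static int process_chunk(unsigned long start, unsigned long len)
    {
        printf("chunk at %#lx, %lu bytes\n", start, len);
        return 0;                     /* 0 on success, as in the kernel */
    }

    static int walk_range(unsigned long start, unsigned long end)
    {
        if (end < start)
            return -1;                /* reject inverted ranges up front */

        while (start < end) {
            unsigned long remaining = end - start;
            unsigned long chunk = remaining < PAGE_SIZE ? remaining
                                                        : PAGE_SIZE;
            int ret = process_chunk(start, chunk);

            if (ret)
                return ret;           /* stop at the first failing chunk */
            start += chunk;
        }
        return 0;
    }

    int main(void)
    {
        /* 3 full pages plus a 123-byte tail -> 4 chunks */
        return walk_range(0x10000, 0x10000 + 3 * PAGE_SIZE + 123);
    }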

/linux-master/arch/arm64/kernel/pi/
map_kernel.c
    23: map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset,
    24:             void *start, void *end, pgprot_t prot,
                    bool may_use_cont, int root_level)  (argument)
    28: ((u64)end + va_offset) & ~PAGE_OFFSET, (u64)start,
    32: unmap_segment(pgd_t *pg_dir, u64 va_offset, void *start,
    33:               void *end, int root_level)  (argument)
    35: map_segment(pg_dir, NULL, va_offset, start, end, __pgprot(0),
map_range.c
    20: * @end: Virtual address of the end of the range (exclusive)
    29: void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,  (argument)
    50: while (start < end) {
    51: u64 next = min((start | lmask) + 1, PAGE_ALIGN(end));
    78: if ((end & ~cmask) <= start)
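The stepping expression at line 51 is the heart of map_range(): with lmask holding the low bits of the block size, (start | lmask) + 1 is the next block boundary strictly above start, clamped to the page-aligned end. A standalone sketch, assuming a 2 MiB block size for illustration:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long block = 2UL << 20;          /* 2 MiB blocks */
        unsigned long lmask = block - 1;
        unsigned long start = 0x1ff000, end = 0x600000;

        while (start < end) {
            /* next 2M boundary above start, but never past the end */
            unsigned long bound = (start | lmask) + 1;
            unsigned long next  = bound < PAGE_ALIGN(end) ? bound
                                                          : PAGE_ALIGN(end);

            printf("map [%#lx, %#lx)\n", start, next);
            start = next;
        }
        /* prints [0x1ff000, 0x200000), [0x200000, 0x400000),
         * [0x400000, 0x600000): a short head, then whole blocks */
        return 0;
    }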
pi.h
    31: void map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,

/linux-master/arch/arm64/kernel/vdso/
vdso.lds.S
    71: PROVIDE(end = .);
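PROVIDE(end = .) defines an end symbol from the linker script; the default Linux/ELF link does the same for the traditional etext/edata/end symbols (see end(3)). From C such a symbol is declared as an ordinary extern whose address, not value, carries the information. A small glibc/ELF-specific sketch:

    #include <stdio.h>

    /* Provided by the default linker script, not by any C file */
    extern char etext, edata, end;

    int main(void)
    {
        printf("etext %p\n", (void *)&etext);  /* end of text */
        printf("edata %p\n", (void *)&edata);  /* end of initialized data */
        printf("end   %p\n", (void *)&end);    /* end of BSS */
        return 0;
    }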

/linux-master/arch/arm64/kvm/
emulate-nested.c
   455: const u32 end;  (member of struct encoding_to_trap_config)
   463: .end = sr_end, \
  1023: .end = sr, \
  1745: sys_reg_Op0(tc->end), sys_reg_Op1(tc->end),
  1746: sys_reg_CRn(tc->end), sys_reg_CRm(tc->end),
  1747: sys_reg_Op2(tc->end),
  1791: for (u32 enc = cgt->encoding; enc <= cgt->end; enc = encoding_next(enc)) {

/linux-master/arch/arm64/kvm/hyp/
hyp-entry.S
   144: .macro check_preamble_length start, end
   146: .if ((\end-\start) != KVM_VECTOR_PREAMBLE)

/linux-master/arch/arm64/kvm/hyp/include/hyp/
switch.h
   694: struct kvm_exception_table_entry *entry, *end;  (local)
   698: end = &__stop___kvm_ex_table;
   700: while (entry < end) {
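The switch.h hits scan a table bounded by __start___kvm_ex_table and __stop___kvm_ex_table, i.e. an array whose limits are linker section symbols. A GCC/Clang-on-ELF sketch of the same shape using a hypothetical my_table section (the linker emits __start_/__stop_ symbols for sections named like C identifiers):

    #include <stdio.h>

    struct my_entry {
        unsigned long key;
        unsigned long fixup;
    };

    /* Drop an entry into the named section; 'used' keeps it alive */
    #define MY_TABLE_ENTRY(k, f)                          \
        static const struct my_entry entry_##k            \
        __attribute__((used, section("my_table"))) = { (k), (f) }

    MY_TABLE_ENTRY(1, 0x100);
    MY_TABLE_ENTRY(2, 0x200);

    /* Bounds provided by the linker, as with __kvm_ex_table */
    extern const struct my_entry __start_my_table[];
    extern const struct my_entry __stop_my_table[];

    int main(void)
    {
        const struct my_entry *entry = __start_my_table;
        const struct my_entry *end   = __stop_my_table;

        while (entry < end) {          /* linear scan, as in the hit */
            printf("key %lu -> fixup %#lx\n", entry->key, entry->fixup);
            entry++;
        }
        return 0;
    }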

/linux-master/arch/arm64/kvm/hyp/nvhe/
early_alloc.c
    16: static unsigned long end;  (variable)
    32: if (end - cur < size)
    52: end = base + size;
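early_alloc.c is a bump allocator: a cursor advances through [base, end), and the check at line 32 asks whether a request fits as end - cur < size, avoiding the potentially wrapping cur + size. A minimal userspace sketch of that shape (simplified; names are illustrative):

    #include <stdio.h>

    static unsigned long base, end, cur;

    static void early_init(unsigned long b, unsigned long size)
    {
        base = cur = b;
        end = b + size;
    }

    static void *early_alloc(unsigned long size)
    {
        if (end - cur < size)     /* overflow-safe "does it fit?" test */
            return NULL;

        void *p = (void *)cur;
        cur += size;
        return p;
    }

    int main(void)
    {
        static unsigned char pool[4096];

        early_init((unsigned long)pool, sizeof(pool));
        printf("a = %p\n", early_alloc(1024));   /* succeeds */
        printf("b = %p\n", early_alloc(4096));   /* NULL: does not fit */
        return 0;
    }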
mem_protect.c
   136: static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
   164: static bool guest_stage2_force_pte_cb(u64 addr, u64 end,  (argument)
   345: u64 end;  (member of struct kvm_mem_range)
   352: phys_addr_t end;  (local)
   355: range->end = ULONG_MAX;
   361: end = reg->base + reg->size;
   364: range->end = reg->base;
   365: } else if (addr >= end) {
   367: range->start = end;
   370: range->end
   400: range_is_memory(u64 start, u64 end)  (argument)
   410: __host_stage2_idmap(u64 start, u64 end, enum kvm_pgtable_prot prot)  (argument)
   486: host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)  (argument)
   806: void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);  (local)
   824: void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);  (local)
  1225: u64 end = PAGE_ALIGN((u64)to);  (local)
  1255: u64 end = PAGE_ALIGN((u64)to);  (local)
  [all...]
mm.c
   116: unsigned long end = (unsigned long)to;  (local)
   123: end = PAGE_ALIGN(end);
   125: for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
   151: unsigned long i, start, size, end = 0;  (local)
   159: * memblock may already be backed by the page backing the end
   162: start = max(start, end);
   164: end = hyp_memory[i].base + hyp_memory[i].size;
   165: end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
   330: unsigned long start, end;  (local)
  [all...]
setup.c
    74: void *start, *end, *virt = hyp_phys_to_virt(phys);  (local)
   118: end = start + PAGE_ALIGN(hyp_percpu_size);
   119: ret = pkvm_create_mappings(start, end, PAGE_HYP);