Lines Matching refs: vmlinux

114 	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
136 if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
138 memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
139 if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
141 memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
152 rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
153 rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
154 dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
219 for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
334 memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
353 *(unsigned long *)(&vmlinux.entry) += offset;
354 vmlinux.bootdata_off += offset;
355 vmlinux.bootdata_preserved_off += offset;
357 vmlinux.rela_dyn_start += offset;
358 vmlinux.rela_dyn_end += offset;
359 vmlinux.dynsym_start += offset;
361 vmlinux.got_start += offset;
362 vmlinux.got_end += offset;
364 vmlinux.init_mm_off += offset;
365 vmlinux.swapper_pg_dir_off += offset;
366 vmlinux.invalid_pg_dir_off += offset;
368 vmlinux.kasan_early_shadow_page_off += offset;
369 vmlinux.kasan_early_shadow_pte_off += offset;
370 vmlinux.kasan_early_shadow_pmd_off += offset;
371 vmlinux.kasan_early_shadow_pud_off += offset;
372 vmlinux.kasan_early_shadow_p4d_off += offset;
421 vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
422 THREAD_SIZE, vmlinux.default_lma,
425 __kaslr_offset = vmlinux_lma - vmlinux.default_lma;
429 vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
430 physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);
434 memmove((void *)vmlinux_lma, img, vmlinux.image_size);
436 img = (void *)vmlinux.default_lma;
437 memmove((void *)vmlinux_lma, img, vmlinux.image_size);
438 memset(img, 0, vmlinux.image_size);
441 /* vmlinux decompression is done, shrink reserved low memory */
444 amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
445 amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
446 physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
462 kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size, __kaslr_offset);
477 psw.addr = vmlinux.entry;
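
The matches above come from the s390 decompressor's startup code: lines 421-430 pick a randomized load address (falling back to the default LMA when randomization yields nothing), line 425 derives the KASLR offset from the default link address, and lines 353-372 add that offset to every absolute address the boot code still needs. The standalone C sketch below reproduces only that pattern; the struct, its reduced field set, pick_random_base() and all sizes are invented for illustration and are not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Invented, much smaller stand-in for the vmlinux info block. */
struct image_info {
	unsigned long default_lma;    /* link-time (default) load address */
	unsigned long entry;          /* absolute entry point */
	unsigned long image_size;     /* text + data */
	unsigned long bss_size;       /* zero-initialized tail */
	unsigned long bootdata_off;   /* absolute address of .boot.data */
};

/*
 * Toy stand-in for randomize_within_range(): an aligned address in
 * [min, max - size], or 0 if the range cannot hold the image.
 */
static unsigned long pick_random_base(unsigned long size, unsigned long align,
				      unsigned long min, unsigned long max)
{
	unsigned long slots;

	if (max < min || max - min < size)
		return 0;
	slots = (max - min - size) / align + 1;
	return min + ((unsigned long)rand() % slots) * align;
}

/* Simplified mirror of the "+= offset" block at lines 353-372. */
static void adjust_image_info(struct image_info *img, unsigned long offset)
{
	img->entry += offset;
	img->bootdata_off += offset;
}

int main(void)
{
	struct image_info img = {
		.default_lma  = 0x100000,
		.entry        = 0x100200,
		.image_size   = 0x400000,
		.bss_size     = 0x80000,
		.bootdata_off = 0x180000,
	};
	unsigned long lma, offset;

	srand((unsigned)time(NULL));
	/* Place text+data+bss somewhere in [default_lma, 1 GiB), aligned. */
	lma = pick_random_base(img.image_size + img.bss_size, 0x10000,
			       img.default_lma, 0x40000000);
	if (!lma)                       /* mirrors the "?:" fallback at line 429 */
		lma = img.default_lma;
	offset = lma - img.default_lma;

	adjust_image_info(&img, offset);

	printf("lma=%#lx offset=%#lx entry=%#lx bootdata=%#lx\n",
	       lma, offset, img.entry, img.bootdata_off);
	return 0;
}

In the listing, the adjusted entry value is what the final PSW load at line 477 jumps to.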
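
Lines 152-154, 219 and 462 cover the other half of the job: once the image has moved, R_390_RELATIVE relocation targets and GOT slots must be patched with the same offset. The sketch below does this over a simulated image buffer so it runs in userspace; LINK_BASE, slot() and the example entries are assumptions for the illustration, and symbol-based relocations (the reason dynsym is read at line 154) are left out.

#include <elf.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LINK_BASE 0x100000UL	/* pretend default link address of the image */

/* Translate a link-time absolute address into the simulated image buffer. */
static uint64_t *slot(unsigned char *image, uint64_t addr)
{
	return (uint64_t *)(image + (addr - LINK_BASE));
}

/*
 * R_390_RELATIVE entries: the patched value is the addend (a link-time
 * absolute address) plus the load offset. Relocations that reference a
 * symbol would additionally need the dynamic symbol table; skipped here.
 */
static void adjust_rela(unsigned char *image, Elf64_Rela *start,
			Elf64_Rela *end, uint64_t offset)
{
	for (Elf64_Rela *rela = start; rela < end; rela++) {
		if (ELF64_R_TYPE(rela->r_info) != R_390_RELATIVE)
			continue;
		*slot(image, rela->r_offset) = rela->r_addend + offset;
	}
}

/* GOT slots already hold absolute addresses, so they are simply shifted. */
static void adjust_got(unsigned char *image, uint64_t got_start,
		       uint64_t got_end, uint64_t offset)
{
	for (uint64_t addr = got_start; addr < got_end; addr += sizeof(uint64_t))
		*slot(image, addr) += offset;
}

int main(void)
{
	unsigned char *image = calloc(1, 0x1000);   /* simulated image memory */
	uint64_t offset = 0x7f000000;               /* pretend KASLR shift */
	Elf64_Rela rela[1] = {{
		.r_offset = LINK_BASE + 0x10,               /* slot to patch */
		.r_info   = ELF64_R_INFO(0, R_390_RELATIVE),
		.r_addend = LINK_BASE + 0x200,              /* link-time target */
	}};

	if (!image)
		return 1;
	*slot(image, LINK_BASE + 0x20) = LINK_BASE + 0x300; /* fake GOT entry */

	adjust_rela(image, rela, rela + 1, offset);
	adjust_got(image, LINK_BASE + 0x20, LINK_BASE + 0x28, offset);

	printf("rela target: %#" PRIx64 ", got entry: %#" PRIx64 "\n",
	       *slot(image, LINK_BASE + 0x10), *slot(image, LINK_BASE + 0x20));
	free(image);
	return 0;
}

Line 462 runs this kind of pass over the freshly moved image (vmlinux_lma .. vmlinux_lma + image_size) with the computed __kaslr_offset.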