// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>
#include <linux/notifier.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	.unaccepted		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

extern unsigned long screen_info_table;

struct mm_struct efi_mm = {
	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

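/*
 * Parse the 'efi=' kernel command line option. Recognized options:
 *   debug         - enable verbose EFI diagnostics (EFI_DBG)
 *   noruntime     - disable EFI runtime services
 *   runtime       - re-enable EFI runtime services (overrides the default)
 *   nosoftreserve - do not honour the EFI_MEMORY_SP (soft reserve) attribute
 */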
static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);

struct kobject *efi_kobj;

/*
 * Let's not leave out the systab information that snuck into
 * the efivars driver.
 * Note: do not add more fields to the systab sysfs file, as that breaks
 * the sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	if (IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

struct blocking_notifier_head efivar_ops_nh;
EXPORT_SYMBOL_GPL(efivar_ops_nh);

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

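/*
 * Probe whether the firmware actually implements the variable services by
 * calling GetNextVariableName() with a buffer that holds a single UTF-16
 * character; EFI_UNSUPPORTED means the generic efivar ops cannot be used.
 */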
static bool generic_ops_supported(void)
{
	unsigned long name_size;
	efi_status_t status;
	efi_char16_t name;
	efi_guid_t guid;

	name_size = sizeof(name);

	if (!efi.get_next_variable)
		return false;
	status = efi.get_next_variable(&name_size, &name, &guid);
	if (status == EFI_UNSUPPORTED)
		return false;

	return true;
}

static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;
	generic_ops.query_variable_info = efi.query_variable_info;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}

static void generic_ops_unregister(void)
{
	if (!generic_ops.get_variable)
		return;

	efivars_unregister(&generic_efivars);
}

void efivars_generic_ops_register(void)
{
	generic_ops_register();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_register);

void efivars_generic_ops_unregister(void)
{
	generic_ops_unregister();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

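/*
 * Walk all EFI variables and load any variable whose name matches the
 * 'efivar_ssdt=' command line argument as an ACPI SSDT override.
 */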
static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			efi_char16_t *name_tmp =
				krealloc(name, name_size, GFP_KERNEL);
			if (!name_tmp) {
				kfree(name);
				return -ENOMEM;
			}
			name = name_tmp;
			continue;
		}

		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size)
			return -EIO;

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status ret = acpi_load_table(data, NULL);
			if (ret)
				pr_err("failed to load table: %u\n", ret);
			else
				continue;
		} else {
			pr_err("failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}
	return 0;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		efivar_ssdt_load();
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);

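/*
 * Walk the EFI memory map and mark every range carrying the
 * EFI_MEMORY_MORE_RELIABLE attribute as mirrored in memblock.
 */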
void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}

/*
 * Find the efi memory descriptor for a given physical address.  Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		/* skip bogus entries (including empty ones) */
		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
		    (md->num_pages <= 0) ||
		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	__weak __alias(__efi_mem_desc_lookup);

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure that the @addr, @size range is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}

static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"TPMFinalLog"	},
	{EFI_CC_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"CCFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table			},
#endif
	{},
};

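/*
 * Match a firmware-provided configuration table GUID against the known
 * table types and, if the table is usable, record its address.
 */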
static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (efi_guidcmp(*guid, table_types[i].guid))
			continue;

		if (!efi_config_table_is_usable(guid, table)) {
			if (table_types[i].name[0])
				pr_cont("(%s=0x%lx unusable) ",
					table_types[i].name, table);
			return 1;
		}

		*(table_types[i].ptr) = table;
		if (table_types[i].name[0])
			pr_cont("%s=0x%lx ", table_types[i].name, table);
		return 1;
	}

	return 0;
}

/**
 * reserve_unaccepted - Map and reserve unaccepted configuration table
 * @unaccepted: Pointer to unaccepted memory table
 *
 * memblock_add() makes sure that the table is mapped in direct mapping. During
 * normal boot it happens automatically because the table is allocated from
 * usable memory. But during crashkernel boot only memory specifically reserved
 * for crash scenario is mapped. memblock_add() forces the table to be mapped
 * in crashkernel case.
 *
 * Align the range to the nearest page borders. Ranges smaller than page size
 * are not going to be mapped.
 *
 * memblock_reserve() makes sure that future allocations will not touch the
 * table.
 */
static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
{
	phys_addr_t start, size;

	start = PAGE_ALIGN_DOWN(efi.unaccepted);
	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);

	memblock_add(start, size);
	memblock_reserve(start, size);
}

int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
		struct efi_unaccepted_memory *unaccepted;

		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
		if (unaccepted) {
			if (unaccepted->version == 1) {
				reserve_unaccepted(unaccepted);
			} else {
				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
			}

			early_memunmap(unaccepted, sizeof(*unaccepted));
		}
	}

	return 0;
}

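/* Sanity-check the system table header signature before trusting its contents. */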
int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	return 0;
}

static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}

static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
	"Unaccepted",
};

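/*
 * Format an EFI memory descriptor's type and attribute flags into a
 * human-readable string, e.g. for dumping the EFI memory map.
 */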
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_CPU_CRYPTO		? "CC"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}

/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
		    (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}

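/* Convert an EFI status code to a negative errno value. */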
int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

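/*
 * Insert a 'reserved' entry for the range into the iomem resource tree,
 * nesting it inside the conflicting 'System RAM' resource if there is one.
 */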
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name	= "reserved";
	res->flags	= IORESOURCE_MEM;
	res->start	= addr;
	res->end	= addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

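/*
 * Record a memory reservation in the LINUX_EFI_MEMRESERVE linked list so
 * that it is honoured across kexec, allocating a new list entry when all
 * slots in the existing entries are in use.
 */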
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
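/*
 * On kexec, write fresh random bytes into the LINUX_EFI_RANDOM_SEED table
 * so the next kernel receives new entropy rather than reusing the seed
 * already credited by this kernel.
 */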
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif