// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;

struct mm_struct efi_mm = {
	.mm_rb			= RB_ROOT,
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime;
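/* "noefi" on the kernel command line disables use of EFI runtime services. */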
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

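/*
 * efi_runtime_disabled - report whether EFI runtime services were disabled
 * on the command line (via "noefi" or "efi=noruntime").
 */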
bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

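/*
 * Parse the "efi=" command line options handled here: "debug", "noruntime"
 * and "nosoftreserve".
 */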
static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);

struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note: do not add more fields to the systab sysfs file, as that breaks
 * the sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

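/* Expose the firmware word size (32 or 64 bit) via sysfs. */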
static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

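/*
 * Wire the generic efivar operations up to the firmware's runtime variable
 * services and register them. SetVariable is only hooked up when the
 * firmware advertises support for it.
 */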
static int generic_ops_register(void)
{
	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

static void generic_ops_unregister(void)
{
	efivars_unregister(&generic_efivars);
}

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

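/*
 * efivar_init() callback: collect every EFI variable whose name matches the
 * one given via "efivar_ssdt=" on the command line, so its contents can be
 * loaded as an ACPI SSDT later.
 */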
static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
				   unsigned long name_size, void *data)
{
	struct efivar_entry *entry;
	struct list_head *list = data;
	char utf8_name[EFIVAR_SSDT_NAME_MAX];
	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);

	ucs2_as_utf8(utf8_name, name, limit - 1);
	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
		return 0;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	memcpy(entry->var.VariableName, name, name_size);
	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));

	efivar_entry_add(entry, list);

	return 0;
}

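/*
 * Read each matching variable and hand its payload to acpi_load_table(),
 * installing the firmware-provided SSDT overlay.
 */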
static __init int efivar_ssdt_load(void)
{
	LIST_HEAD(entries);
	struct efivar_entry *entry, *aux;
	unsigned long size;
	void *data;
	int ret;

	if (!efivar_ssdt[0])
		return 0;

	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);

	list_for_each_entry_safe(entry, aux, &entries, list) {
		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
			&entry->var.VendorGuid);

		list_del(&entry->list);

		ret = efivar_entry_size(entry, &size);
		if (ret) {
			pr_err("failed to get var size\n");
			goto free_entry;
		}

		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto free_entry;
		}

		ret = efivar_entry_get(entry, NULL, &size, data);
		if (ret) {
			pr_err("failed to get var data\n");
			goto free_data;
		}

		ret = acpi_load_table(data, NULL);
		if (ret) {
			pr_err("failed to load table: %d\n", ret);
			goto free_data;
		}

		goto free_entry;

free_data:
		kfree(data);

free_entry:
		kfree(entry);
	}

	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

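/*
 * Expose the preserved EFI boot services code/data regions as read-only
 * blobs under the "efi" debugfs directory.
 */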
static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		destroy_workqueue(efi_rts_wq);
		return -ENOMEM;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		efivar_ssdt_load();
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	error = efi_runtime_map_init(efi_kobj);
	if (error)
		goto err_remove_group;

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	destroy_workqueue(efi_rts_wq);
	return error;
}

subsys_initcall(efisubsys_init);

/*
 * Find the efi memory descriptor for a given physical address.  Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}

static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (!efi_guidcmp(*guid, table_types[i].guid)) {
			*(table_types[i].ptr) = table;
			if (table_types[i].name[0])
				pr_cont("%s=0x%lx ",
					table_types[i].name, table);
			return 1;
		}
	}

	return 0;
}

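/*
 * Walk the firmware's configuration table array, record the addresses of
 * the tables we know about, and act on the Linux-specific ones (random
 * seed, MEMRESERVE list, RT properties table).
 */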
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min(seed->size, EFI_RANDOM_SEED_SIZE);
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				pr_notice("seeding entropy pool\n");
				add_bootloader_randomness(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}

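/*
 * Sanity check the EFI system table header: verify its signature and warn
 * if the table revision is older than the caller expects.
 */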
int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
				   int min_major_version)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	if ((systab_hdr->revision >> 16) < min_major_version)
		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
		       systab_hdr->revision >> 16,
		       systab_hdr->revision & 0xffff,
		       min_major_version);

	return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

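/*
 * Log the firmware revision and vendor string; on x86-64 Apple Macs the
 * runtime revision is capped at EFI 1.10.
 */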
void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,
		vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}

static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
};

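/*
 * Render a memory descriptor's type name and attribute bits as a
 * human-readable string in @buf.
 */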
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_CPU_CRYPTO		? "CC"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
		    (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
#endif

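/* Convert an EFI_* status code into the closest negative errno value. */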
int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

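/*
 * Insert a "reserved" entry for the range into the iomem resource tree
 * (nested inside the conflicting "System RAM" region, if any).
 */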
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name	= "reserved";
	res->flags	= IORESOURCE_MEM;
	res->start	= addr;
	res->end	= addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

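/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE_TABLE linked list so it
 * persists across kexec: the next kernel walks this list early during boot
 * and memblock_reserve()s each entry (see efi_config_parse_tables() above).
 */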
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
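/*
 * Reboot notifier: when a kexec is in progress, write fresh random bytes
 * into the LINUX_EFI_RANDOM_SEED table so the next kernel gets a new seed.
 */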
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif