Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2a29dd9..e98bbf8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* efi.c - EFI subsystem
*
@@ -9,8 +10,6 @@
* allowing the efivarfs to be mounted or the efivars module to be loaded.
* The existence of /sys/firmware/efi may also be used by userspace to
* determine that the system supports EFI.
- *
- * This file is released under the GPLv2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -31,6 +30,7 @@
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
+#include <linux/security.h>
#include <asm/early_ioremap.h>
@@ -40,11 +40,9 @@
.acpi20 = EFI_INVALID_TABLE_ADDR,
.smbios = EFI_INVALID_TABLE_ADDR,
.smbios3 = EFI_INVALID_TABLE_ADDR,
- .sal_systab = EFI_INVALID_TABLE_ADDR,
.boot_info = EFI_INVALID_TABLE_ADDR,
.hcdp = EFI_INVALID_TABLE_ADDR,
.uga = EFI_INVALID_TABLE_ADDR,
- .uv_systab = EFI_INVALID_TABLE_ADDR,
.fw_vendor = EFI_INVALID_TABLE_ADDR,
.runtime = EFI_INVALID_TABLE_ADDR,
.config_table = EFI_INVALID_TABLE_ADDR,
@@ -52,29 +50,12 @@
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
.rng_seed = EFI_INVALID_TABLE_ADDR,
- .tpm_log = EFI_INVALID_TABLE_ADDR
+ .tpm_log = EFI_INVALID_TABLE_ADDR,
+ .tpm_final_log = EFI_INVALID_TABLE_ADDR,
+ .mem_reserve = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
-static unsigned long *efi_tables[] = {
- &efi.mps,
- &efi.acpi,
- &efi.acpi20,
- &efi.smbios,
- &efi.smbios3,
- &efi.sal_systab,
- &efi.boot_info,
- &efi.hcdp,
- &efi.uga,
- &efi.uv_systab,
- &efi.fw_vendor,
- &efi.runtime,
- &efi.config_table,
- &efi.esrt,
- &efi.properties_table,
- &efi.mem_attr_table,
-};
-
struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
.mm_users = ATOMIC_INIT(2),
@@ -241,6 +222,11 @@
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
+ int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+ if (ret)
+ return ret;
+
if (strlen(str) < sizeof(efivar_ssdt))
memcpy(efivar_ssdt, str, strlen(str));
else
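
The efivar_ssdt= handling above now refuses to record an SSDT override name when the kernel is locked down, since loading ACPI tables from EFI variables would let firmware-variable contents change kernel behaviour. A minimal sketch of the same gating pattern, using a hypothetical parameter name (security_locked_down() returns 0 when the operation is allowed and a negative errno such as -EPERM when lockdown forbids it):

#include <linux/init.h>
#include <linux/security.h>

static int __init example_table_override_setup(char *str)
{
        int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

        if (ret)                /* locked down: reject the override */
                return ret;

        /* ... otherwise remember the requested name for later use ... */
        return 0;
}
__setup("example_table_override=", example_table_override_setup);
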
@@ -281,6 +267,9 @@
void *data;
int ret;
+ if (!efivar_ssdt[0])
+ return 0;
+
ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
list_for_each_entry_safe(entry, aux, &entries, list) {
@@ -475,7 +464,6 @@
{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
{HCDP_TABLE_GUID, "HCDP", &efi.hcdp},
{MPS_TABLE_GUID, "MPS", &efi.mps},
- {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
{SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
@@ -484,6 +472,11 @@
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
+ {LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
+ {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve},
+#ifdef CONFIG_EFI_RCI2_TABLE
+ {DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys},
+#endif
{NULL_GUID, NULL, NULL},
};
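
The three new rows register the TPM final-events log, the Linux-specific MEMRESERVE list and, when CONFIG_EFI_RCI2_TABLE is enabled, the Dell EMC RCI2 table, so their physical addresses are captured when the firmware's configuration table is scanned. A rough sketch of how a GUID match table of this shape is consumed, using hypothetical names rather than the scan code actually used in this file:

#include <linux/efi.h>

struct example_table_match {
        efi_guid_t guid;
        const char *name;
        unsigned long *ptr;
};

/* Record the physical address of a firmware table if its GUID is known. */
static void example_match_config_table(const struct example_table_match *match,
                                       efi_guid_t guid, unsigned long table)
{
        for (; match->ptr; match++) {
                if (efi_guidcmp(match->guid, guid))
                        continue;       /* not this entry */
                *match->ptr = table;
                break;
        }
}
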
@@ -561,7 +554,7 @@
sizeof(*seed) + size);
if (seed != NULL) {
pr_notice("seeding entropy pool\n");
- add_device_randomness(seed->bits, seed->size);
+ add_bootloader_randomness(seed->bits, seed->size);
early_memunmap(seed, sizeof(*seed) + size);
} else {
pr_err("Could not map UEFI random seed!\n");
@@ -591,6 +584,41 @@
early_memunmap(tbl, sizeof(*tbl));
}
+ if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
+ unsigned long prsv = efi.mem_reserve;
+
+ while (prsv) {
+ struct linux_efi_memreserve *rsv;
+ u8 *p;
+ int i;
+
+ /*
+ * Just map a full page: that is what we will get
+ * anyway, and it permits us to map the entire entry
+ * before knowing its size.
+ */
+ p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
+ PAGE_SIZE);
+ if (p == NULL) {
+ pr_err("Could not map UEFI memreserve entry!\n");
+ return -ENOMEM;
+ }
+
+ rsv = (void *)(p + prsv % PAGE_SIZE);
+
+ /* reserve the entry itself */
+ memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+
+ for (i = 0; i < atomic_read(&rsv->count); i++) {
+ memblock_reserve(rsv->entry[i].base,
+ rsv->entry[i].size);
+ }
+
+ prsv = rsv->next;
+ early_memunmap(p, PAGE_SIZE);
+ }
+ }
+
return 0;
}
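
This new block walks the Linux-specific MEMRESERVE table, a singly linked list of nodes describing memory ranges that must stay reserved (for example across kexec), and re-registers each range, plus the list nodes themselves, with memblock before the page allocator takes over. The node layout it relies on, roughly as declared in include/linux/efi.h, is shown below; EFI_MEMRESERVE_SIZE(n) works out to this header plus n entries, and EFI_MEMRESERVE_COUNT(sz) is the inverse used later when sizing a new node:

struct linux_efi_memreserve {
        int             size;   /* number of entry[] slots allocated */
        atomic_t        count;  /* number of slots currently in use */
        phys_addr_t     next;   /* physical address of the next node, or 0 */
        struct {
                phys_addr_t     base;
                phys_addr_t     size;
        } entry[0];
};
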
@@ -599,6 +627,9 @@
void *config_tables;
int sz, ret;
+ if (efi.systab->nr_tables == 0)
+ return 0;
+
if (efi_enabled(EFI_64BIT))
sz = sizeof(efi_config_table_64_t);
else
@@ -923,20 +954,85 @@
return err;
}
-bool efi_is_table_address(unsigned long phys_addr)
+static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
+
+static int __init efi_memreserve_map_root(void)
{
- unsigned int i;
+ if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
- if (phys_addr == EFI_INVALID_TABLE_ADDR)
- return false;
-
- for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
- if (*(efi_tables[i]) == phys_addr)
- return true;
-
- return false;
+ efi_memreserve_root = memremap(efi.mem_reserve,
+ sizeof(*efi_memreserve_root),
+ MEMREMAP_WB);
+ if (WARN_ON_ONCE(!efi_memreserve_root))
+ return -ENOMEM;
+ return 0;
}
+int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+{
+ struct linux_efi_memreserve *rsv;
+ unsigned long prsv;
+ int rc, index;
+
+ if (efi_memreserve_root == (void *)ULONG_MAX)
+ return -ENODEV;
+
+ if (!efi_memreserve_root) {
+ rc = efi_memreserve_map_root();
+ if (rc)
+ return rc;
+ }
+
+ /* first try to find a slot in an existing linked list entry */
+ for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
+ rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+ if (index < rsv->size) {
+ rsv->entry[index].base = addr;
+ rsv->entry[index].size = size;
+
+ memunmap(rsv);
+ return 0;
+ }
+ memunmap(rsv);
+ }
+
+ /* no slot found - allocate a new linked list entry */
+ rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
+ if (!rsv)
+ return -ENOMEM;
+
+ /*
+ * The memremap() call above assumes that a linux_efi_memreserve entry
+ * never crosses a page boundary, so let's ensure that this remains true
+ * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
+ * using SZ_4K explicitly in the size calculation below.
+ */
+ rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
+ atomic_set(&rsv->count, 1);
+ rsv->entry[0].base = addr;
+ rsv->entry[0].size = size;
+
+ spin_lock(&efi_mem_reserve_persistent_lock);
+ rsv->next = efi_memreserve_root->next;
+ efi_memreserve_root->next = __pa(rsv);
+ spin_unlock(&efi_mem_reserve_persistent_lock);
+
+ return 0;
+}
+
+static int __init efi_memreserve_root_init(void)
+{
+ if (efi_memreserve_root)
+ return 0;
+ if (efi_memreserve_map_root())
+ efi_memreserve_root = (void *)ULONG_MAX;
+ return 0;
+}
+early_initcall(efi_memreserve_root_init);
+
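
efi_mem_reserve_persistent() is the producer side of the list consumed during early boot above: it records a range in the MEMRESERVE table so that a subsequently kexec'd kernel keeps it reserved, reusing a free slot in an existing node when possible and otherwise linking in a freshly allocated page under the spinlock. A hedged usage sketch with a hypothetical caller, mirroring the __get_free_page()/__pa() pattern already used in this file:

#include <linux/efi.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only: keep one page reserved across a future kexec. */
static int example_reserve_across_kexec(void)
{
        unsigned long buf = __get_free_page(GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* Returns 0 on success or a negative errno such as -ENODEV or -ENOMEM. */
        return efi_mem_reserve_persistent(__pa(buf), PAGE_SIZE);
}
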
#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
unsigned long code, void *unused)