Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 0fe4f3e..99e23a5 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -5,6 +5,7 @@
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
@@ -73,6 +74,18 @@
}
EXPORT_SYMBOL(to_nfit_uuid);
+static const guid_t *to_nfit_bus_uuid(int family)
+{
+ if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
+ "only secondary bus families can be translated\n"))
+ return NULL;
+ /*
+ * The index of bus UUIDs starts immediately following the last
+ * NVDIMM/leaf family.
+ */
+ return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
+}
+
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -362,24 +375,8 @@
{
static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
[NVDIMM_FAMILY_INTEL] = {
- [NVDIMM_INTEL_GET_MODES] = 2,
- [NVDIMM_INTEL_GET_FWINFO] = 2,
- [NVDIMM_INTEL_START_FWUPDATE] = 2,
- [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
- [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
- [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
- [NVDIMM_INTEL_SET_THRESHOLD] = 2,
- [NVDIMM_INTEL_INJECT_ERROR] = 2,
- [NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
- [NVDIMM_INTEL_SET_PASSPHRASE] = 2,
- [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
- [NVDIMM_INTEL_UNLOCK_UNIT] = 2,
- [NVDIMM_INTEL_FREEZE_LOCK] = 2,
- [NVDIMM_INTEL_SECURE_ERASE] = 2,
- [NVDIMM_INTEL_OVERWRITE] = 2,
- [NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
- [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2,
- [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2,
+ [NVDIMM_INTEL_GET_MODES ...
+ NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
},
};
u8 id;
@@ -406,7 +403,7 @@
}
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
- struct nd_cmd_pkg *call_pkg)
+ struct nd_cmd_pkg *call_pkg, int *family)
{
if (call_pkg) {
int i;
@@ -417,6 +414,7 @@
for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
if (call_pkg->nd_reserved2[i])
return -EINVAL;
+ *family = call_pkg->nd_family;
return call_pkg->nd_command;
}
@@ -450,13 +448,14 @@
acpi_handle handle;
const guid_t *guid;
int func, rc, i;
+ int family = 0;
if (cmd_rc)
*cmd_rc = -EINVAL;
if (cmd == ND_CMD_CALL)
call_pkg = buf;
- func = cmd_to_func(nfit_mem, cmd, call_pkg);
+ func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
if (func < 0)
return func;
@@ -478,9 +477,20 @@
cmd_name = nvdimm_bus_cmd_name(cmd);
cmd_mask = nd_desc->cmd_mask;
- dsm_mask = nd_desc->bus_dsm_mask;
+ if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
+ family = call_pkg->nd_family;
+ if (family > NVDIMM_BUS_FAMILY_MAX ||
+ !test_bit(family, &nd_desc->bus_family_mask))
+ return -EINVAL;
+ family = array_index_nospec(family,
+ NVDIMM_BUS_FAMILY_MAX + 1);
+ dsm_mask = acpi_desc->family_dsm_mask[family];
+ guid = to_nfit_bus_uuid(family);
+ } else {
+ dsm_mask = acpi_desc->bus_dsm_mask;
+ guid = to_nfit_uuid(NFIT_DEV_BUS);
+ }
desc = nd_cmd_bus_desc(cmd);
- guid = to_nfit_uuid(NFIT_DEV_BUS);
handle = adev->handle;
dimm_name = "bus";
}
@@ -516,8 +526,8 @@
in_buf.buffer.length = call_pkg->nd_size_in;
}
- dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
- dimm_name, cmd, func, in_buf.buffer.length);
+ dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
+ dimm_name, cmd, family, func, in_buf.buffer.length);
if (payload_dumpable(nvdimm, func))
print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
in_buf.buffer.pointer,
@@ -1184,7 +1194,8 @@
return 0;
}
-static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
+static int nfit_mem_cmp(void *priv, const struct list_head *_a,
+ const struct list_head *_b)
{
struct nfit_mem *a = container_of(_a, typeof(*a), list);
struct nfit_mem *b = container_of(_b, typeof(*b), list);
@@ -1238,8 +1249,9 @@
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
+ return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
@@ -1382,11 +1394,15 @@
static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
- if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
- return 0;
+ if (a == &dev_attr_scrub.attr)
+ return ars_supported(nvdimm_bus) ? a->mode : 0;
+
+ if (a == &dev_attr_firmware_activate_noidle.attr)
+ return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;
+
return a->mode;
}
@@ -1395,6 +1411,7 @@
&dev_attr_scrub.attr,
&dev_attr_hw_error_scrub.attr,
&dev_attr_bus_dsm_mask.attr,
+ &dev_attr_firmware_activate_noidle.attr,
NULL,
};
@@ -1405,7 +1422,6 @@
};
static const struct attribute_group *acpi_nfit_attribute_groups[] = {
- &nvdimm_bus_attribute_group,
&acpi_nfit_attribute_group,
NULL,
};
@@ -1668,7 +1684,7 @@
static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -1699,8 +1715,6 @@
};
static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
- &nvdimm_attribute_group,
- &nd_device_attribute_group,
&acpi_nfit_dimm_attribute_group,
NULL,
};
@@ -1826,6 +1840,7 @@
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, u32 device_handle)
{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
struct acpi_device *adev, *adev_dimm;
struct device *dev = acpi_desc->dev;
unsigned long dsm_mask, label_mask;
@@ -1837,6 +1852,7 @@
/* nfit test assumes 1:1 relationship between commands and dsms */
nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
nfit_mem->family = NVDIMM_FAMILY_INTEL;
+ set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
@@ -1889,10 +1905,13 @@
* Note, that checking for function0 (bit0) tells us if any commands
* are reachable through this GUID.
*/
+ clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
- if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+ if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
+ set_bit(i, &nd_desc->dimm_family_mask);
if (family < 0 || i == default_dsm_family)
family = i;
+ }
/* limit the supported commands to those that are publicly documented */
nfit_mem->family = family;
@@ -2010,6 +2029,26 @@
}
}
+static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
+ struct nfit_mem *nfit_mem)
+{
+ unsigned long mask;
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+
+ if (!nd_desc->fw_ops)
+ return NULL;
+
+ if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
+ return NULL;
+
+ mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
+ if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+ return NULL;
+
+ return intel_fw_ops;
+}
+
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_mem *nfit_mem;
@@ -2030,8 +2069,10 @@
continue;
}
- if (nfit_mem->bdw && nfit_mem->memdev_pmem)
+ if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
set_bit(NDD_ALIASING, &flags);
+ set_bit(NDD_LABELING, &flags);
+ }
/* collate flags across all memdevs for this dimm */
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -2084,7 +2125,8 @@
acpi_nfit_dimm_attribute_groups,
flags, cmd_mask, flush ? flush->hint_count : 0,
nfit_mem->flush_wpq, &nfit_mem->id[0],
- acpi_nfit_get_security_ops(nfit_mem->family));
+ acpi_nfit_get_security_ops(nfit_mem->family),
+ acpi_nfit_get_fw_ops(nfit_mem));
if (!nvdimm)
return -ENOMEM;
@@ -2138,22 +2180,33 @@
* these commands.
*/
enum nfit_aux_cmds {
- NFIT_CMD_TRANSLATE_SPA = 5,
- NFIT_CMD_ARS_INJECT_SET = 7,
- NFIT_CMD_ARS_INJECT_CLEAR = 8,
- NFIT_CMD_ARS_INJECT_GET = 9,
+ NFIT_CMD_TRANSLATE_SPA = 5,
+ NFIT_CMD_ARS_INJECT_SET = 7,
+ NFIT_CMD_ARS_INJECT_CLEAR = 8,
+ NFIT_CMD_ARS_INJECT_GET = 9,
};
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
+ unsigned long dsm_mask, *mask;
struct acpi_device *adev;
- unsigned long dsm_mask;
int i;
- nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
- nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
+ set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
+ set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
+
+ /* enable nfit_test to inject bus command emulation */
+ if (acpi_desc->bus_cmd_force_en) {
+ nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
+ mask = &nd_desc->bus_family_mask;
+ if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
+ set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
+ nd_desc->fw_ops = intel_bus_fw_ops;
+ }
+ }
+
adev = to_acpi_dev(acpi_desc);
if (!adev)
return;
@@ -2161,7 +2214,6 @@
for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
set_bit(i, &nd_desc->cmd_mask);
- set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
dsm_mask =
(1 << ND_CMD_ARS_CAP) |
@@ -2174,7 +2226,20 @@
(1 << NFIT_CMD_ARS_INJECT_GET);
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
- set_bit(i, &nd_desc->bus_dsm_mask);
+ set_bit(i, &acpi_desc->bus_dsm_mask);
+
+ /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
+ dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+ guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
+ mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+ for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+ if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
+ set_bit(i, mask);
+
+ if (*mask == dsm_mask) {
+ set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
+ nd_desc->fw_ops = intel_bus_fw_ops;
+ }
}
static ssize_t range_index_show(struct device *dev,
@@ -2198,10 +2263,6 @@
};
static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
- &nd_region_attribute_group,
- &nd_mapping_attribute_group,
- &nd_device_attribute_group,
- &nd_numa_attribute_group,
&acpi_nfit_region_attribute_group,
NULL,
};
@@ -2298,7 +2359,7 @@
nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
if (!nd_set)
return -ENOMEM;
- guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
+ import_guid(&nd_set->type_guid, spa->range_guid);
info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
if (!info)
@@ -2576,7 +2637,7 @@
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
- nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
+ nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
if (!mmio->addr.base) {
dev_dbg(dev, "%s failed to map bdw\n",
nvdimm_name(nvdimm));
@@ -2950,15 +3011,25 @@
ndr_desc->provider_data = nfit_spa;
ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
- ndr_desc->numa_node = acpi_map_pxm_to_online_node(
- spa->proximity_domain);
- ndr_desc->target_node = acpi_map_pxm_to_node(
- spa->proximity_domain);
+ ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain);
+ ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
} else {
ndr_desc->numa_node = NUMA_NO_NODE;
ndr_desc->target_node = NUMA_NO_NODE;
}
+ /* Fallback to address based numa information if node lookup failed */
+ if (ndr_desc->numa_node == NUMA_NO_NODE) {
+ ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+ dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ }
+ if (ndr_desc->target_node == NUMA_NO_NODE) {
+ ndr_desc->target_node = phys_to_target_node(spa->address);
+ dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ }
+
/*
* Persistence domain bits are hierarchical, if
* ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
@@ -3281,7 +3352,7 @@
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_spa *nfit_spa;
- int rc;
+ int rc, do_sched_ars = 0;
set_bit(ARS_VALID, &acpi_desc->scrub_flags);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
@@ -3293,7 +3364,7 @@
}
}
- list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
@@ -3301,6 +3372,13 @@
rc = ars_register(acpi_desc, nfit_spa);
if (rc)
return rc;
+
+ /*
+ * Kick off background ARS if at least one
+ * region successfully registered ARS
+ */
+ if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
+ do_sched_ars++;
break;
case NFIT_SPA_BDW:
/* nothing to register */
@@ -3319,8 +3397,10 @@
/* don't register unknown regions */
break;
}
+ }
- sched_ars(acpi_desc);
+ if (do_sched_ars)
+ sched_ars(acpi_desc);
return 0;
}
@@ -3493,7 +3573,10 @@
return 0;
}
-/* prevent security commands from being issued via ioctl */
+/*
+ * Prevent security and firmware activate commands from being issued via
+ * ioctl.
+ */
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf)
{
@@ -3504,10 +3587,15 @@
call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
func = call_pkg->nd_command;
if (func > NVDIMM_CMD_MAX ||
- (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
+ (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
return -EOPNOTSUPP;
}
+ /* block all non-nfit bus commands */
+ if (!nvdimm && cmd == ND_CMD_CALL &&
+ call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
+ return -EOPNOTSUPP;
+
return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
}
@@ -3799,6 +3887,7 @@
guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
+ guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 1113b67..8dd792a 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -7,6 +7,48 @@
#include "intel.h"
#include "nfit.h"
+static ssize_t firmware_activate_noidle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
+}
+
+static ssize_t firmware_activate_noidle_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ ssize_t rc;
+ bool val;
+
+ rc = kstrtobool(buf, &val);
+ if (rc)
+ return rc;
+ if (val != acpi_desc->fwa_noidle)
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
+ acpi_desc->fwa_noidle = val;
+ return size;
+}
+DEVICE_ATTR_RW(firmware_activate_noidle);
+
+bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
+{
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ unsigned long *mask;
+
+ if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
+ return false;
+
+ mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+ return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+}
+
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
enum nvdimm_passphrase_type ptype)
{
@@ -389,3 +431,347 @@
};
const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
+
+static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
+ struct nd_intel_bus_fw_activate_businfo *info)
+{
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_bus_fw_activate_businfo cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate_businfo),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate_businfo),
+ },
+ };
+ int rc;
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+ NULL);
+ *info = nd_cmd.cmd;
+ return rc;
+}
+
+/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
+static enum nvdimm_fwa_state intel_bus_fwa_state(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ struct nd_intel_bus_fw_activate_businfo info;
+ struct device *dev = acpi_desc->dev;
+ enum nvdimm_fwa_state state;
+ int rc;
+
+ /*
+ * It should not be possible for platform firmware to return
+ * busy because activate is a synchronous operation. Treat it
+ * similar to invalid, i.e. always refresh / poll the status.
+ */
+ switch (acpi_desc->fwa_state) {
+ case NVDIMM_FWA_INVALID:
+ case NVDIMM_FWA_BUSY:
+ break;
+ default:
+ /* check if capability needs to be refreshed */
+ if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
+ break;
+ return acpi_desc->fwa_state;
+ }
+
+ /* Refresh with platform firmware */
+ rc = intel_bus_fwa_businfo(nd_desc, &info);
+ if (rc)
+ return NVDIMM_FWA_INVALID;
+
+ switch (info.state) {
+ case ND_INTEL_FWA_IDLE:
+ state = NVDIMM_FWA_IDLE;
+ break;
+ case ND_INTEL_FWA_BUSY:
+ state = NVDIMM_FWA_BUSY;
+ break;
+ case ND_INTEL_FWA_ARMED:
+ if (info.activate_tmo > info.max_quiesce_tmo)
+ state = NVDIMM_FWA_ARM_OVERFLOW;
+ else
+ state = NVDIMM_FWA_ARMED;
+ break;
+ default:
+ dev_err_once(dev, "invalid firmware activate state %d\n",
+ info.state);
+ return NVDIMM_FWA_INVALID;
+ }
+
+ /*
+ * Capability data is available in the same payload as state. It
+ * is expected to be static.
+ */
+ if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
+ if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
+ else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
+ /*
+ * Skip hibernate cycle by default if platform
+ * indicates that it does not need devices to be
+ * quiesced.
+ */
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
+ } else
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
+ }
+
+ acpi_desc->fwa_state = state;
+
+ return state;
+}
+
+static enum nvdimm_fwa_capability intel_bus_fwa_capability(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
+ return acpi_desc->fwa_cap;
+
+ if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
+ return acpi_desc->fwa_cap;
+
+ return NVDIMM_FWA_CAP_INVALID;
+}
+
+static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_bus_fw_activate cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate),
+ },
+ /*
+ * Even though activate is run from a suspended context,
+ * for safety, still ask platform firmware to force
+ * quiesce devices by default. Let a module
+ * parameter override that policy.
+ */
+ .cmd = {
+ .iodev_state = acpi_desc->fwa_noidle
+ ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+ : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
+ },
+ };
+ int rc;
+
+ switch (intel_bus_fwa_state(nd_desc)) {
+ case NVDIMM_FWA_ARMED:
+ case NVDIMM_FWA_ARM_OVERFLOW:
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+ NULL);
+
+ /*
+ * Whether the command succeeded, or failed, the agent checking
+ * for the result needs to query the DIMMs individually.
+ * Increment the activation count to invalidate all the DIMM
+ * states at once (it's otherwise not possible to take
+ * acpi_desc->init_mutex in this context)
+ */
+ acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+ acpi_desc->fwa_count++;
+
+ dev_dbg(acpi_desc->dev, "result: %d\n", rc);
+
+ return rc;
+}
+
+static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
+ .activate_state = intel_bus_fwa_state,
+ .capability = intel_bus_fwa_capability,
+ .activate = intel_bus_fwa_activate,
+};
+
+const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
+
+static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
+ struct nd_intel_fw_activate_dimminfo *info)
+{
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_fw_activate_dimminfo cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_out =
+ sizeof(struct nd_intel_fw_activate_dimminfo),
+ .nd_fw_size =
+ sizeof(struct nd_intel_fw_activate_dimminfo),
+ },
+ };
+ int rc;
+
+ rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+ *info = nd_cmd.cmd;
+ return rc;
+}
+
+static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct nd_intel_fw_activate_dimminfo info;
+ int rc;
+
+ /*
+ * Similar to the bus state, since activate is synchronous the
+ * busy state should resolve within the context of 'activate'.
+ */
+ switch (nfit_mem->fwa_state) {
+ case NVDIMM_FWA_INVALID:
+ case NVDIMM_FWA_BUSY:
+ break;
+ default:
+ /* If no activations occurred the old state is still valid */
+ if (nfit_mem->fwa_count == acpi_desc->fwa_count)
+ return nfit_mem->fwa_state;
+ }
+
+ rc = intel_fwa_dimminfo(nvdimm, &info);
+ if (rc)
+ return NVDIMM_FWA_INVALID;
+
+ switch (info.state) {
+ case ND_INTEL_FWA_IDLE:
+ nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
+ break;
+ case ND_INTEL_FWA_BUSY:
+ nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
+ break;
+ case ND_INTEL_FWA_ARMED:
+ nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
+ break;
+ default:
+ nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+ break;
+ }
+
+ switch (info.result) {
+ case ND_INTEL_DIMM_FWA_NONE:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
+ break;
+ case ND_INTEL_DIMM_FWA_SUCCESS:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
+ break;
+ case ND_INTEL_DIMM_FWA_NOTSTAGED:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
+ break;
+ case ND_INTEL_DIMM_FWA_NEEDRESET:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
+ break;
+ case ND_INTEL_DIMM_FWA_MEDIAFAILED:
+ case ND_INTEL_DIMM_FWA_ABORT:
+ case ND_INTEL_DIMM_FWA_NOTSUPP:
+ case ND_INTEL_DIMM_FWA_ERROR:
+ default:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
+ break;
+ }
+
+ nfit_mem->fwa_count = acpi_desc->fwa_count;
+
+ return nfit_mem->fwa_state;
+}
+
+static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+
+ if (nfit_mem->fwa_count == acpi_desc->fwa_count
+ && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
+ return nfit_mem->fwa_result;
+
+ if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
+ return nfit_mem->fwa_result;
+
+ return NVDIMM_FWA_RESULT_INVALID;
+}
+
+static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_fw_activate_arm cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
+ .nd_size_out =
+ sizeof(struct nd_intel_fw_activate_arm),
+ .nd_fw_size =
+ sizeof(struct nd_intel_fw_activate_arm),
+ },
+ .cmd = {
+ .activate_arm = arm == NVDIMM_FWA_ARM
+ ? ND_INTEL_DIMM_FWA_ARM
+ : ND_INTEL_DIMM_FWA_DISARM,
+ },
+ };
+ int rc;
+
+ switch (intel_fwa_state(nvdimm)) {
+ case NVDIMM_FWA_INVALID:
+ return -ENXIO;
+ case NVDIMM_FWA_BUSY:
+ return -EBUSY;
+ case NVDIMM_FWA_IDLE:
+ if (arm == NVDIMM_FWA_DISARM)
+ return 0;
+ break;
+ case NVDIMM_FWA_ARMED:
+ if (arm == NVDIMM_FWA_ARM)
+ return 0;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ /*
+ * Invalidate the bus-level state, now that we're committed to
+ * changing the 'arm' state.
+ */
+ acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+ nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+
+ rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+
+ dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
+ ? "arm" : "disarm", rc);
+ return rc;
+}
+
+static const struct nvdimm_fw_ops __intel_fw_ops = {
+ .activate_state = intel_fwa_state,
+ .activate_result = intel_fwa_result,
+ .arm = intel_fwa_arm,
+};
+
+const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h
index 0aca682..b768234 100644
--- a/drivers/acpi/nfit/intel.h
+++ b/drivers/acpi/nfit/intel.h
@@ -111,4 +111,65 @@
u8 passphrase[ND_INTEL_PASSPHRASE_SIZE];
u32 status;
} __packed;
+
+#define ND_INTEL_FWA_IDLE 0
+#define ND_INTEL_FWA_ARMED 1
+#define ND_INTEL_FWA_BUSY 2
+
+#define ND_INTEL_DIMM_FWA_NONE 0
+#define ND_INTEL_DIMM_FWA_NOTSTAGED 1
+#define ND_INTEL_DIMM_FWA_SUCCESS 2
+#define ND_INTEL_DIMM_FWA_NEEDRESET 3
+#define ND_INTEL_DIMM_FWA_MEDIAFAILED 4
+#define ND_INTEL_DIMM_FWA_ABORT 5
+#define ND_INTEL_DIMM_FWA_NOTSUPP 6
+#define ND_INTEL_DIMM_FWA_ERROR 7
+
+struct nd_intel_fw_activate_dimminfo {
+ u32 status;
+ u16 result;
+ u8 state;
+ u8 reserved[7];
+} __packed;
+
+#define ND_INTEL_DIMM_FWA_ARM 1
+#define ND_INTEL_DIMM_FWA_DISARM 0
+
+struct nd_intel_fw_activate_arm {
+ u8 activate_arm;
+ u32 status;
+} __packed;
+
+/* Root device command payloads */
+#define ND_INTEL_BUS_FWA_CAP_FWQUIESCE (1 << 0)
+#define ND_INTEL_BUS_FWA_CAP_OSQUIESCE (1 << 1)
+#define ND_INTEL_BUS_FWA_CAP_RESET (1 << 2)
+
+struct nd_intel_bus_fw_activate_businfo {
+ u32 status;
+ u16 reserved;
+ u8 state;
+ u8 capability;
+ u64 activate_tmo;
+ u64 cpu_quiesce_tmo;
+ u64 io_quiesce_tmo;
+ u64 max_quiesce_tmo;
+} __packed;
+
+#define ND_INTEL_BUS_FWA_STATUS_NOARM (6 | 1 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_BUSY (6 | 2 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_NOFW (6 | 3 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_TMO (6 | 4 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_NOIDLE (6 | 5 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_ABORT (6 | 6 << 16)
+
+#define ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE (0)
+#define ND_INTEL_BUS_FWA_IODEV_OS_IDLE (1)
+struct nd_intel_bus_fw_activate {
+ u8 iodev_state;
+ u32 status;
+} __packed;
+
+extern const struct nvdimm_fw_ops *intel_fw_ops;
+extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops;
#endif
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index f0ae485..ee8d997 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -76,6 +76,7 @@
*/
acpi_nfit_ars_rescan(acpi_desc, 0);
}
+ mce->kflags |= MCE_HANDLED_NFIT;
break;
}
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index b317f40..c674f3d 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -16,8 +16,9 @@
/* ACPI 6.1 */
#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
-/* http://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
+/* https://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
+#define UUID_INTEL_BUS "c7d8acd4-2df8-4b82-9f65-a325335af149"
/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
@@ -33,7 +34,6 @@
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
#define NVDIMM_CMD_MAX 31
#define NVDIMM_STANDARD_CMDMASK \
@@ -66,6 +66,13 @@
NVDIMM_INTEL_QUERY_OVERWRITE = 26,
NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27,
NVDIMM_INTEL_MASTER_SECURE_ERASE = 28,
+ NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO = 29,
+ NVDIMM_INTEL_FW_ACTIVATE_ARM = 30,
+};
+
+enum nvdimm_bus_family_cmds {
+ NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO = 1,
+ NVDIMM_BUS_INTEL_FW_ACTIVATE = 2,
};
#define NVDIMM_INTEL_SECURITY_CMDMASK \
@@ -76,13 +83,22 @@
| 1 << NVDIMM_INTEL_SET_MASTER_PASSPHRASE \
| 1 << NVDIMM_INTEL_MASTER_SECURE_ERASE)
+#define NVDIMM_INTEL_FW_ACTIVATE_CMDMASK \
+(1 << NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO | 1 << NVDIMM_INTEL_FW_ACTIVATE_ARM)
+
+#define NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK \
+(1 << NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO | 1 << NVDIMM_BUS_INTEL_FW_ACTIVATE)
+
#define NVDIMM_INTEL_CMDMASK \
(NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \
| 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \
| 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \
| 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \
| 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN \
- | NVDIMM_INTEL_SECURITY_CMDMASK)
+ | NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+
+#define NVDIMM_INTEL_DENY_CMDMASK \
+(NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
enum nfit_uuids {
/* for simplicity alias the uuid index with the family id */
@@ -91,6 +107,11 @@
NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
+ /*
+ * to_nfit_bus_uuid() expects to translate bus uuid family ids
+ * to a UUID index using NVDIMM_FAMILY_MAX as an offset
+ */
+ NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL,
NFIT_SPA_VOLATILE,
NFIT_SPA_PM,
NFIT_SPA_DCR,
@@ -145,32 +166,32 @@
unsigned long ars_state;
u32 clear_err_unit;
u32 max_ars;
- struct acpi_nfit_system_address spa[0];
+ struct acpi_nfit_system_address spa[];
};
struct nfit_dcr {
struct list_head list;
- struct acpi_nfit_control_region dcr[0];
+ struct acpi_nfit_control_region dcr[];
};
struct nfit_bdw {
struct list_head list;
- struct acpi_nfit_data_region bdw[0];
+ struct acpi_nfit_data_region bdw[];
};
struct nfit_idt {
struct list_head list;
- struct acpi_nfit_interleave idt[0];
+ struct acpi_nfit_interleave idt[];
};
struct nfit_flush {
struct list_head list;
- struct acpi_nfit_flush_address flush[0];
+ struct acpi_nfit_flush_address flush[];
};
struct nfit_memdev {
struct list_head list;
- struct acpi_nfit_memory_map memdev[0];
+ struct acpi_nfit_memory_map memdev[];
};
enum nfit_mem_flags {
@@ -199,6 +220,9 @@
struct list_head list;
struct acpi_device *adev;
struct acpi_nfit_desc *acpi_desc;
+ enum nvdimm_fwa_state fwa_state;
+ enum nvdimm_fwa_result fwa_result;
+ int fwa_count;
char id[NFIT_DIMM_ID_LEN+1];
struct resource *flush_wpq;
unsigned long dsm_mask;
@@ -238,11 +262,17 @@
unsigned long scrub_flags;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
- unsigned long bus_nfit_cmd_force_en;
+ unsigned long bus_dsm_mask;
+ unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1];
unsigned int platform_cap;
unsigned int scrub_tmo;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
+ enum nvdimm_fwa_state fwa_state;
+ enum nvdimm_fwa_capability fwa_cap;
+ int fwa_count;
+ bool fwa_noidle;
+ bool fwa_nosuspend;
};
enum scrub_mode {
@@ -345,4 +375,6 @@
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc);
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
+bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus);
+extern struct device_attribute dev_attr_firmware_activate_noidle;
#endif /* __NFIT_H__ */