Update Linux to v5.4.2

Pull in the v5.4.2 vfio-pci changes: SPDX license identifiers replace the
GPL boilerplate, log messages move to the pci_* helpers, the global
driver_lock becomes a per-bus/slot reflck, power-state transitions gain
save/restore handling for devices that soft-reset on D3hot->D0, the VF
INTx pin register is virtualized to zero per the SR-IOV spec, and the new
vfio_pci_nvlink2.c adds POWER9 Witherspoon NVLink2 support (NVIDIA V100
GPU RAM and IBM NPU2 ATSD regions) with matching tracepoints in trace.h.

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 42dc1d3..ac3c1dd 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config VFIO_PCI
tristate "VFIO support for PCI devices"
depends on VFIO && PCI && EVENTFD
@@ -38,3 +39,9 @@
and LPC bridge config space.
To enable Intel IGD assignment through vfio-pci, say Y.
+
+config VFIO_PCI_NVLINK2
+ def_bool y
+ depends on VFIO_PCI && PPC_POWERNV
+ help
+ VFIO PCI support for the P9 Witherspoon machine with NVIDIA V100 GPUs
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 76d8ec0..f027f8a 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -1,5 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
+vfio-pci-$(CONFIG_VFIO_PCI_NVLINK2) += vfio_pci_nvlink2.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h
new file mode 100644
index 0000000..b2aa986
--- /dev/null
+++ b/drivers/vfio/pci/trace.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * VFIO PCI mmap/mmap_fault tracepoints
+ *
+ * Copyright (C) 2018 IBM Corp. All rights reserved.
+ * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_pci
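+/* TRACE_SYSTEM names the tracefs directory for these events: events/vfio_pci/ */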
+
+#if !defined(_TRACE_VFIO_PCI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VFIO_PCI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_pci_nvgpu_mmap_fault,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ vm_fault_t ret),
+ TP_ARGS(pdev, hpa, ua, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev),
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->ret)
+);
+
+TRACE_EVENT(vfio_pci_nvgpu_mmap,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ unsigned long size, int ret),
+ TP_ARGS(pdev, hpa, ua, size, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(unsigned long, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev),
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->size = size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx size=%lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->size, __entry->ret)
+);
+
+TRACE_EVENT(vfio_pci_npu2_mmap,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ unsigned long size, int ret),
+ TP_ARGS(pdev, hpa, ua, size, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(unsigned long, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev),
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->size = size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx size=%lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->size, __entry->ret)
+);
+
+#endif /* _TRACE_VFIO_PCI_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
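+/*
+ * TRACE_INCLUDE_PATH is relative to include/trace/ (where
+ * define_trace.h lives); it lets define_trace.h re-include this header
+ * to expand the TRACE_EVENT() definitions above into real code.
+ */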
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
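
Once built in, the three events above can be toggled at runtime through
tracefs. A minimal userspace sketch, assuming tracefs is mounted at
/sys/kernel/tracing (the mount point is environment-dependent):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Enable every event in the vfio_pci group defined above */
        int fd = open("/sys/kernel/tracing/events/vfio_pci/enable", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "1", 1) != 1)
            perror("write");
        close(fd);
        /* Records then appear in /sys/kernel/tracing/trace, formatted
         * per the TP_printk() strings above. */
        return 0;
    }
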
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index cddb453..0220616 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1,17 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, pugs@cisco.com
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/eventfd.h>
@@ -56,8 +54,6 @@
MODULE_PARM_DESC(disable_idle_d3,
"Disable using the PCI D3 low power state for idle, unused devices");
-static DEFINE_MUTEX(driver_lock);
-
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -211,6 +207,57 @@
return false;
}
+static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 pmcsr;
+
+ if (!pdev->pm_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
+}
+
+/*
+ * pci_set_power_state() wrapper handling devices which perform a soft reset on
+ * D3->D0 transition. Save state prior to D0/1/2->D3, stash it on the vdev,
+ * restore when returned to D0. Saved separately from pci_saved_state for use
+ * by PM capability emulation and separately from pci_dev internal saved state
+ * to avoid it being overwritten and consumed around other resets.
+ */
+int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ bool needs_restore = false, needs_save = false;
+ int ret;
+
+ if (vdev->needs_pm_restore) {
+ if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
+ pci_save_state(pdev);
+ needs_save = true;
+ }
+
+ if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
+ needs_restore = true;
+ }
+
+ ret = pci_set_power_state(pdev, state);
+
+ if (!ret) {
+ /* D3 might be unsupported via quirk, skip unless in D3 */
+ if (needs_save && pdev->current_state >= PCI_D3hot) {
+ vdev->pm_save = pci_store_saved_state(pdev);
+ } else if (needs_restore) {
+ pci_load_and_free_saved_state(pdev, &vdev->pm_save);
+ pci_restore_state(pdev);
+ }
+ }
+
+ return ret;
+}
+
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
@@ -218,7 +265,7 @@
u16 cmd;
u8 msix_pos;
- pci_set_power_state(pdev, PCI_D0);
+ vfio_pci_set_power_state(vdev, PCI_D0);
/* Don't allow our initial saved state to include busmaster */
pci_clear_master(pdev);
@@ -238,12 +285,11 @@
pci_save_state(pdev);
vdev->pci_saved_state = pci_store_saved_state(pdev);
if (!vdev->pci_saved_state)
- pr_debug("%s: Couldn't store %s saved state\n",
- __func__, dev_name(&pdev->dev));
+ pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);
if (likely(!nointxmask)) {
if (vfio_pci_nointx(pdev)) {
- dev_info(&pdev->dev, "Masking broken INTx support\n");
+ pci_info(pdev, "Masking broken INTx support\n");
vdev->nointx = true;
pci_intx(pdev, 0);
} else
@@ -287,16 +333,36 @@
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
ret = vfio_pci_igd_init(vdev);
if (ret) {
- dev_warn(&vdev->pdev->dev,
- "Failed to setup Intel IGD regions\n");
- vfio_pci_disable(vdev);
- return ret;
+ pci_warn(pdev, "Failed to setup Intel IGD regions\n");
+ goto disable_exit;
+ }
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
+ IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
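+ /* -ENODEV only means this device has no NVLink2; any other error is fatal */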
+ ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
+ if (ret && ret != -ENODEV) {
+ pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
+ goto disable_exit;
+ }
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_IBM &&
+ IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
+ ret = vfio_pci_ibm_npu2_init(vdev);
+ if (ret && ret != -ENODEV) {
+ pci_warn(pdev, "Failed to setup NVIDIA NV2 ATSD region\n");
+ goto disable_exit;
}
}
vfio_pci_probe_mmaps(vdev);
return 0;
+
+disable_exit:
+ vfio_pci_disable(vdev);
+ return ret;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
@@ -357,8 +423,7 @@
* is just busy work.
*/
if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
- pr_info("%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&pdev->dev));
+ pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);
if (!vdev->reset_works)
goto out;
@@ -373,11 +438,20 @@
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
/*
- * Try to reset the device. The success of this is dependent on
- * being able to lock the device, which is not always possible.
+ * Try to get the locks ourselves to prevent a deadlock. The
+ * success of this is dependent on being able to lock the device,
+ * which is not always possible.
+ * We cannot use the "try" reset interface here, which will
+ * overwrite the previously restored configuration information.
*/
- if (vdev->reset_works && !pci_try_reset_function(pdev))
- vdev->needs_reset = false;
+ if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
+ if (device_trylock(&pdev->dev)) {
+ if (!__pci_reset_function_locked(pdev))
+ vdev->needs_reset = false;
+ device_unlock(&pdev->dev);
+ }
+ pci_cfg_access_unlock(pdev);
+ }
pci_restore_state(pdev);
out:
@@ -386,21 +460,21 @@
vfio_pci_try_bus_reset(vdev);
if (!disable_idle_d3)
- pci_set_power_state(pdev, PCI_D3hot);
+ vfio_pci_set_power_state(vdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
- mutex_lock(&driver_lock);
+ mutex_lock(&vdev->reflck->lock);
if (!(--vdev->refcnt)) {
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
}
- mutex_unlock(&driver_lock);
+ mutex_unlock(&vdev->reflck->lock);
module_put(THIS_MODULE);
}
@@ -413,7 +487,7 @@
if (!try_module_get(THIS_MODULE))
return -ENODEV;
- mutex_lock(&driver_lock);
+ mutex_lock(&vdev->reflck->lock);
if (!vdev->refcnt) {
ret = vfio_pci_enable(vdev);
@@ -424,7 +498,7 @@
}
vdev->refcnt++;
error:
- mutex_unlock(&driver_lock);
+ mutex_unlock(&vdev->reflck->lock);
if (ret)
module_put(THIS_MODULE);
return ret;
@@ -434,10 +508,14 @@
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
- pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
- if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
- return 1;
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
+ vdev->nointx || vdev->pdev->is_virtfn)
+ return 0;
+
+ pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
+
+ return pin ? 1 : 0;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
@@ -683,6 +761,7 @@
{
void __iomem *io;
size_t size;
+ u16 orig_cmd;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -698,15 +777,23 @@
break;
}
- /* Is it really there? */
- io = pci_map_rom(pdev, &size);
- if (!io || !size) {
- info.size = 0;
- break;
- }
- pci_unmap_rom(pdev, io);
+ /*
+ * Is it really there? Enable memory decode for
+ * implicit access in pci_map_rom().
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ orig_cmd | PCI_COMMAND_MEMORY);
- info.flags = VFIO_REGION_INFO_FLAG_READ;
+ io = pci_map_rom(pdev, &size);
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
+ info.size = 0;
+ }
+
+ pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
@@ -746,6 +833,12 @@
if (ret)
return ret;
+ if (vdev->region[i].ops->add_capability) {
+ ret = vdev->region[i].ops->add_capability(vdev,
+ &vdev->region[i], &caps);
+ if (ret)
+ return ret;
+ }
}
}
@@ -1113,6 +1206,15 @@
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
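+ /*
+ * Indexes above the fixed BAR/ROM/config/VGA set are device-specific
+ * regions; route their mmap to the region's own ops->mmap callback.
+ */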
+ if (index >= VFIO_PCI_NUM_REGIONS) {
+ int regnum = index - VFIO_PCI_NUM_REGIONS;
+ struct vfio_pci_region *region = vdev->region + regnum;
+
+ if (region && region->ops && region->ops->mmap &&
+ (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
+ return region->ops->mmap(vdev, region, vma);
+ return -EINVAL;
+ }
if (index >= VFIO_PCI_ROM_REGION_INDEX)
return -EINVAL;
if (!vdev->bar_mmap_supported[index])
@@ -1155,17 +1257,18 @@
static void vfio_pci_request(void *device_data, unsigned int count)
{
struct vfio_pci_device *vdev = device_data;
+ struct pci_dev *pdev = vdev->pdev;
mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
if (!(count % 10))
- dev_notice_ratelimited(&vdev->pdev->dev,
+ pci_notice_ratelimited(pdev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(vdev->req_trigger, 1);
} else if (count == 0) {
- dev_warn(&vdev->pdev->dev,
+ pci_warn(pdev,
"No device request channel registered, blocked until released by user\n");
}
@@ -1183,6 +1286,9 @@
.request = vfio_pci_request,
};
+static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
+static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);
+
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct vfio_pci_device *vdev;
@@ -1229,12 +1335,22 @@
return ret;
}
+ ret = vfio_pci_reflck_attach(vdev);
+ if (ret) {
+ vfio_del_group_dev(&pdev->dev);
+ vfio_iommu_group_put(group, &pdev->dev);
+ kfree(vdev);
+ return ret;
+ }
+
if (vfio_pci_is_vga(pdev)) {
vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
vga_set_legacy_decoding(pdev,
vfio_pci_set_vga_decode(vdev, false));
}
+ vfio_pci_probe_power_state(vdev);
+
if (!disable_idle_d3) {
/*
* pci-core sets the device power state to an unknown value at
@@ -1245,8 +1361,8 @@
* be able to get to D3. Therefore first do a D0 transition
* before going to D3.
*/
- pci_set_power_state(pdev, PCI_D0);
- pci_set_power_state(pdev, PCI_D3hot);
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ vfio_pci_set_power_state(vdev, PCI_D3hot);
}
return ret;
@@ -1260,9 +1376,16 @@
if (!vdev)
return;
+ vfio_pci_reflck_put(vdev->reflck);
+
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
kfree(vdev->region);
mutex_destroy(&vdev->ioeventfds_lock);
+
+ if (!disable_idle_d3)
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ kfree(vdev->pm_save);
kfree(vdev);
if (vfio_pci_is_vga(pdev)) {
@@ -1271,9 +1394,6 @@
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
}
-
- if (!disable_idle_d3)
- pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
@@ -1316,16 +1436,97 @@
.err_handler = &vfio_err_handlers,
};
+static DEFINE_MUTEX(reflck_lock);
+
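+/*
+ * A reflck is shared by every vfio-pci device in the same bus/slot
+ * reset group: the first device to attach allocates it, later devices
+ * find it via vfio_pci_reflck_find() and take a reference. Its mutex
+ * replaces the old global driver_lock so that open/release/reset only
+ * serialize among devices that can actually affect each other.
+ */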
+static struct vfio_pci_reflck *vfio_pci_reflck_alloc(void)
+{
+ struct vfio_pci_reflck *reflck;
+
+ reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
+ if (!reflck)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&reflck->kref);
+ mutex_init(&reflck->lock);
+
+ return reflck;
+}
+
+static void vfio_pci_reflck_get(struct vfio_pci_reflck *reflck)
+{
+ kref_get(&reflck->kref);
+}
+
+static int vfio_pci_reflck_find(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_reflck **preflck = data;
+ struct vfio_device *device;
+ struct vfio_pci_device *vdev;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (!device)
+ return 0;
+
+ if (pci_dev_driver(pdev) != &vfio_pci_driver) {
+ vfio_device_put(device);
+ return 0;
+ }
+
+ vdev = vfio_device_data(device);
+
+ if (vdev->reflck) {
+ vfio_pci_reflck_get(vdev->reflck);
+ *preflck = vdev->reflck;
+ vfio_device_put(device);
+ return 1;
+ }
+
+ vfio_device_put(device);
+ return 0;
+}
+
+static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev)
+{
+ bool slot = !pci_probe_reset_slot(vdev->pdev->slot);
+
+ mutex_lock(&reflck_lock);
+
+ if (pci_is_root_bus(vdev->pdev->bus) ||
+ vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_reflck_find,
+ &vdev->reflck, slot) <= 0)
+ vdev->reflck = vfio_pci_reflck_alloc();
+
+ mutex_unlock(&reflck_lock);
+
+ return PTR_ERR_OR_ZERO(vdev->reflck);
+}
+
+static void vfio_pci_reflck_release(struct kref *kref)
+{
+ struct vfio_pci_reflck *reflck = container_of(kref,
+ struct vfio_pci_reflck,
+ kref);
+
+ kfree(reflck);
+ mutex_unlock(&reflck_lock);
+}
+
+static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
+{
+ kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
+}
+
struct vfio_devices {
struct vfio_device **devices;
int cur_index;
int max_index;
};
-static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
+static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
struct vfio_device *device;
+ struct vfio_pci_device *vdev;
if (devs->cur_index == devs->max_index)
return -ENOSPC;
@@ -1339,16 +1540,28 @@
return -EBUSY;
}
+ vdev = vfio_device_data(device);
+
+ /* Fault if the device is not unused */
+ if (vdev->refcnt) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
devs->devices[devs->cur_index++] = device;
return 0;
}
/*
- * Attempt to do a bus/slot reset if there are devices affected by a reset for
- * this device that are needs_reset and all of the affected devices are unused
- * (!refcnt). Callers are required to hold driver_lock when calling this to
- * prevent device opens and concurrent bus reset attempts. We prevent device
- * unbinds by acquiring and holding a reference to the vfio_device.
+ * If a bus or slot reset is available for the provided device and:
+ * - All of the devices affected by that bus or slot reset are unused
+ * (!refcnt)
+ * - At least one of the affected devices is marked dirty via
+ * needs_reset (such as by lack of FLR support)
+ * Then attempt to perform that bus or slot reset. Callers are required
+ * to hold vdev->reflck->lock, protecting the bus/slot reset group from
+ * concurrent opens. A vfio_device reference is acquired for each device
+ * to prevent unbinds during the reset operation.
*
* NB: vfio-core considers a group to be viable even if some devices are
* bound to drivers like pci-stub or pcieport. Here we require all devices
@@ -1359,7 +1572,7 @@
{
struct vfio_devices devs = { .cur_index = 0 };
int i = 0, ret = -EINVAL;
- bool needs_reset = false, slot = false;
+ bool slot = false;
struct vfio_pci_device *tmp;
if (!pci_probe_reset_slot(vdev->pdev->slot))
@@ -1377,28 +1590,36 @@
return;
if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_get_devs, &devs, slot))
+ vfio_pci_get_unused_devs,
+ &devs, slot))
goto put_devs;
+ /* Does at least one need a reset? */
for (i = 0; i < devs.cur_index; i++) {
tmp = vfio_device_data(devs.devices[i]);
- if (tmp->needs_reset)
- needs_reset = true;
- if (tmp->refcnt)
- goto put_devs;
+ if (tmp->needs_reset) {
+ ret = pci_reset_bus(vdev->pdev);
+ break;
+ }
}
- if (needs_reset)
- ret = pci_reset_bus(vdev->pdev);
-
put_devs:
for (i = 0; i < devs.cur_index; i++) {
tmp = vfio_device_data(devs.devices[i]);
- if (!ret)
+
+ /*
+ * If reset was successful, affected devices no longer need
+ * a reset and we should return all the collateral devices
+ * to low power. If not successful, we either didn't reset
+ * the bus or timed out waiting for it, so let's not touch
+ * the power state.
+ */
+ if (!ret) {
tmp->needs_reset = false;
- if (!tmp->refcnt && !disable_idle_d3)
- pci_set_power_state(tmp->pdev, PCI_D3hot);
+ if (tmp != vdev && !disable_idle_d3)
+ vfio_pci_set_power_state(tmp, PCI_D3hot);
+ }
vfio_device_put(devs.devices[i]);
}
@@ -1443,11 +1664,11 @@
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
- pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
- pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 115a36f..f0891bd 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI config space virtualization
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, pugs@cisco.com
@@ -412,8 +409,7 @@
if (pdev->is_virtfn)
return;
- pr_info("%s: %s reset recovery - restoring bars\n",
- __func__, dev_name(&pdev->dev));
+ pci_info(pdev, "%s: reset recovery - restoring BARs\n", __func__);
for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
pci_user_write_config_dword(pdev, i, *rbar);
@@ -691,7 +687,7 @@
break;
}
- pci_set_power_state(vdev->pdev, state);
+ vfio_pci_set_power_state(vdev, state);
}
return count;
@@ -1180,8 +1176,10 @@
return -ENOMEM;
ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
- if (ret)
+ if (ret) {
+ kfree(vdev->msi_perm);
return ret;
+ }
return len;
}
@@ -1296,8 +1294,8 @@
else
return PCI_SATA_SIZEOF_SHORT;
default:
- pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n",
- dev_name(&pdev->dev), __func__, cap, pos);
+ pci_warn(pdev, "%s: unknown length for PCI cap %#x@%#x\n",
+ __func__, cap, pos);
}
return 0;
@@ -1370,8 +1368,8 @@
}
return PCI_TPH_BASE_SIZEOF;
default:
- pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n",
- dev_name(&pdev->dev), __func__, ecap, epos);
+ pci_warn(pdev, "%s: unknown length for PCI ecap %#x@%#x\n",
+ __func__, ecap, epos);
}
return 0;
@@ -1472,8 +1470,8 @@
}
if (!len) {
- pr_info("%s: %s hiding cap 0x%x\n",
- __func__, dev_name(&pdev->dev), cap);
+ pci_info(pdev, "%s: hiding cap %#x@%#x\n", __func__,
+ cap, pos);
*prev = next;
pos = next;
continue;
@@ -1484,9 +1482,8 @@
if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
continue;
- pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n",
- __func__, dev_name(&pdev->dev),
- pos + i, map[pos + i], cap);
+ pci_warn(pdev, "%s: PCI config conflict @%#x, was cap %#x now cap %#x\n",
+ __func__, pos + i, map[pos + i], cap);
}
BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
@@ -1547,8 +1544,8 @@
}
if (!len) {
- pr_info("%s: %s hiding ecap 0x%x@0x%x\n",
- __func__, dev_name(&pdev->dev), ecap, epos);
+ pci_info(pdev, "%s: hiding ecap %#x@%#x\n",
+ __func__, ecap, epos);
/* If not the first in the chain, we can skip over it */
if (prev) {
@@ -1570,9 +1567,8 @@
if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
continue;
- pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n",
- __func__, dev_name(&pdev->dev),
- epos + i, map[epos + i], ecap);
+ pci_warn(pdev, "%s: PCI config conflict @%#x, was ecap %#x now ecap %#x\n",
+ __func__, epos + i, map[epos + i], ecap);
}
/*
@@ -1610,6 +1606,15 @@
}
/*
+ * Nag about hardware bugs, hopefully to have vendors fix them, but at least
+ * to collect a list of dependencies for the VF INTx pin quirk below.
+ */
+static const struct pci_device_id known_bogus_vf_intx_pin[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) },
+ {}
+};
+
+/*
* For each device we allocate a pci_config_map that indicates the
* capability occupying each dword and thus the struct perm_bits we
* use for read and write. We also allocate a virtualized config
@@ -1674,6 +1679,24 @@
if (pdev->is_virtfn) {
*(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
+
+ /*
+ * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register
+ * does not apply to VFs and VFs must implement this register
+ * as read-only with value zero. Userspace is not readily able
+ * to identify whether a device is a VF and thus that the pin
+ * definition on the device is bogus should it violate this
+ * requirement. We already virtualize the pin register for
+ * other purposes, so we simply need to replace the bogus value
+ * and consider VFs when we determine INTx IRQ count.
+ */
+ if (vconfig[PCI_INTERRUPT_PIN] &&
+ !pci_match_id(known_bogus_vf_intx_pin, pdev))
+ pci_warn(pdev,
+ "Hardware bug: VF reports bogus INTx pin %d\n",
+ vconfig[PCI_INTERRUPT_PIN]);
+
+ vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
}
if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 6394b16..53d97f4 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI Intel Graphics support
*
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Register a device specific region through which to provide read-only
* access to the Intel IGD opregion. The register defining the opregion
* address is also virtualized to prevent user modification.
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 1c46045..3fa3f72 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI interrupt handling
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, pugs@cisco.com
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
new file mode 100644
index 0000000..f2983f0
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO PCI NVIDIA Witherspoon GPU support, a.k.a. NVLink2.
+ *
+ * Copyright (C) 2018 IBM Corp. All rights reserved.
+ * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
+ *
+ * Register an on-GPU RAM region for cacheable access.
+ *
+ * Derived from original vfio_pci_igd.c:
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/kvm_ppc.h>
+#include "vfio_pci_private.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap_fault);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);
+
+struct vfio_pci_nvgpu_data {
+ unsigned long gpu_hpa; /* GPU RAM physical address */
+ unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
+ unsigned long useraddr; /* GPU RAM userspace address */
+ unsigned long size; /* Size of the GPU RAM window (usually 128GB) */
+ struct mm_struct *mm;
+ struct mm_iommu_table_group_mem_t *mem; /* Pre-registered RAM descr. */
+ struct pci_dev *gpdev;
+ struct notifier_block group_notifier;
+};
+
+static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+ struct vfio_pci_nvgpu_data *data = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ loff_t posaligned = pos & PAGE_MASK, posoff = pos & ~PAGE_MASK;
+ size_t sizealigned;
+ void __iomem *ptr;
+
+ if (pos >= vdev->region[i].size)
+ return -EINVAL;
+
+ count = min(count, (size_t)(vdev->region[i].size - pos));
+
+ /*
+ * We map only a bit of the GPU RAM for a short time instead of mapping
+ * it for the guest lifetime as:
+ *
+ * 1) we do not know the GPU RAM size, only the aperture, which is 4-8
+ * times bigger than the actual RAM size (16/32GB RAM vs. 128GB
+ * aperture);
+ * 2) mapping GPU RAM allows the CPU to prefetch, and if this happens
+ * before the NVLink bridge is reset (which fences GPU RAM), hardware
+ * management interrupts (HMI) might occur and freeze the NVLink
+ * bridge.
+ *
+ * This is not a fast path anyway.
+ */
+ sizealigned = _ALIGN_UP(posoff + count, PAGE_SIZE);
+ ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
+ if (!ptr)
+ return -EFAULT;
+
+ if (iswrite) {
+ if (copy_from_user(ptr + posoff, buf, count))
+ count = -EFAULT;
+ else
+ *ppos += count;
+ } else {
+ if (copy_to_user(buf, ptr + posoff, count))
+ count = -EFAULT;
+ else
+ *ppos += count;
+ }
+
+ iounmap(ptr);
+
+ return count;
+}
+
+static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region)
+{
+ struct vfio_pci_nvgpu_data *data = region->data;
+ long ret;
+
+ /* If there were any mappings at all... */
+ if (data->mm) {
+ ret = mm_iommu_put(data->mm, data->mem);
+ WARN_ON(ret);
+
+ mmdrop(data->mm);
+ }
+
+ vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
+ &data->group_notifier);
+
+ pnv_npu2_unmap_lpar_dev(data->gpdev);
+
+ kfree(data);
+}
+
+static vm_fault_t vfio_pci_nvgpu_mmap_fault(struct vm_fault *vmf)
+{
+ vm_fault_t ret;
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_region *region = vma->vm_private_data;
+ struct vfio_pci_nvgpu_data *data = region->data;
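+ /*
+ * vma->vm_pgoff still carries the VFIO region index in the bits above
+ * VFIO_PCI_OFFSET_SHIFT; mask those off so only the page offset within
+ * the region remains, then add it to the GPU RAM base pfn.
+ */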
+ unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long nv2pg = data->gpu_hpa >> PAGE_SHIFT;
+ unsigned long vm_pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ unsigned long pfn = nv2pg + vm_pgoff + vmf_off;
+
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ trace_vfio_pci_nvgpu_mmap_fault(data->gpdev, pfn << PAGE_SHIFT,
+ vmf->address, ret);
+
+ return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_nvgpu_mmap_vmops = {
+ .fault = vfio_pci_nvgpu_mmap_fault,
+};
+
+static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vfio_pci_nvgpu_data *data = region->data;
+
+ if (data->useraddr)
+ return -EPERM;
+
+ if (vma->vm_end - vma->vm_start > data->size)
+ return -EINVAL;
+
+ vma->vm_private_data = region;
+ vma->vm_flags |= VM_PFNMAP;
+ vma->vm_ops = &vfio_pci_nvgpu_mmap_vmops;
+
+ /*
+ * Call mm_iommu_newdev() here, once, as the region is not registered
+ * yet and therefore the proper initialization happens now. Other
+ * places use mm_iommu_find(), which returns the registered @mem and
+ * does not gup() again.
+ */
+ data->useraddr = vma->vm_start;
+ data->mm = current->mm;
+
+ atomic_inc(&data->mm->mm_count);
+ ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
+ vma_pages(vma), data->gpu_hpa, &data->mem);
+
+ trace_vfio_pci_nvgpu_mmap(vdev->pdev, data->gpu_hpa, data->useraddr,
+ vma->vm_end - vma->vm_start, ret);
+
+ return ret;
+}
+
+static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region, struct vfio_info_cap *caps)
+{
+ struct vfio_pci_nvgpu_data *data = region->data;
+ struct vfio_region_info_cap_nvlink2_ssatgt cap = {
+ .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
+ .header.version = 1,
+ .tgt = data->gpu_tgt
+ };
+
+ return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
+}
+
+static const struct vfio_pci_regops vfio_pci_nvgpu_regops = {
+ .rw = vfio_pci_nvgpu_rw,
+ .release = vfio_pci_nvgpu_release,
+ .mmap = vfio_pci_nvgpu_mmap,
+ .add_capability = vfio_pci_nvgpu_add_capability,
+};
+
+static int vfio_pci_nvgpu_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *opaque)
+{
+ struct kvm *kvm = opaque;
+ struct vfio_pci_nvgpu_data *data = container_of(nb,
+ struct vfio_pci_nvgpu_data,
+ group_notifier);
+
+ if (action == VFIO_GROUP_NOTIFY_SET_KVM && kvm &&
+ pnv_npu2_map_lpar_dev(data->gpdev,
+ kvm->arch.lpid, MSR_DR | MSR_PR))
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+}
+
+int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
+{
+ int ret;
+ u64 reg[2];
+ u64 tgt = 0;
+ struct device_node *npu_node, *mem_node;
+ struct pci_dev *npu_dev;
+ struct vfio_pci_nvgpu_data *data;
+ uint32_t mem_phandle = 0;
+ unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;
+
+ /*
+ * PCI config space does not tell us about NVLink presence, but the
+ * platform does, so use that.
+ */
+ npu_dev = pnv_pci_get_npu_dev(vdev->pdev, 0);
+ if (!npu_dev)
+ return -ENODEV;
+
+ npu_node = pci_device_to_OF_node(npu_dev);
+ if (!npu_node)
+ return -EINVAL;
+
+ if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
+ return -EINVAL;
+
+ mem_node = of_find_node_by_phandle(mem_phandle);
+ if (!mem_node)
+ return -EINVAL;
+
+ if (of_property_read_variable_u64_array(mem_node, "reg", reg,
+ ARRAY_SIZE(reg), ARRAY_SIZE(reg)) !=
+ ARRAY_SIZE(reg))
+ return -EINVAL;
+
+ if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
+ dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
+ return -EFAULT;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->gpu_hpa = reg[0];
+ data->gpu_tgt = tgt;
+ data->size = reg[1];
+
+ dev_dbg(&vdev->pdev->dev, "%lx..%lx\n", data->gpu_hpa,
+ data->gpu_hpa + data->size - 1);
+
+ data->gpdev = vdev->pdev;
+ data->group_notifier.notifier_call = vfio_pci_nvgpu_group_notifier;
+
+ ret = vfio_register_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
+ &events, &data->group_notifier);
+ if (ret)
+ goto free_exit;
+
+ /*
+ * We have just set KVM, so we do not need the listener anymore.
+ * Also, keeping it registered means that if more than one GPU is
+ * assigned, we will get several similar notifiers notifying about
+ * the same device again, which does not help with anything.
+ */
+ vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
+ &data->group_notifier);
+
+ ret = vfio_pci_register_dev_region(vdev,
+ PCI_VENDOR_ID_NVIDIA | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
+ &vfio_pci_nvgpu_regops,
+ data->size,
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP,
+ data);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+free_exit:
+ kfree(data);
+
+ return ret;
+}
+
+/*
+ * IBM NPU2 bridge
+ */
+struct vfio_pci_npu2_data {
+ void *base; /* ATSD register virtual address, for emulated access */
+ unsigned long mmio_atsd; /* ATSD physical address */
+ unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
+ unsigned int link_speed; /* The link speed from DT's ibm,nvlink-speed */
+};
+
+static size_t vfio_pci_npu2_rw(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+ struct vfio_pci_npu2_data *data = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos >= vdev->region[i].size)
+ return -EINVAL;
+
+ count = min(count, (size_t)(vdev->region[i].size - pos));
+
+ if (iswrite) {
+ if (copy_from_user(data->base + pos, buf, count))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(buf, data->base + pos, count))
+ return -EFAULT;
+ }
+ *ppos += count;
+
+ return count;
+}
+
+static int vfio_pci_npu2_mmap(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vfio_pci_npu2_data *data = region->data;
+ unsigned long req_len = vma->vm_end - vma->vm_start;
+
+ if (req_len != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_PFNMAP;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
+ req_len, vma->vm_page_prot);
+ trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
+ vma->vm_end - vma->vm_start, ret);
+
+ return ret;
+}
+
+static void vfio_pci_npu2_release(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region)
+{
+ struct vfio_pci_npu2_data *data = region->data;
+
+ memunmap(data->base);
+ kfree(data);
+}
+
+static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region, struct vfio_info_cap *caps)
+{
+ struct vfio_pci_npu2_data *data = region->data;
+ struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
+ .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
+ .header.version = 1,
+ .tgt = data->gpu_tgt
+ };
+ struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
+ .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
+ .header.version = 1,
+ .link_speed = data->link_speed
+ };
+ int ret;
+
+ ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
+ if (ret)
+ return ret;
+
+ return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
+}
+
+static const struct vfio_pci_regops vfio_pci_npu2_regops = {
+ .rw = vfio_pci_npu2_rw,
+ .mmap = vfio_pci_npu2_mmap,
+ .release = vfio_pci_npu2_release,
+ .add_capability = vfio_pci_npu2_add_capability,
+};
+
+int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
+{
+ int ret;
+ struct vfio_pci_npu2_data *data;
+ struct device_node *nvlink_dn;
+ u32 nvlink_index = 0;
+ struct pci_dev *npdev = vdev->pdev;
+ struct device_node *npu_node = pci_device_to_OF_node(npdev);
+ struct pci_controller *hose = pci_bus_to_host(npdev->bus);
+ u64 mmio_atsd = 0;
+ u64 tgt = 0;
+ u32 link_speed = 0xff;
+
+ /*
+ * PCI config space does not tell us about NVLink presence, but the
+ * platform does, so use that.
+ */
+ if (!pnv_pci_get_gpu_dev(vdev->pdev))
+ return -ENODEV;
+
+ /*
+ * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links,
+ * so we can allocate one register per link, using the nvlink index as
+ * a key.
+ * There is always at least one ATSD register, so as long as at least
+ * NVLink bridge #0 is passed to the guest, ATSD will be available.
+ */
+ nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
+ if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
+ &nvlink_index)))
+ return -ENODEV;
+
+ if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
+ &mmio_atsd)) {
+ dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
+ mmio_atsd = 0;
+ }
+
+ if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
+ dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
+ return -EFAULT;
+ }
+
+ if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
+ dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
+ return -EFAULT;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->mmio_atsd = mmio_atsd;
+ data->gpu_tgt = tgt;
+ data->link_speed = link_speed;
+ if (data->mmio_atsd) {
+ data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
+ if (!data->base) {
+ ret = -ENOMEM;
+ goto free_exit;
+ }
+ }
+
+ /*
+ * We want to expose the capability even if this specific NVLink
+ * did not get its own ATSD register because capabilities
+ * belong to VFIO regions and normally there will be an ATSD register
+ * assigned to the NVLink bridge.
+ */
+ ret = vfio_pci_register_dev_region(vdev,
+ PCI_VENDOR_ID_IBM |
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
+ &vfio_pci_npu2_regops,
+ data->mmio_atsd ? PAGE_SIZE : 0,
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP,
+ data);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+
+free_exit:
+ if (data->base)
+ memunmap(data->base);
+ kfree(data);
+
+ return ret;
+}
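
Userspace discovers these regions, and the NVLink2 capabilities attached
by add_capability(), through VFIO_DEVICE_GET_REGION_INFO with a
grow-the-buffer retry. A rough sketch (error handling trimmed for
brevity; device_fd is assumed to be an open VFIO device fd):

    #include <linux/vfio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Query one region, re-querying with a larger buffer when the
     * kernel reports a capability chain (argsz grows past the base
     * struct). Caps start at info->cap_offset when FLAG_CAPS is set. */
    static struct vfio_region_info *get_region_info(int device_fd,
                                                    unsigned int index)
    {
        struct vfio_region_info *info = calloc(1, sizeof(*info));

        info->argsz = sizeof(*info);
        info->index = index;
        ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info);
        if (info->argsz > sizeof(*info)) {
            info = realloc(info, info->argsz);  /* argsz was updated */
            ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info);
        }
        return info;
    }
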
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index cde3b5d..ee6ee91 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, pugs@cisco.com
@@ -59,6 +56,12 @@
size_t count, loff_t *ppos, bool iswrite);
void (*release)(struct vfio_pci_device *vdev,
struct vfio_pci_region *region);
+ int (*mmap)(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region,
+ struct vm_area_struct *vma);
+ int (*add_capability)(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region,
+ struct vfio_info_cap *caps);
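+ /* Both callbacks are optional; the core invokes them only when set */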
};
struct vfio_pci_region {
@@ -76,6 +79,11 @@
struct list_head res_next;
};
+struct vfio_pci_reflck {
+ struct kref kref;
+ struct mutex lock;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
@@ -103,7 +111,10 @@
bool has_vga;
bool needs_reset;
bool nointx;
+ bool needs_pm_restore;
struct pci_saved_state *pci_saved_state;
+ struct pci_saved_state *pm_save;
+ struct vfio_pci_reflck *reflck;
int refcnt;
int ioeventfds_nr;
struct eventfd_ctx *err_trigger;
@@ -149,6 +160,10 @@
unsigned int type, unsigned int subtype,
const struct vfio_pci_regops *ops,
size_t size, u32 flags, void *data);
+
+extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
+ pci_power_t state);
+
#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
#else
@@ -157,4 +172,18 @@
return -ENODEV;
}
#endif
+#ifdef CONFIG_VFIO_PCI_NVLINK2
+extern int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev);
+extern int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev);
+#else
+static inline int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
+{
+ return -ENODEV;
+}
+
+static inline int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
+{
+ return -ENODEV;
+}
+#endif
#endif /* VFIO_PCI_PRIVATE_H */
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index a6029d0..0120d83 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI I/O Port & MMIO access
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, pugs@cisco.com