Update Linux to v5.4.2

Sync drivers/pci/controller/vmd.c with v5.4.2:

- Forward DMA ops through the generic dma_*_attrs() helpers and drop
  the CONFIG_X86_DEV_DMA_OPS guards and the mapping_error op.
- Track the starting bus number in struct vmd_dev and use it for config
  space addressing and root bus creation.
- Name the MB2 shadow register offset/size and adjust the offsets read
  from MEMBAR2.
- Scan child buses explicitly so pcie_bus_configure_settings() can run
  on each real root port.
- Detach resources after stopping and removing the root bus.

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index fd2dbd7..a35d3f3 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -31,6 +31,9 @@
#define PCI_REG_VMLOCK 0x70
#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
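+/* Offset and size of the MB2 shadow register region within MEMBAR2 */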
+#define MB2_SHADOW_OFFSET 0x2000
+#define MB2_SHADOW_SIZE 16
+
enum vmd_features {
/*
* Device may contain registers which hint the physical location of the
@@ -94,11 +97,10 @@
struct resource resources[3];
struct irq_domain *irq_domain;
struct pci_bus *bus;
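+ /* starting bus number: 128 when bus restriction is enabled, else 0 */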
+ u8 busn_start;
-#ifdef CONFIG_X86_DEV_DMA_OPS
struct dma_map_ops dma_ops;
struct dma_domain dma_domain;
-#endif
};
static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -293,7 +295,6 @@
.chip = &vmd_msi_controller,
};
-#ifdef CONFIG_X86_DEV_DMA_OPS
/*
* VMD replaces the requester ID with its own. DMA mappings for devices in a
* VMD domain need to be mapped for the VMD, not the device requiring
@@ -307,39 +308,32 @@
return &vmd->dev->dev;
}
-static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
-{
- return get_dma_ops(to_vmd_dev(dev));
-}
-
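+/*
+ * Each op below forwards to the VMD device itself through the generic
+ * dma_*_attrs() helpers instead of dereferencing its dma_map_ops.
+ */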
static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
gfp_t flag, unsigned long attrs)
{
- return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
- attrs);
+ return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
}
static void vmd_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t addr, unsigned long attrs)
{
- return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
- attrs);
+ return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
}
static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t addr, size_t size,
unsigned long attrs)
{
- return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
- size, attrs);
+ return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
+ attrs);
}
static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t addr, size_t size,
unsigned long attrs)
{
- return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
- addr, size, attrs);
+ return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
+ attrs);
}
static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
@@ -347,69 +341,61 @@
enum dma_data_direction dir,
unsigned long attrs)
{
- return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
- dir, attrs);
+ return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
+ attrs);
}
static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
+ dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
}
static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+ return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}
static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+ dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}
static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
- vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
+ dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}
static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
- vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
- dir);
+ dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
}
static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
+ dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}
static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
- vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
-{
- return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
+ dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}
static int vmd_dma_supported(struct device *dev, u64 mask)
{
- return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
+ return dma_supported(to_vmd_dev(dev), mask);
}
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 vmd_get_required_mask(struct device *dev)
{
- return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
+ return dma_get_required_mask(to_vmd_dev(dev));
}
-#endif
static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
@@ -448,24 +434,18 @@
ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
- ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-#endif
add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
-#else
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
-static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
-#endif
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
unsigned int devfn, int reg, int len)
{
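+ /* 1M of config space per bus, 4K per devfn, relative to busn_start */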
char __iomem *addr = vmd->cfgbar +
- (bus->number << 20) + (devfn << 12) + reg;
+ ((bus->number - vmd->busn_start) << 20) +
+ (devfn << 12) + reg;
if ((addr - vmd->cfgbar) + len >=
resource_size(&vmd->dev->resource[VMD_CFGBAR]))
@@ -588,7 +568,8 @@
unsigned long flags;
LIST_HEAD(resources);
resource_size_t offset[2] = {0};
- resource_size_t membar2_offset = 0x2000, busn_start = 0;
+ resource_size_t membar2_offset = 0x2000;
+ struct pci_bus *child;
/*
* Shadow registers may exist in certain VMD device ids which allow
@@ -600,7 +581,7 @@
u32 vmlock;
int ret;
- membar2_offset = 0x2018;
+ membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
if (ret || vmlock == ~0)
return -ENODEV;
@@ -612,9 +593,9 @@
if (!membar2)
return -ENOMEM;
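+ /* The shadow region holds two consecutive 64-bit base addresses */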
offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
- readq(membar2 + 0x2008);
+ readq(membar2 + MB2_SHADOW_OFFSET);
offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
- readq(membar2 + 0x2010);
+ readq(membar2 + MB2_SHADOW_OFFSET + 8);
pci_iounmap(vmd->dev, membar2);
}
}
@@ -630,14 +611,14 @@
pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
if (BUS_RESTRICT_CAP(vmcap) &&
(BUS_RESTRICT_CFG(vmconfig) == 0x1))
- busn_start = 128;
+ vmd->busn_start = 128;
}
res = &vmd->dev->resource[VMD_CFGBAR];
vmd->resources[0] = (struct resource) {
.name = "VMD CFGBAR",
- .start = busn_start,
- .end = busn_start + (resource_size(res) >> 20) - 1,
+ .start = vmd->busn_start,
+ .end = vmd->busn_start + (resource_size(res) >> 20) - 1,
.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
};
@@ -651,7 +632,7 @@
* 32-bit resources. __pci_assign_resource() enforces that
* artificial restriction to make sure everything will fit.
*
- * The only way we could use a 64-bit non-prefechable MEMBAR is
+ * The only way we could use a 64-bit non-prefetchable MEMBAR is
* if its address is <4GB so that we can convert it to a 32-bit
* resource. To be visible to the host OS, all VMD endpoints must
* be initially configured by platform BIOS, which includes setting
@@ -705,8 +686,8 @@
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
- vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
- sd, &resources);
+ vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
+ &vmd_ops, sd, &resources);
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
@@ -716,7 +697,19 @@
vmd_attach_resources(vmd);
vmd_setup_dma_ops(vmd);
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
- pci_rescan_bus(vmd->bus);
+
+ pci_scan_child_bus(vmd->bus);
+ pci_assign_unassigned_bus_resources(vmd->bus);
+
+ /*
+ * VMD root buses are virtual and don't return true on pci_is_pcie()
+ * and will fail pcie_bus_configure_settings() early. It can instead be
+ * run on each of the real root ports.
+ */
+ list_for_each_entry(child, &vmd->bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(vmd->bus);
WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
"domain"), "Can't create symlink to domain\n");
@@ -813,12 +806,12 @@
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_detach_resources(vmd);
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_teardown_dma_ops(vmd);
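+ /* Resources must remain attached until the root bus has been removed */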
+ vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
}