Update Linux to v5.4.148
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.148.tar.gz
Change-Id: Ib3d26c5ba9b022e2e03533005c4fed4d7c30b61b
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2fccb57..0914dde 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -204,17 +204,13 @@
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
static noinline void pci_wait_cfg(struct pci_dev *dev)
+ __must_hold(&pci_lock)
{
- DECLARE_WAITQUEUE(wait, current);
-
- __add_wait_queue(&pci_cfg_wait, &wait);
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irq(&pci_lock);
- schedule();
+ wait_event(pci_cfg_wait, !dev->block_cfg_access);
raw_spin_lock_irq(&pci_lock);
} while (dev->block_cfg_access);
- __remove_wait_queue(&pci_cfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
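The access.c hunk above replaces an open-coded wait queue (DECLARE_WAITQUEUE / set_current_state / schedule) with wait_event(), which performs the prepare-to-wait, condition re-check and sleep internally and so closes the lost-wakeup window. A minimal sketch of the resulting pattern, with hypothetical names (my_wait, my_lock, blocked):

    /* Illustrative sketch only -- not the driver code. */
    #include <linux/wait.h>
    #include <linux/spinlock.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wait);
    static DEFINE_RAW_SPINLOCK(my_lock);
    static bool blocked;

    static void my_wait_until_unblocked(void)
        __must_hold(&my_lock)
    {
        do {
            raw_spin_unlock_irq(&my_lock);
            /* sleeps until the condition holds; rechecks it after
             * every wakeup, so no lost-wakeup window */
            wait_event(my_wait, !READ_ONCE(blocked));
            raw_spin_lock_irq(&my_lock);
            /* re-test under the lock: the state may have changed
             * between wait_event() returning and relocking */
        } while (blocked);
    }

    static void my_unblock(void)
    {
        WRITE_ONCE(blocked, false);
        wake_up_all(&my_wait);
    }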
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 8e40b3e..3cef835 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -322,12 +322,8 @@
dev->match_driver = true;
retval = device_attach(&dev->dev);
- if (retval < 0 && retval != -EPROBE_DEFER) {
+ if (retval < 0 && retval != -EPROBE_DEFER)
pci_warn(dev, "device attach failed (%d)\n", retval);
- pci_proc_detach_device(dev);
- pci_remove_sysfs_dev_files(dev);
- return;
- }
pci_dev_assign_added(dev, true);
}
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index af67725..c8c702c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -422,7 +422,7 @@
lower_32_bits(start) | OB_ENABLEN);
ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
upper_32_bits(start));
- start += OB_WIN_SIZE;
+ start += OB_WIN_SIZE * SZ_1M;
}
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
@@ -510,7 +510,7 @@
/* Disable Link training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
static int ks_pcie_start_link(struct dw_pcie *pci)
@@ -1354,7 +1354,7 @@
ret = of_property_read_u32(np, "num-viewport", &num_viewport);
if (ret < 0) {
dev_err(dev, "unable to read *num-viewport* property\n");
- return ret;
+ goto err_get_sync;
}
/*
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index e35e9ea..8c9f887 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -250,15 +250,15 @@
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
- res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
+ res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
if (IS_ERR(res->mipi_gate))
return PTR_ERR(res->mipi_gate);
- res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
+ res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
- res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
+ res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
if (IS_ERR(res->clk))
return PTR_ERR(res->clk);
@@ -301,11 +301,11 @@
meson_cfg_writel(mp, val, PCIE_CFG0);
val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val &= ~LINK_CAPABLE_MASK;
+ val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE);
meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
+ val |= LINK_CAPABLE_X1;
meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 0f36a92..fbcb211 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -78,7 +78,8 @@
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
int i, pos, irq;
- u32 val, num_ctrls;
+ unsigned long val;
+ u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
@@ -86,14 +87,14 @@
for (i = 0; i < num_ctrls; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
(i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &val);
- if (!val)
+ 4, &status);
+ if (!status)
continue;
ret = IRQ_HANDLED;
+ val = status;
pos = 0;
- while ((pos = find_next_bit((unsigned long *) &val,
- MAX_MSI_IRQS_PER_CTRL,
+ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
irq = irq_find_mapping(pp->irq_domain,
(i * MAX_MSI_IRQS_PER_CTRL) +
@@ -262,6 +263,8 @@
return -ENOMEM;
}
+ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+
pp->msi_domain = pci_msi_create_irq_domain(fwnode,
&dw_pcie_msi_domain_info,
pp->irq_domain);
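The dw_handle_msi_irq() change fixes a size/aliasing bug: casting the address of a u32 to unsigned long * makes find_next_bit() read a full unsigned long, i.e. 4 bytes of adjacent stack on 64-bit kernels. A short sketch of why the copy into a real unsigned long is needed (hypothetical function name):

    #include <linux/bitops.h>

    static void scan_status(u32 status)
    {
        unsigned long val = status;   /* safe widening: top bits are zero */
        int pos = 0;

        /*
         * find_next_bit() dereferences unsigned long words, so
         * (unsigned long *)&status would read past the u32 on
         * 64-bit kernels and could report spurious set bits.
         */
        while ((pos = find_next_bit(&val, 32, pos)) != 32) {
            /* handle interrupt bit 'pos' here */
            pos++;
        }
    }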
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 7e58174..a8eab4e 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -45,7 +45,13 @@
#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
#define PCIE20_PARF_PHY_CTRL 0x40
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
+
#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PHY_REFCLK_SSP_EN BIT(16)
+#define PHY_REFCLK_USE_PAD BIT(12)
+
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
@@ -76,6 +82,18 @@
#define DBI_RO_WR_EN 1
#define PERST_DELAY_US 1000
+/* PARF registers */
+#define PCIE20_PARF_PCS_DEEMPH 0x34
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)
+
+#define PCIE20_PARF_PCS_SWING 0x38
+#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
+#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)
+
+#define PCIE20_PARF_CONFIG_BITS 0x50
+#define PHY_RX0_EQ(x) ((x) << 24)
#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
#define SLV_ADDR_SPACE_SZ 0x10000000
@@ -85,11 +103,14 @@
struct clk *iface_clk;
struct clk *core_clk;
struct clk *phy_clk;
+ struct clk *aux_clk;
+ struct clk *ref_clk;
struct reset_control *pci_reset;
struct reset_control *axi_reset;
struct reset_control *ahb_reset;
struct reset_control *por_reset;
struct reset_control *phy_reset;
+ struct reset_control *ext_reset;
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
@@ -235,6 +256,14 @@
if (IS_ERR(res->phy_clk))
return PTR_ERR(res->phy_clk);
+ res->aux_clk = devm_clk_get_optional(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->ref_clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(res->ref_clk))
+ return PTR_ERR(res->ref_clk);
+
res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
if (IS_ERR(res->pci_reset))
return PTR_ERR(res->pci_reset);
@@ -251,6 +280,10 @@
if (IS_ERR(res->por_reset))
return PTR_ERR(res->por_reset);
+ res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
+ if (IS_ERR(res->ext_reset))
+ return PTR_ERR(res->ext_reset);
+
res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
return PTR_ERR_OR_ZERO(res->phy_reset);
}
@@ -259,14 +292,20 @@
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ clk_disable_unprepare(res->phy_clk);
reset_control_assert(res->pci_reset);
reset_control_assert(res->axi_reset);
reset_control_assert(res->ahb_reset);
reset_control_assert(res->por_reset);
- reset_control_assert(res->pci_reset);
+ reset_control_assert(res->ext_reset);
+ reset_control_assert(res->phy_reset);
clk_disable_unprepare(res->iface_clk);
clk_disable_unprepare(res->core_clk);
- clk_disable_unprepare(res->phy_clk);
+ clk_disable_unprepare(res->aux_clk);
+ clk_disable_unprepare(res->ref_clk);
+
+ writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -275,9 +314,20 @@
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
u32 val;
int ret;
+ /* reset the PCIe interface, as U-Boot can leave it in an undefined state */
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+ reset_control_assert(res->ext_reset);
+ reset_control_assert(res->phy_reset);
+
+ writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
dev_err(dev, "cannot enable regulators\n");
@@ -296,32 +346,66 @@
goto err_assert_ahb;
}
- ret = clk_prepare_enable(res->phy_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable phy clock\n");
- goto err_clk_phy;
- }
-
ret = clk_prepare_enable(res->core_clk);
if (ret) {
dev_err(dev, "cannot prepare/enable core clock\n");
goto err_clk_core;
}
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ goto err_clk_aux;
+ }
+
+ ret = clk_prepare_enable(res->ref_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable ref clock\n");
+ goto err_clk_ref;
+ }
+
ret = reset_control_deassert(res->ahb_reset);
if (ret) {
dev_err(dev, "cannot deassert ahb reset\n");
goto err_deassert_ahb;
}
+ ret = reset_control_deassert(res->ext_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ext reset\n");
+ goto err_deassert_ahb;
+ }
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+ pcie->parf + PCIE20_PARF_PCS_DEEMPH);
+ writel(PCS_SWING_TX_SWING_FULL(120) |
+ PCS_SWING_TX_SWING_LOW(120),
+ pcie->parf + PCIE20_PARF_PCS_SWING);
+ writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
+ }
+
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ /* set TX termination offset */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+ val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ }
+
/* enable external reference clock */
val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
- val |= BIT(16);
+ /* USE_PAD is required only for ipq806x */
+ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+ val &= ~PHY_REFCLK_USE_PAD;
+ val |= PHY_REFCLK_SSP_EN;
writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
ret = reset_control_deassert(res->phy_reset);
@@ -348,6 +432,12 @@
return ret;
}
+ ret = clk_prepare_enable(res->phy_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable phy clock\n");
+ goto err_deassert_ahb;
+ }
+
/* wait for clock acquisition */
usleep_range(1000, 1500);
@@ -361,10 +451,12 @@
return 0;
err_deassert_ahb:
+ clk_disable_unprepare(res->ref_clk);
+err_clk_ref:
+ clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
clk_disable_unprepare(res->core_clk);
err_clk_core:
- clk_disable_unprepare(res->phy_clk);
-err_clk_phy:
clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -1289,7 +1381,13 @@
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
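In the qcom hunks above, the new aux/ref clocks and the ext reset are acquired with the *_optional getters: platforms whose DT omits them get NULL back, the later enable/assert calls degrade to no-ops, and real errors (including -EPROBE_DEFER) still propagate. A sketch of that pattern with hypothetical names:

    #include <linux/clk.h>
    #include <linux/reset.h>

    struct my_res {
        struct clk *aux_clk;
        struct reset_control *ext_reset;
    };

    static int my_get_resources(struct device *dev, struct my_res *res)
    {
        res->aux_clk = devm_clk_get_optional(dev, "aux");
        if (IS_ERR(res->aux_clk))   /* real error, e.g. -EPROBE_DEFER */
            return PTR_ERR(res->aux_clk);
        /* NULL when the DT has no "aux" clock; clk_prepare_enable(NULL)
         * and clk_disable_unprepare(NULL) are harmless no-ops */

        res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
        if (IS_ERR(res->ext_reset))
            return PTR_ERR(res->ext_reset);
        /* reset_control_assert(NULL) is likewise a no-op */

        return 0;
    }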
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index f89f5ac..c06b05a 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1395,7 +1395,7 @@
ret = pinctrl_pm_select_default_state(dev);
if (ret < 0) {
dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
- goto fail_pinctrl;
+ goto fail_pm_get_sync;
}
tegra_pcie_init_controller(pcie);
@@ -1422,9 +1422,8 @@
fail_host_init:
tegra_pcie_deinit_controller(pcie);
-fail_pinctrl:
- pm_runtime_put_sync(dev);
fail_pm_get_sync:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
}
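The tegra194 hunk reorders the error labels so that pm_runtime_put_sync() runs on every path where pm_runtime_get_sync() was called, since get_sync raises the usage count even when it fails. A minimal sketch of the unwind ordering (my_init_hw() is a hypothetical stand-in):

    #include <linux/pm_runtime.h>

    static int my_init_hw(struct device *dev);   /* hypothetical */

    static int my_probe(struct device *dev)
    {
        int ret;

        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0)             /* count was bumped even on failure */
            goto fail_pm_get_sync;

        ret = my_init_hw(dev);
        if (ret)
            goto fail_pm_get_sync;

        return 0;

    fail_pm_get_sync:
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
        return ret;
    }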
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index fc0fe4d..0538348 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -61,7 +61,8 @@
#define PIO_COMPLETION_STATUS_UR 1
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
-#define PIO_NON_POSTED_REQ BIT(0)
+#define PIO_NON_POSTED_REQ BIT(10)
+#define PIO_ERR_STATUS BIT(11)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
@@ -127,6 +128,7 @@
#define LTSSM_MASK 0x3f
#define LTSSM_L0 0x10
#define RC_BAR_CONFIG 0x300
+#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)
/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
@@ -175,11 +177,14 @@
(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
-#define PIO_TIMEOUT_MS 1
+#define PIO_RETRY_CNT 750000 /* 1.5 s */
+#define PIO_RETRY_DELAY 2 /* 2 us */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000
+#define RETRAIN_WAIT_MAX_RETRIES 10
+#define RETRAIN_WAIT_USLEEP_US 2000
#define MSI_IRQ_NUM 32
@@ -189,6 +194,7 @@
struct list_head resources;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
+ raw_spinlock_t irq_lock;
struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
struct irq_chip msi_bottom_irq_chip;
@@ -239,6 +245,17 @@
return -ETIMEDOUT;
}
+static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
+{
+ size_t retries;
+
+ for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
+ if (!advk_pcie_link_up(pcie))
+ break;
+ udelay(RETRAIN_WAIT_USLEEP_US);
+ }
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
@@ -254,6 +271,16 @@
reg |= (IS_RC_MSK << IS_RC_SHIFT);
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+ /*
+ * Replace the incorrect PCI vendor ID value 0x1b4b with the correct value 0x11ab.
+ * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
+ * id in high 16 bits. Updating this register changes readback value of
+ * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
+ * for erratum 4.1: "The value of device and vendor ID is incorrect".
+ */
+ reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
+ advk_writel(pcie, reg, VENDOR_ID_REG);
+
/* Set Advanced Error Capabilities and Control PF0 register */
reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
@@ -331,10 +358,6 @@
advk_pcie_wait_for_link(pcie);
- reg = PCIE_CORE_LINK_L0S_ENTRY |
- (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
-
reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
PCIE_CORE_CMD_IO_ACCESS_EN |
@@ -342,7 +365,7 @@
advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}
-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -353,14 +376,49 @@
status = (reg & PIO_COMPLETION_STATUS_MASK) >>
PIO_COMPLETION_STATUS_SHIFT;
- if (!status)
- return;
-
+ /*
+ * According to the HW spec, the PIO status check sequence is as follows:
+ * 1) even if COMPLETION_STATUS(bit9:7) indicates success, the Error
+ * Status bit (bit11) must still be checked; only when that bit
+ * indicates no error did the operation succeed.
+ * 2) the Unsupported Request(1) value of COMPLETION_STATUS(bit9:7)
+ * only means a PIO write error; a PIO read is successful, with a
+ * read value of 0xFFFFFFFF.
+ * 3) the Completion Retry Status(CRS) value of COMPLETION_STATUS(bit9:7)
+ * only means a PIO write error; a PIO read is successful, with a
+ * read value of 0xFFFF0001.
+ * 4) the Completer Abort(CA) value of COMPLETION_STATUS(bit9:7) means
+ * an error for both PIO read and PIO write operations.
+ * 5) other errors are reported as 'unknown'.
+ */
switch (status) {
+ case PIO_COMPLETION_STATUS_OK:
+ if (reg & PIO_ERR_STATUS) {
+ strcomp_status = "COMP_ERR";
+ break;
+ }
+ /* Get the read result */
+ if (val)
+ *val = advk_readl(pcie, PIO_RD_DATA);
+ /* No error */
+ strcomp_status = NULL;
+ break;
case PIO_COMPLETION_STATUS_UR:
strcomp_status = "UR";
break;
case PIO_COMPLETION_STATUS_CRS:
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is not enabled, the Root Complex
+ * must re-issue the Configuration Request as a new Request.
+ * A Root Complex implementation may choose to limit the number
+ * of Configuration Request/CRS Completion Status loops before
+ * determining that something is wrong with the target of the
+ * Request and taking appropriate action, e.g., complete the
+ * Request to the host as a failed transaction.
+ *
+ * To simplify implementation do not re-issue the Configuration
+ * Request and complete the Request as a failed transaction.
+ */
strcomp_status = "CRS";
break;
case PIO_COMPLETION_STATUS_CA:
@@ -371,6 +429,9 @@
break;
}
+ if (!strcomp_status)
+ return 0;
+
if (reg & PIO_NON_POSTED_REQ)
str_posted = "Non-posted";
else
@@ -378,25 +439,26 @@
dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+
+ return -EFAULT;
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- unsigned long timeout;
+ int i;
- timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
-
- while (time_before(jiffies, timeout)) {
+ for (i = 0; i < PIO_RETRY_CNT; i++) {
u32 start, isr;
start = advk_readl(pcie, PIO_START);
isr = advk_readl(pcie, PIO_ISR);
if (!start && isr)
return 0;
+ udelay(PIO_RETRY_DELAY);
}
- dev_err(dev, "config read/write timed out\n");
+ dev_err(dev, "PIO read/write transfer time out\n");
return -ETIMEDOUT;
}
@@ -415,7 +477,7 @@
case PCI_EXP_RTCTL: {
u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
+ *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -426,11 +488,20 @@
return PCI_BRIDGE_EMUL_HANDLED;
}
+ case PCI_EXP_LNKCTL: {
+ /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
+ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
+ ~(PCI_EXP_LNKSTA_LT << 16);
+ if (!advk_pcie_link_up(pcie))
+ val |= (PCI_EXP_LNKSTA_LT << 16);
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
case PCI_CAP_LIST_ID:
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_LNKCAP:
- case PCI_EXP_LNKCTL:
*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
return PCI_BRIDGE_EMUL_HANDLED;
default:
@@ -447,15 +518,25 @@
switch (reg) {
case PCI_EXP_DEVCTL:
- case PCI_EXP_LNKCTL:
advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
break;
- case PCI_EXP_RTCTL:
- new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
- advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
+ case PCI_EXP_LNKCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ if (new & PCI_EXP_LNKCTL_RL)
+ advk_pcie_wait_for_retrain(pcie);
break;
+ case PCI_EXP_RTCTL: {
+ /* Only mask/unmask PME interrupt */
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
+ ~PCIE_MSG_PM_PME_MASK;
+ if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
+ val |= PCIE_MSG_PM_PME_MASK;
+ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+ break;
+ }
+
case PCI_EXP_RTSTA:
new = (new & PCI_EXP_RTSTA_PME) >> 9;
advk_writel(pcie, new, PCIE_ISR0_REG);
@@ -475,7 +556,7 @@
* Initialize the configuration space of the PCI-to-PCI bridge
* associated with the given PCIe interface.
*/
-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
@@ -499,8 +580,7 @@
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
- pci_bridge_emul_init(bridge, 0);
-
+ return pci_bridge_emul_init(bridge, 0);
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
@@ -512,6 +592,35 @@
return true;
}
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+
+ /*
+ * Trying to start a new PIO transfer when the previous one has not
+ * completed causes an External Abort on the CPU, which results in a
+ * kernel panic:
+ *
+ * SError Interrupt on CPU0, code 0xbf000002 -- SError
+ * Kernel panic - not syncing: Asynchronous SError Interrupt
+ *
+ * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+ * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
+ * concurrent calls. But because a PIO transfer may take about 1.5 s
+ * when the link is down or the card is disconnected,
+ * advk_pcie_wait_pio() does not always wait until completion.
+ *
+ * Some versions of ARM Trusted Firmware handle this External Abort at
+ * EL3 level and mask it to prevent the kernel panic. Relevant TF-A commit:
+ * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+ */
+ if (advk_readl(pcie, PIO_START)) {
+ dev_err(dev, "Previous PIO read/write transfer is still running\n");
+ return true;
+ }
+
+ return false;
+}
+
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -528,9 +637,10 @@
return pci_bridge_emul_conf_read(&pcie->bridge, where,
size, val);
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie)) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -549,17 +659,21 @@
/* Program the data strobe */
advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
return PCIBIOS_SET_FAILED;
- advk_pcie_check_pio_status(pcie);
+ /* Check PIO status and get the read result */
+ ret = advk_pcie_check_pio_status(pcie, val);
+ if (ret < 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
- /* Get the read result */
- *val = advk_readl(pcie, PIO_RD_DATA);
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
else if (size == 2)
@@ -587,9 +701,8 @@
if (where % size)
return PCIBIOS_SET_FAILED;
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie))
+ return PCIBIOS_SET_FAILED;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -616,14 +729,17 @@
/* Program the data strobe */
advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
return PCIBIOS_SET_FAILED;
- advk_pcie_check_pio_status(pcie);
+ ret = advk_pcie_check_pio_status(pcie, NULL);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
return PCIBIOS_SUCCESSFUL;
}
@@ -697,22 +813,28 @@
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void advk_pcie_irq_unmask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static int advk_pcie_irq_map(struct irq_domain *h,
@@ -796,6 +918,8 @@
struct irq_chip *irq_chip;
int ret = 0;
+ raw_spin_lock_init(&pcie->irq_lock);
+
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
@@ -999,7 +1123,11 @@
advk_pcie_setup_hw(pcie);
- advk_sw_pci_bridge_init(pcie);
+ ret = advk_sw_pci_bridge_init(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to register emulated root PCI bridge\n");
+ return ret;
+ }
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
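Several aardvark hunks above wrap the ISR1 mask register updates in a new raw spinlock: irq_mask/irq_unmask perform a read-modify-write of one shared register and can run concurrently on different CPUs. The pattern, sketched with hypothetical names and a made-up register offset:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/spinlock.h>

    #define MY_ISR_MASK_REG 0x48          /* hypothetical offset */

    struct my_pcie {
        void __iomem *base;
        raw_spinlock_t irq_lock;          /* serializes the mask RMW */
    };

    static void my_irq_mask(struct my_pcie *p, unsigned int hwirq)
    {
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&p->irq_lock, flags);
        mask = readl(p->base + MY_ISR_MASK_REG);
        mask |= BIT(hwirq);               /* RMW must be atomic wrt unmask */
        writel(mask, p->base + MY_ISR_MASK_REG);
        raw_spin_unlock_irqrestore(&p->irq_lock, flags);
    }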
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index f1f3002..8c45d6c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -3121,6 +3121,9 @@
static int __init init_hv_pci_drv(void)
{
+ if (!hv_is_hyperv_initialized())
+ return -ENODEV;
+
/* Set the invalid domain number's bit, so it will not be used */
set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index d3a0419..5a2483e 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -105,6 +105,7 @@
struct mvebu_pcie_window memwin;
struct mvebu_pcie_window iowin;
u32 saved_pcie_stat;
+ struct resource regs;
};
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
@@ -149,7 +150,9 @@
/*
* Setup PCIE BARs and Address Decode Wins:
- * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
+ * BAR[0] -> internal registers (needed for MSI)
+ * BAR[1] -> covers all DRAM banks
+ * BAR[2] -> Disabled
* WIN[0-3] -> DRAM bank[0-3]
*/
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
@@ -203,6 +206,12 @@
mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
PCIE_BAR_CTRL_OFF(1));
+
+ /*
+ * Point BAR[0] to the device's internal registers.
+ */
+ mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
@@ -708,14 +717,13 @@
struct device_node *np,
struct mvebu_pcie_port *port)
{
- struct resource regs;
int ret = 0;
- ret = of_address_to_resource(np, 0, &regs);
+ ret = of_address_to_resource(np, 0, &port->regs);
if (ret)
return ERR_PTR(ret);
- return devm_ioremap_resource(&pdev->dev, &regs);
+ return devm_ioremap_resource(&pdev->dev, &port->regs);
}
#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 673a172..99d505a 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -181,13 +181,6 @@
#define AFI_PEXBIAS_CTRL_0 0x168
-#define RP_PRIV_XP_DL 0x00000494
-#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1)
-
-#define RP_RX_HDR_LIMIT 0x00000e00
-#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8)
-#define RP_RX_HDR_LIMIT_PW (0x0e << 8)
-
#define RP_ECTL_2_R1 0x00000e84
#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
@@ -323,7 +316,6 @@
bool program_uphy;
bool update_clamp_threshold;
bool program_deskew_time;
- bool raw_violation_fixup;
bool update_fc_timer;
bool has_cache_bars;
struct {
@@ -669,23 +661,6 @@
writel(value, port->base + RP_VEND_CTL0);
}
- /* Fixup for read after write violation. */
- if (soc->raw_violation_fixup) {
- value = readl(port->base + RP_RX_HDR_LIMIT);
- value &= ~RP_RX_HDR_LIMIT_PW_MASK;
- value |= RP_RX_HDR_LIMIT_PW;
- writel(value, port->base + RP_RX_HDR_LIMIT);
-
- value = readl(port->base + RP_PRIV_XP_DL);
- value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
- writel(value, port->base + RP_PRIV_XP_DL);
-
- value = readl(port->base + RP_VEND_XP);
- value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
- value |= soc->update_fc_threshold;
- writel(value, port->base + RP_VEND_XP);
- }
-
if (soc->update_fc_timer) {
value = readl(port->base + RP_VEND_XP);
value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
@@ -2499,7 +2474,6 @@
.num_ports = 2,
.ports = tegra20_pcie_ports,
.msi_base_shift = 0,
- .afi_pex2_ctrl = 0x128,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2512,7 +2486,6 @@
.program_uphy = true,
.update_clamp_threshold = false,
.program_deskew_time = false,
- .raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = true,
.ectl.enable = false,
@@ -2528,6 +2501,7 @@
.num_ports = 3,
.ports = tegra30_pcie_ports,
.msi_base_shift = 8,
+ .afi_pex2_ctrl = 0x128,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2541,7 +2515,6 @@
.program_uphy = true,
.update_clamp_threshold = false,
.program_deskew_time = false,
- .raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,
@@ -2554,8 +2527,6 @@
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x44ac44ac,
- /* FC threshold is bit[25:18] */
- .update_fc_threshold = 0x03fc0000,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2565,7 +2536,6 @@
.program_uphy = true,
.update_clamp_threshold = true,
.program_deskew_time = false,
- .raw_violation_fixup = true,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,
@@ -2589,7 +2559,6 @@
.program_uphy = true,
.update_clamp_threshold = true,
.program_deskew_time = true,
- .raw_violation_fixup = false,
.update_fc_timer = true,
.has_cache_bars = false,
.ectl = {
@@ -2631,7 +2600,6 @@
.program_uphy = false,
.update_clamp_threshold = false,
.program_deskew_time = false,
- .raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,
@@ -2645,6 +2613,7 @@
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
@@ -2798,9 +2767,9 @@
pm_runtime_enable(pcie->dev);
err = pm_runtime_get_sync(pcie->dev);
- if (err) {
+ if (err < 0) {
dev_err(dev, "fail to enable pcie controller: %d\n", err);
- goto teardown_msi;
+ goto pm_runtime_put;
}
err = tegra_pcie_request_resources(pcie);
@@ -2840,7 +2809,6 @@
pm_runtime_put:
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
-teardown_msi:
tegra_pcie_msi_teardown(pcie);
put_resources:
tegra_pcie_put_resources(pcie);
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index 32d1d7b..18715d2 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -116,7 +116,7 @@
* the config space access window. Since we are working with
* the high-order 32 bits, shift everything down by 32 bits.
*/
- node_bits = (cfg->res.start >> 32) & (1 << 12);
+ node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
v |= node_bits;
set_val(v, where, size, val);
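The thunder-ecam change swaps a raw ">> 32" for upper_32_bits(). The helper matters because resource_size_t is 32 bits wide on some configurations, where shifting by the full type width is undefined behaviour. Sketch:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static u32 node_bit(resource_size_t start)
    {
        /*
         * "start >> 32" is undefined if resource_size_t is 32-bit;
         * upper_32_bits() shifts twice by 16 and is well defined for
         * both 32- and 64-bit resource_size_t.
         */
        return upper_32_bits(start) & (1 << 12);
    }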
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index f127ce8..1650ec2 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -11,6 +11,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@@ -314,9 +315,9 @@
* structure here for the BAR.
*/
bar4_start = res_pem->start + 0xf00000;
- pem_pci->ea_entry[0] = (u32)bar4_start | 2;
- pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
- pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+ pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
+ pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
cfg->priv = pem_pci;
return 0;
@@ -324,9 +325,9 @@
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-#define PEM_RES_BASE 0x87e0c0000000UL
-#define PEM_NODE_MASK GENMASK(45, 44)
-#define PEM_INDX_MASK GENMASK(26, 24)
+#define PEM_RES_BASE 0x87e0c0000000ULL
+#define PEM_NODE_MASK GENMASK_ULL(45, 44)
+#define PEM_INDX_MASK GENMASK_ULL(26, 24)
#define PEM_MIN_DOM_IN_NODE 4
#define PEM_MAX_DOM_IN_NODE 10
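The thunder-pem constants move from GENMASK() to GENMASK_ULL() because GENMASK() is built from unsigned long, which is 32 bits on 32-bit kernels, so a mask covering bits 44-45 would truncate to zero there. Illustrative sketch with hypothetical names:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define MY_RES_BASE   0x87e0c0000000ULL
    #define MY_NODE_MASK  GENMASK_ULL(45, 44)   /* u64 on every arch */

    static u64 my_res_for_node(u64 node)
    {
        /* With plain GENMASK(45, 44) this would be 0 on a 32-bit
         * build, silently dropping the node bits. */
        return MY_RES_BASE | FIELD_PREP(MY_NODE_MASK, node);
    }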
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index d219404..9a86bb7 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -743,7 +743,7 @@
int ret;
LIST_HEAD(res);
- host = pci_alloc_host_bridge(sizeof(*v3));
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*v3));
if (!host)
return -ENOMEM;
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index f4c02da..0bfa506 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -384,13 +384,9 @@
if (!msi_group->gic_irq)
continue;
- irq_set_chained_handler(msi_group->gic_irq,
- xgene_msi_isr);
- err = irq_set_handler_data(msi_group->gic_irq, msi_group);
- if (err) {
- pr_err("failed to register GIC IRQ handler\n");
- return -EINVAL;
- }
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ xgene_msi_isr, msi_group);
+
/*
* Statically allocate MSI GIC IRQs to each CPU core.
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
index 97e2510..0dfc778 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/pcie-cadence-host.c
@@ -102,6 +102,7 @@
{
struct cdns_pcie *pcie = &rc->pcie;
u32 value, ctrl;
+ u32 id;
/*
* Set the root complex BAR configuration register:
@@ -121,8 +122,12 @@
cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
/* Set root port configuration space */
- if (rc->vendor_id != 0xffff)
- cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
+ if (rc->vendor_id != 0xffff) {
+ id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
+ CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+ }
+
if (rc->device_id != 0xffff)
cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 0a3f61b..0cb2fa1 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -171,7 +171,7 @@
static struct msi_domain_info iproc_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_PCI_MSIX,
.chip = &iproc_msi_irq_chip,
};
@@ -209,15 +209,20 @@
struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
int target_cpu = cpumask_first(mask);
int curr_cpu;
+ int ret;
curr_cpu = hwirq_to_cpu(msi, data->hwirq);
if (curr_cpu == target_cpu)
- return IRQ_SET_MASK_OK_DONE;
+ ret = IRQ_SET_MASK_OK_DONE;
+ else {
+ /* steer MSI to the target CPU */
+ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ ret = IRQ_SET_MASK_OK;
+ }
- /* steer MSI to the target CPU */
- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
- return IRQ_SET_MASK_OK;
+ return ret;
}
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
@@ -245,20 +250,23 @@
struct iproc_msi *msi = domain->host_data;
int hwirq, i;
+ if (msi->nr_cpus > 1 && nr_irqs > 1)
+ return -EINVAL;
+
mutex_lock(&msi->bitmap_lock);
- /* Allocate 'nr_cpus' number of MSI vectors each time */
- hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
- msi->nr_cpus, 0);
- if (hwirq < msi->nr_msi_vecs) {
- bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
- } else {
- mutex_unlock(&msi->bitmap_lock);
- return -ENOSPC;
- }
+ /*
+ * Allocate 'nr_irqs' * 'nr_cpus' MSI vectors at a time
+ */
+ hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
+ order_base_2(msi->nr_cpus * nr_irqs));
mutex_unlock(&msi->bitmap_lock);
+ if (hwirq < 0)
+ return -ENOSPC;
+
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
&iproc_msi_bottom_irq_chip,
@@ -266,7 +274,7 @@
NULL, NULL);
}
- return hwirq;
+ return 0;
}
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
@@ -279,7 +287,8 @@
mutex_lock(&msi->bitmap_lock);
hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
- bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);
+ bitmap_release_region(msi->bitmap, hwirq,
+ order_base_2(msi->nr_cpus * nr_irqs));
mutex_unlock(&msi->bitmap_lock);
@@ -533,6 +542,9 @@
mutex_init(&msi->bitmap_lock);
msi->nr_cpus = num_possible_cpus();
+ if (msi->nr_cpus == 1)
+ iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;
+
msi->nr_irqs = of_irq_count(node);
if (!msi->nr_irqs) {
dev_err(pcie->dev, "found no MSI GIC interrupt\n");
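The iproc-msi rework above replaces bitmap_find_next_zero_area()+bitmap_set() with bitmap_find_free_region(), which allocates a naturally aligned power-of-two region and pairs with bitmap_release_region() in the free path. Sketch (hypothetical helper):

    #include <linux/bitmap.h>
    #include <linux/log2.h>

    static int my_alloc_vectors(unsigned long *bitmap, int total, int want)
    {
        /* round 'want' up to a power of two and grab an aligned
         * region; returns the first bit index or a negative errno */
        int first = bitmap_find_free_region(bitmap, total,
                                            order_base_2(want));

        /* free later with bitmap_release_region(bitmap, first,
         * order_base_2(want)) -- the order must match */
        return first;
    }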
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 2d457bf..c6b1c18 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -307,7 +307,7 @@
};
/* iProc PCIe PAXB BCMA registers */
-static const u16 iproc_pcie_reg_paxb_bcma[] = {
+static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -318,7 +318,7 @@
};
/* iProc PCIe PAXB registers */
-static const u16 iproc_pcie_reg_paxb[] = {
+static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -334,7 +334,7 @@
};
/* iProc PCIe PAXB v2 registers */
-static const u16 iproc_pcie_reg_paxb_v2[] = {
+static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -363,7 +363,7 @@
};
/* iProc PCIe PAXC v1 registers */
-static const u16 iproc_pcie_reg_paxc[] = {
+static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
@@ -372,7 +372,7 @@
};
/* iProc PCIe PAXC v2 registers */
-static const u16 iproc_pcie_reg_paxc_v2[] = {
+static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_MSI_GIC_MODE] = 0x050,
[IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
[IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
@@ -1608,6 +1608,30 @@
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
quirk_paxc_disable_msi_parsing);
+static void quirk_paxc_bridge(struct pci_dev *pdev)
+{
+ /*
+ * The PCI config space is shared with the PAXC root port and the first
+ * Ethernet device. So, we need to work around this by telling the PCI
+ * code that the bridge is not an Ethernet device.
+ */
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
+
+ /*
+ * MPSS is not being set properly (as it is currently 0). This is
+ * because that area of the PCI config space is hard coded to zero, and
+ * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
+ * so that the MPS can be set to the real max value.
+ */
+ pdev->pcie_mpss = 2;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
+
MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 626a7c3..728a596 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1063,14 +1063,14 @@
err = of_pci_get_devfn(child);
if (err < 0) {
dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
+ goto error_put_node;
}
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- return err;
+ goto error_put_node;
}
err = mtk_pcie_subsys_powerup(pcie);
@@ -1086,6 +1086,9 @@
mtk_pcie_subsys_powerdown(pcie);
return 0;
+error_put_node:
+ of_node_put(child);
+ return err;
}
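The mediatek fix addresses a reference leak: for_each_available_child_of_node() holds a reference on the current child and releases it on the next iteration, so any early exit from the loop must drop it by hand. Sketch with a hypothetical parse_one():

    #include <linux/of.h>

    static int parse_one(struct device_node *child);   /* hypothetical */

    static int parse_children(struct device_node *np)
    {
        struct device_node *child;
        int err;

        for_each_available_child_of_node(np, child) {
            err = parse_one(child);
            if (err) {
                /* the iterator still holds a ref on 'child';
                 * drop it before leaving the loop early */
                of_node_put(child);
                return err;
            }
        }
        return 0;
    }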
static int mtk_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index a45a644..32f37d0 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -235,7 +235,7 @@
return PCIBIOS_SUCCESSFUL;
}
-static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
{
void *addr;
u32 val;
@@ -250,7 +250,8 @@
return val;
}
-static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
+static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size)
{
void *addr;
int ret;
@@ -262,19 +263,19 @@
dev_err(&pcie->pdev->dev, "write CSR address failed\n");
}
-static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
+static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
{
- return csr_read(pcie, off, 0x4);
+ return mobiveil_csr_read(pcie, off, 0x4);
}
-static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
{
- csr_write(pcie, val, off, 0x4);
+ mobiveil_csr_write(pcie, val, off, 0x4);
}
static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
{
- return (csr_readl(pcie, LTSSM_STATUS) &
+ return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
}
@@ -323,7 +324,7 @@
PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
- csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
return pcie->config_axi_slave_base + where;
}
@@ -353,13 +354,14 @@
chained_irq_enter(chip, desc);
/* read INTx status */
- val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
- mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
intr_status = val & mask;
/* Handle INTx */
if (intr_status & PAB_INTP_INTX_MASK) {
- shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
do {
@@ -373,12 +375,13 @@
bit);
/* clear interrupt handled */
- csr_writel(pcie, 1 << (PAB_INTX_START + bit),
- PAB_INTP_AMBA_MISC_STAT);
+ mobiveil_csr_writel(pcie,
+ 1 << (PAB_INTX_START + bit),
+ PAB_INTP_AMBA_MISC_STAT);
}
- shifted_status = csr_readl(pcie,
- PAB_INTP_AMBA_MISC_STAT);
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
} while (shifted_status != 0);
@@ -413,7 +416,7 @@
}
/* Clear the interrupt status */
- csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
chained_irq_exit(chip, desc);
}
@@ -474,24 +477,24 @@
return;
}
- value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+ mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
- csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
- csr_writel(pcie, lower_32_bits(cpu_addr),
- PAB_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+ PAB_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_H(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
pcie->ib_wins_configured++;
}
@@ -515,27 +518,29 @@
* program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
* to 4 KB in PAB_AXI_AMAP_CTRL register
*/
- value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
- csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
/*
* program AXI window base with appropriate value in
* PAB_AXI_AMAP_AXI_WIN0 register
*/
- csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie,
+ lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
pcie->ob_wins_configured++;
}
@@ -579,42 +584,42 @@
struct resource_entry *win;
/* setup bus numbers */
- value = csr_readl(pcie, PCI_PRIMARY_BUS);
+ value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
value &= 0xff000000;
value |= 0x00ff0100;
- csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
/*
* program Bus Master Enable Bit in Command Register in PAB Config
* Space
*/
- value = csr_readl(pcie, PCI_COMMAND);
+ value = mobiveil_csr_readl(pcie, PCI_COMMAND);
value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- csr_writel(pcie, value, PCI_COMMAND);
+ mobiveil_csr_writel(pcie, value, PCI_COMMAND);
/*
* program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
* register
*/
- pab_ctrl = csr_readl(pcie, PAB_CTRL);
+ pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
- csr_writel(pcie, pab_ctrl, PAB_CTRL);
+ mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
- csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
*/
- value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
value |= APIO_EN_MASK;
- csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+ mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
/* Enable PCIe PIO master */
- value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
value |= 1 << PIO_ENABLE_SHIFT;
- csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
+ mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
/*
* we'll program one outbound window for config reads and
@@ -647,10 +652,10 @@
}
/* fixup for PCIe class register */
- value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+ value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
value &= 0xff;
value |= (PCI_CLASS_BRIDGE_PCI << 16);
- csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+ mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
/* setup MSI hardware registers */
mobiveil_pcie_enable_msi(pcie);
@@ -668,9 +673,9 @@
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
- shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val &= ~mask;
- csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -684,9 +689,9 @@
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
- shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val |= mask;
- csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index f6a669a..0411435 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -93,8 +93,11 @@
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
+#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
#define SPEED_CHANGE BIT(24)
#define SCRAMBLE_DISABLE BIT(27)
+#define LTSMDIS BIT(31)
+#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
#define PMSR 0x01105c
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
@@ -332,11 +335,12 @@
};
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
- struct resource *res)
+ struct resource_entry *window)
{
/* Setup PCIe address space mappings for each resource */
resource_size_t size;
resource_size_t res_start;
+ struct resource *res = window->res;
u32 mask;
rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
@@ -350,9 +354,9 @@
rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
if (res->flags & IORESOURCE_IO)
- res_start = pci_pio_to_address(res->start);
+ res_start = pci_pio_to_address(res->start) - window->offset;
else
- res_start = res->start;
+ res_start = res->start - window->offset;
rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
@@ -381,7 +385,7 @@
switch (resource_type(res)) {
case IORESOURCE_IO:
case IORESOURCE_MEM:
- rcar_pcie_setup_window(i, pci, res);
+ rcar_pcie_setup_window(i, pci, win);
i++;
break;
case IORESOURCE_BUS:
@@ -615,6 +619,8 @@
if (IS_ENABLED(CONFIG_PCI_MSI))
rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
/* Finish initialization - establish a PCI Express link */
rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
@@ -1237,6 +1243,7 @@
return 0;
/* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
return rcar_pcie_wait_for_dl(pcie);
}
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 45c0f34..11b046b 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -6,6 +6,7 @@
* (C) Copyright 2014 - 2015, Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -169,6 +170,7 @@
u8 root_busno;
struct nwl_msi msi;
struct irq_domain *legacy_irq_domain;
+ struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -839,6 +841,16 @@
return err;
}
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ err = clk_prepare_enable(pcie->clk);
+ if (err) {
+ dev_err(dev, "can't enable PCIe ref clock\n");
+ return err;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a35d3f3..9966dcf 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -593,9 +593,11 @@
if (!membar2)
return -ENOMEM;
offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
- readq(membar2 + MB2_SHADOW_OFFSET);
+ (readq(membar2 + MB2_SHADOW_OFFSET) &
+ PCI_BASE_ADDRESS_MEM_MASK);
offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
- readq(membar2 + MB2_SHADOW_OFFSET + 8);
+ (readq(membar2 + MB2_SHADOW_OFFSET + 8) &
+ PCI_BASE_ADDRESS_MEM_MASK);
pci_iounmap(vmd->dev, membar2);
}
}
@@ -678,9 +680,10 @@
vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
x86_vector_domain);
- irq_domain_free_fwnode(fn);
- if (!vmd->irq_domain)
+ if (!vmd->irq_domain) {
+ irq_domain_free_fwnode(fn);
return -ENODEV;
+ }
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
@@ -691,6 +694,7 @@
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
return -ENODEV;
}
@@ -805,6 +809,7 @@
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct fwnode_handle *fn = vmd->irq_domain->fwnode;
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
@@ -813,6 +818,7 @@
vmd_teardown_dma_ops(vmd);
vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
}
#ifdef CONFIG_PM_SLEEP
@@ -854,6 +860,8 @@
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
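The vmd changes fix a leak/lifetime pair: the fwnode backing an irq_domain must stay alive for the domain's whole lifetime, so it is now freed only when domain creation fails or after irq_domain_remove(). Sketch with a generic linear domain standing in for the MSI domain:

    #include <linux/irqdomain.h>

    struct my_dev {
        struct fwnode_handle *fn;
        struct irq_domain *domain;
    };

    static int my_setup_domain(struct my_dev *d, const struct irq_domain_ops *ops)
    {
        d->fn = irq_domain_alloc_named_fwnode("my-msi");
        if (!d->fn)
            return -ENOMEM;

        d->domain = irq_domain_create_linear(d->fn, 32, ops, d);
        if (!d->domain) {
            irq_domain_free_fwnode(d->fn);   /* nothing references it yet */
            return -ENODEV;
        }
        return 0;   /* keep d->fn until teardown */
    }

    static void my_teardown_domain(struct my_dev *d)
    {
        irq_domain_remove(d->domain);
        irq_domain_free_fwnode(d->fn);   /* only after the domain is gone */
    }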
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 1cfe368..6dcee39 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -604,6 +604,7 @@
ret = pci_epf_register_driver(&test_driver);
if (ret) {
+ destroy_workqueue(kpcitest_workqueue);
pr_err("Failed to register pci epf test driver --> %d\n", ret);
return ret;
}
@@ -614,6 +615,8 @@
static void __exit pci_epf_test_exit(void)
{
+ if (kpcitest_workqueue)
+ destroy_workqueue(kpcitest_workqueue);
pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index 2bf8bd1..0471643 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -79,6 +79,7 @@
mem->page_size = page_size;
mem->pages = pages;
mem->size = size;
+ mutex_init(&mem->lock);
epc->mem = mem;
@@ -122,7 +123,7 @@
phys_addr_t *phys_addr, size_t size)
{
int pageno;
- void __iomem *virt_addr;
+ void __iomem *virt_addr = NULL;
struct pci_epc_mem *mem = epc->mem;
unsigned int page_shift = ilog2(mem->page_size);
int order;
@@ -130,15 +131,18 @@
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
if (pageno < 0)
- return NULL;
+ goto ret;
*phys_addr = mem->phys_base + (pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
+ret:
+ mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
@@ -164,7 +168,9 @@
pageno = (phys_addr - mem->phys_base) >> page_shift;
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
+ mutex_unlock(&mem->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
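pci_epc_mem_alloc_addr() and pci_epc_mem_free_addr() share one allocation bitmap and can be called from several endpoint function drivers at once, hence the new mem->lock around the bitmap operations. Minimal sketch:

    #include <linux/bitmap.h>
    #include <linux/mutex.h>

    struct my_mem {
        struct mutex lock;        /* protects 'bitmap' */
        unsigned long *bitmap;
        int pages;
    };

    static int my_alloc_region(struct my_mem *mem, int order)
    {
        int pageno;

        mutex_lock(&mem->lock);
        pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
        mutex_unlock(&mem->lock);

        return pageno;            /* negative on failure */
    }

    static void my_free_region(struct my_mem *mem, int pageno, int order)
    {
        mutex_lock(&mem->lock);
        bitmap_release_region(mem->bitmap, pageno, order);
        mutex_unlock(&mem->lock);
    }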
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index a32070b..cc6194a 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -40,9 +40,6 @@
* The return value of pci_hp_register() is not checked.
-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
- and once more in the error path of its caller ibmphp_access_ebda().
-
* The various slot data structures are difficult to follow and need to be
simplified. A lot of functions are too large and too complex; they need
to be broken up into smaller, manageable pieces. Negative examples are
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index e4c4663..98be06a 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -122,13 +122,21 @@
struct acpiphp_context *context;
acpi_lock_hp_context();
+
context = acpiphp_get_context(adev);
- if (!context || context->func.parent->is_going_away) {
- acpi_unlock_hp_context();
- return NULL;
+ if (!context)
+ goto unlock;
+
+ if (context->func.parent->is_going_away) {
+ acpiphp_put_context(context);
+ context = NULL;
+ goto unlock;
}
+
get_bridge(context->func.parent);
acpiphp_put_context(context);
+
+unlock:
acpi_unlock_hp_context();
return context;
}
@@ -449,8 +457,15 @@
/* Scan non-hotplug bridges that need to be reconfigured */
for_each_pci_bridge(dev, bus) {
- if (!hotplug_is_native(dev))
- max = pci_scan_bridge(bus, dev, max, 1);
+ if (hotplug_is_native(dev))
+ continue;
+
+ max = pci_scan_bridge(bus, dev, max, 1);
+ if (dev->subordinate) {
+ pcibios_resource_survey_bus(dev->subordinate);
+ pci_bus_size_bridges(dev->subordinate);
+ pci_bus_assign_resources(dev->subordinate);
+ }
}
}
@@ -480,7 +495,6 @@
if (PCI_SLOT(dev->devfn) == slot->device)
acpiphp_native_scan_bridge(dev);
}
- pci_assign_unassigned_bridge_resources(bus->self);
} else {
LIST_HEAD(add_list);
int max, pass;
@@ -526,6 +540,7 @@
slot->flags &= ~SLOT_ENABLED;
continue;
}
+ pci_dev_put(dev);
}
}
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 11a2661..7fb7540 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -714,8 +714,7 @@
/* init hpc structure */
hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
if (!hpc_ptr) {
- rc = -ENOMEM;
- goto error_no_hpc;
+ return -ENOMEM;
}
hpc_ptr->ctlr_id = ctlr_id;
hpc_ptr->ctlr_relative_id = ctlr;
@@ -910,8 +909,6 @@
kfree(tmp_slot);
error_no_slot:
free_ebda_hpc(hpc_ptr);
-error_no_hpc:
- iounmap(io_mem);
return rc;
}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 654c972..aa61d4c 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,6 +72,7 @@
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
+ * @ist_running: flag to keep user request waiting while IRQ thread is running
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
* used for synchronous slot enable/disable request via sysfs
@@ -101,6 +102,7 @@
struct hotplug_slot hotplug_slot; /* hotplug core interface */
struct rw_semaphore reset_lock;
+ unsigned int ist_running;
int request_result;
wait_queue_head_t requester;
};
@@ -172,10 +174,10 @@
void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
int pciehp_query_power_fault(struct controller *ctrl);
-bool pciehp_card_present(struct controller *ctrl);
-bool pciehp_card_present_or_link_active(struct controller *ctrl);
+int pciehp_card_present(struct controller *ctrl);
+int pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
-bool pciehp_check_link_active(struct controller *ctrl);
+int pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index b3122c1..312cc45 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -139,10 +139,15 @@
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl->pcie->port;
+ int ret;
pci_config_pm_runtime_get(pdev);
- *value = pciehp_card_present_or_link_active(ctrl);
+ ret = pciehp_card_present_or_link_active(ctrl);
pci_config_pm_runtime_put(pdev);
+ if (ret < 0)
+ return ret;
+
+ *value = ret;
return 0;
}
@@ -158,13 +163,13 @@
*/
static void pciehp_check_presence(struct controller *ctrl)
{
- bool occupied;
+ int occupied;
down_read(&ctrl->reset_lock);
mutex_lock(&ctrl->state_lock);
occupied = pciehp_card_present_or_link_active(ctrl);
- if ((occupied && (ctrl->state == OFF_STATE ||
+ if ((occupied > 0 && (ctrl->state == OFF_STATE ||
ctrl->state == BLINKINGON_STATE)) ||
(!occupied && (ctrl->state == ON_STATE ||
ctrl->state == BLINKINGOFF_STATE)))
@@ -253,7 +258,7 @@
return pcie_ports_native || host->native_pme;
}
-static int pciehp_suspend(struct pcie_device *dev)
+static void pciehp_disable_interrupt(struct pcie_device *dev)
{
/*
* Disable hotplug interrupt so that it does not trigger
@@ -261,7 +266,19 @@
*/
if (pme_is_native(dev))
pcie_disable_interrupt(get_service_data(dev));
+}
+#ifdef CONFIG_PM_SLEEP
+static int pciehp_suspend(struct pcie_device *dev)
+{
+ /*
+ * If the port is already runtime suspended we can keep it that
+ * way.
+ */
+ if (dev_pm_smart_suspend_and_suspended(&dev->port->dev))
+ return 0;
+
+ pciehp_disable_interrupt(dev);
return 0;
}
@@ -279,6 +296,7 @@
return 0;
}
+#endif
static int pciehp_resume(struct pcie_device *dev)
{
@@ -292,6 +310,12 @@
return 0;
}
+static int pciehp_runtime_suspend(struct pcie_device *dev)
+{
+ pciehp_disable_interrupt(dev);
+ return 0;
+}
+
static int pciehp_runtime_resume(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
@@ -318,10 +342,12 @@
.remove = pciehp_remove,
#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = pciehp_suspend,
.resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
- .runtime_suspend = pciehp_suspend,
+#endif
+ .runtime_suspend = pciehp_runtime_suspend,
.runtime_resume = pciehp_runtime_resume,
#endif /* PM */
};
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 21af7b1..6503d15 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -226,7 +226,7 @@
void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
{
- bool present, link_active;
+ int present, link_active;
/*
* If the slot is on and presence or link has changed, turn it off.
@@ -257,7 +257,7 @@
mutex_lock(&ctrl->state_lock);
present = pciehp_card_present(ctrl);
link_active = pciehp_check_link_active(ctrl);
- if (!present && !link_active) {
+ if (present <= 0 && link_active <= 0) {
mutex_unlock(&ctrl->state_lock);
return;
}
@@ -375,7 +375,8 @@
ctrl->request_result = -ENODEV;
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
wait_event(ctrl->requester,
- !atomic_read(&ctrl->pending_events));
+ !atomic_read(&ctrl->pending_events) &&
+ !ctrl->ist_running);
return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
@@ -408,7 +409,8 @@
mutex_unlock(&ctrl->state_lock);
pciehp_request(ctrl, DISABLE_SLOT);
wait_event(ctrl->requester,
- !atomic_read(&ctrl->pending_events));
+ !atomic_read(&ctrl->pending_events) &&
+ !ctrl->ist_running);
return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
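
A sketch, under hypothetical names, of the race the two wait_event() changes close: waiting only for pending_events to clear can let the waiter run before the IRQ thread has stored request_result, so the condition also checks a flag that stays set for the whole IRQ-thread handler.

#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(example_requester);
static atomic_t example_pending_events;
static unsigned int example_ist_running;   /* set/cleared by the IRQ thread */
static int example_request_result;         /* written by the IRQ thread */

static int example_submit_request(unsigned int event)
{
        atomic_or(event, &example_pending_events);
        /* ... kick the IRQ thread here (omitted) ... */

        /* also wait for the thread itself, not just for the event bits */
        wait_event(example_requester,
                   !atomic_read(&example_pending_events) &&
                   !example_ist_running);

        return example_request_result;
}
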
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 1a522c1..88b9967 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -201,17 +201,29 @@
pcie_do_write_cmd(ctrl, cmd, mask, false);
}
-bool pciehp_check_link_active(struct controller *ctrl)
+/**
+ * pciehp_check_link_active() - Is the link active
+ * @ctrl: PCIe hotplug controller
+ *
+ * Check whether the downstream link is currently active. Note it is
+ * possible that the card is removed immediately after this, so the
+ * caller may need to take that into account.
+ *
+ * If the hotplug controller itself is not available anymore, returns
+ * %-ENODEV.
+ */
+int pciehp_check_link_active(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_status;
- bool ret;
+ int ret;
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
+ return -ENODEV;
+
ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
-
- if (ret)
- ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+ ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
return ret;
}
@@ -373,13 +385,29 @@
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}
-bool pciehp_card_present(struct controller *ctrl)
+/**
+ * pciehp_card_present() - Is the card present
+ * @ctrl: PCIe hotplug controller
+ *
+ * Checks whether the card is currently present in the slot, in which
+ * case %1 is returned. Note it is possible that the card is removed
+ * immediately after the check, so the caller may need to take this
+ * into account.
+ *
+ * If the hotplug controller itself is not available anymore, returns
+ * %-ENODEV.
+ */
+int pciehp_card_present(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
+ int ret;
- pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- return slot_status & PCI_EXP_SLTSTA_PDS;
+ ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
+ return -ENODEV;
+
+ return !!(slot_status & PCI_EXP_SLTSTA_PDS);
}
/**
@@ -390,10 +418,19 @@
* Presence Detect State bit, this helper also returns true if the Link Active
* bit is set. This is a concession to broken hotplug ports which hardwire
* Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ *
+ * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
+ * port is not present anymore, returns %-ENODEV.
*/
-bool pciehp_card_present_or_link_active(struct controller *ctrl)
+int pciehp_card_present_or_link_active(struct controller *ctrl)
{
- return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+ int ret;
+
+ ret = pciehp_card_present(ctrl);
+ if (ret)
+ return ret;
+
+ return pciehp_check_link_active(ctrl);
}
int pciehp_query_power_fault(struct controller *ctrl)
@@ -492,7 +529,7 @@
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
struct device *parent = pdev->dev.parent;
- u16 status, events;
+ u16 status, events = 0;
/*
* Interrupts only occur in D3hot or shallower and only if enabled
@@ -517,6 +554,7 @@
}
}
+read_status:
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
if (status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
@@ -529,24 +567,37 @@
* Slot Status contains plain status bits as well as event
* notification bits; right now we only want the event bits.
*/
- events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
- PCI_EXP_SLTSTA_DLLSC);
+ status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC;
/*
* If we've already reported a power fault, don't report it again
* until we've done something to handle it.
*/
if (ctrl->power_fault_detected)
- events &= ~PCI_EXP_SLTSTA_PFD;
+ status &= ~PCI_EXP_SLTSTA_PFD;
+ events |= status;
if (!events) {
if (parent)
pm_runtime_put(parent);
return IRQ_NONE;
}
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+ if (status) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+
+ /*
+ * In MSI mode, all event bits must be zero before the port
+ * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
+ * So re-read the Slot Status register in case a bit was set
+ * between read and write.
+ */
+ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
+ goto read_status;
+ }
+
ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
if (parent)
pm_runtime_put(parent);
@@ -583,23 +634,22 @@
irqreturn_t ret;
u32 events;
+ ctrl->ist_running = true;
pci_config_pm_runtime_get(pdev);
/* rerun pciehp_isr() if the port was inaccessible on interrupt */
if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
ret = pciehp_isr(irq, dev_id);
enable_irq(irq);
- if (ret != IRQ_WAKE_THREAD) {
- pci_config_pm_runtime_put(pdev);
- return ret;
- }
+ if (ret != IRQ_WAKE_THREAD)
+ goto out;
}
synchronize_hardirq(irq);
events = atomic_xchg(&ctrl->pending_events, 0);
if (!events) {
- pci_config_pm_runtime_put(pdev);
- return IRQ_NONE;
+ ret = IRQ_NONE;
+ goto out;
}
/* Check Attention Button Pressed */
@@ -628,9 +678,12 @@
pciehp_handle_presence_or_link_change(ctrl, events);
up_read(&ctrl->reset_lock);
+ ret = IRQ_HANDLED;
+out:
pci_config_pm_runtime_put(pdev);
+ ctrl->ist_running = false;
wake_up(&ctrl->requester);
- return IRQ_HANDLED;
+ return ret;
}
static int pciehp_poll(void *data)
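
A condensed sketch of the re-read loop added to pciehp_isr() above, with hypothetical read/ack helpers: in MSI mode the port signals a new interrupt only once all event bits are clear, so after each write-one-to-clear acknowledge the status must be read again in case another bit latched in between.

#define EXAMPLE_EVENT_BITS 0x011f                     /* hypothetical event mask */

static u16 example_read_slot_status(void)  { return 0; }  /* hw read, omitted */
static void example_ack_slot_status(u16 bits) { }         /* W1C write, omitted */

static u16 example_collect_events(void)
{
        u16 events = 0, status;

        do {
                status = example_read_slot_status() & EXAMPLE_EVENT_BITS;
                events |= status;                     /* accumulate for handler */
                if (status)
                        example_ack_slot_status(status);  /* W1C acknowledge */
        } while (status);                             /* re-check after each ack */

        return events;
}
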
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index cdbfa5d..dbfa0b5 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -34,12 +34,11 @@
if (nbytes >= MAX_DRC_NAME_LEN)
return 0;
- memcpy(drc_name, buf, nbytes);
+ strscpy(drc_name, buf, nbytes + 1);
end = strchr(drc_name, '\n');
- if (!end)
- end = &drc_name[nbytes];
- *end = '\0';
+ if (end)
+ *end = '\0';
rc = dlpar_add_slot(drc_name);
if (rc)
@@ -65,12 +64,11 @@
if (nbytes >= MAX_DRC_NAME_LEN)
return 0;
- memcpy(drc_name, buf, nbytes);
+ strscpy(drc_name, buf, nbytes + 1);
end = strchr(drc_name, '\n');
- if (!end)
- end = &drc_name[nbytes];
- *end = '\0';
+ if (end)
+ *end = '\0';
rc = dlpar_remove_slot(drc_name);
if (rc)
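
A hedged sketch of why strscpy() replaces memcpy() in the two hunks above: strscpy() always NUL-terminates its destination (truncating if needed), so the manual terminator store the old code required becomes a simple optional newline trim. The caller is assumed to have checked nbytes against the destination size.

#include <linux/string.h>

static void example_copy_name(char *dst, const char *buf, size_t nbytes)
{
        char *end;

        /* nbytes + 1 admits all nbytes input bytes plus the terminator */
        strscpy(dst, buf, nbytes + 1);

        /* sysfs input may carry a trailing newline; cut it off in place */
        end = strchr(dst, '\n');
        if (end)
                *end = '\0';
}
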
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 18627bb..32eab17 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -154,11 +154,11 @@
return speed;
}
-static int get_children_props(struct device_node *dn, const int **drc_indexes,
- const int **drc_names, const int **drc_types,
- const int **drc_power_domains)
+static int get_children_props(struct device_node *dn, const __be32 **drc_indexes,
+ const __be32 **drc_names, const __be32 **drc_types,
+ const __be32 **drc_power_domains)
{
- const int *indexes, *names, *types, *domains;
+ const __be32 *indexes, *names, *types, *domains;
indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
names = of_get_property(dn, "ibm,drc-names", NULL);
@@ -194,8 +194,8 @@
char *drc_type, unsigned int my_index)
{
char *name_tmp, *type_tmp;
- const int *indexes, *names;
- const int *types, *domains;
+ const __be32 *indexes, *names;
+ const __be32 *types, *domains;
int i, rc;
rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
@@ -208,7 +208,7 @@
/* Iterate through parent properties, looking for my-drc-index */
for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
- if ((unsigned int) indexes[i + 1] == my_index)
+ if (be32_to_cpu(indexes[i + 1]) == my_index)
break;
name_tmp += (strlen(name_tmp) + 1);
@@ -239,6 +239,8 @@
value = of_prop_next_u32(info, NULL, &entries);
if (!value)
return -EINVAL;
+ else
+ value++;
for (j = 0; j < entries; j++) {
of_read_drc_info_cell(&info, &value, &drc);
@@ -246,9 +248,10 @@
/* Should now know end of current entry */
/* Found it */
- if (my_index <= drc.last_drc_index) {
+ if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) {
+ int index = my_index - drc.drc_index_start;
sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
- my_index);
+ drc.drc_name_suffix_start + index);
break;
}
}
@@ -265,7 +268,7 @@
int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
char *drc_type)
{
- const unsigned int *my_index;
+ const __be32 *my_index;
my_index = of_get_property(dn, "ibm,my-drc-index", NULL);
if (!my_index) {
@@ -273,12 +276,12 @@
return -EINVAL;
}
- if (firmware_has_feature(FW_FEATURE_DRC_INFO))
+ if (of_find_property(dn->parent, "ibm,drc-info", NULL))
return rpaphp_check_drc_props_v2(dn, drc_name, drc_type,
- *my_index);
+ be32_to_cpu(*my_index));
else
return rpaphp_check_drc_props_v1(dn, drc_name, drc_type,
- *my_index);
+ be32_to_cpu(*my_index));
}
EXPORT_SYMBOL_GPL(rpaphp_check_drc_props);
@@ -309,10 +312,11 @@
* for built-in pci slots (even when the built-in slots are
* dlparable.)
*/
-static int is_php_dn(struct device_node *dn, const int **indexes,
- const int **names, const int **types, const int **power_domains)
+static int is_php_dn(struct device_node *dn, const __be32 **indexes,
+ const __be32 **names, const __be32 **types,
+ const __be32 **power_domains)
{
- const int *drc_types;
+ const __be32 *drc_types;
int rc;
rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
@@ -347,7 +351,7 @@
struct slot *slot;
int retval = 0;
int i;
- const int *indexes, *names, *types, *power_domains;
+ const __be32 *indexes, *names, *types, *power_domains;
char *name, *type;
if (!dn->name || strcmp(dn->name, "pci"))
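
A minimal sketch of the endianness rule behind the rpaphp changes above: of_get_property() hands back raw device-tree data, which is big-endian (__be32), so every value must pass through be32_to_cpu() before comparison or arithmetic on a little-endian host.

#include <linux/errno.h>
#include <linux/of.h>

static int example_read_drc_index(struct device_node *dn, u32 *index)
{
        const __be32 *prop;

        prop = of_get_property(dn, "ibm,my-drc-index", NULL);
        if (!prop)
                return -EINVAL;

        *index = be32_to_cpu(*prop);     /* convert before using the value */
        return 0;
}
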
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b3f972e..e152203 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -158,6 +158,7 @@
virtfn->device = iov->vf_device;
virtfn->is_virtfn = 1;
virtfn->physfn = pci_dev_get(dev);
+ virtfn->no_command_memory = 1;
if (id == 0)
pci_read_vf_config_common(virtfn);
@@ -187,10 +188,10 @@
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
- goto failed2;
+ goto failed1;
rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
if (rc)
- goto failed3;
+ goto failed2;
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
@@ -198,11 +199,10 @@
return 0;
-failed3:
- sysfs_remove_link(&dev->dev.kobj, buf);
failed2:
- pci_stop_and_remove_bus_device(virtfn);
+ sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
+ pci_stop_and_remove_bus_device(virtfn);
pci_dev_put(dev);
failed0:
virtfn_remove_bus(dev->bus, bus);
@@ -254,8 +254,14 @@
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ u16 num_vfs;
- return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+ /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
+ device_lock(&pdev->dev);
+ num_vfs = pdev->sriov->num_VFs;
+ device_unlock(&pdev->dev);
+
+ return sprintf(buf, "%u\n", num_vfs);
}
/*
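
A sketch of the reader-side locking the sriov_numvfs hunk introduces, with a hypothetical attribute: a sysfs show() that samples state modified under device_lock() in the matching store() takes the same lock so it never reports a transient value.

#include <linux/device.h>

struct example_state {
        struct device dev;
        u16 num;                 /* written by store() under device_lock() */
};

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct example_state *s = container_of(dev, struct example_state, dev);
        u16 val;

        device_lock(dev);        /* pairs with device_lock() in store() */
        val = s->num;
        device_unlock(dev);

        return sprintf(buf, "%u\n", val);
}
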
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0884bed..c8bd243 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -170,24 +170,25 @@
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- u32 mask_bits = desc->masked;
+ raw_spinlock_t *lock = &desc->dev->msi_lock;
+ unsigned long flags;
if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
- return 0;
+ return;
- mask_bits &= ~mask;
- mask_bits |= flag;
+ raw_spin_lock_irqsave(lock, flags);
+ desc->masked &= ~mask;
+ desc->masked |= flag;
pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
- mask_bits);
-
- return mask_bits;
+ desc->masked);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+ __pci_msi_desc_mask_irq(desc, mask, flag);
}
static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
@@ -213,12 +214,13 @@
if (pci_msi_ignore_mask)
return 0;
+
desc_addr = pci_msix_desc_addr(desc);
if (!desc_addr)
return 0;
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
- if (flag)
+ if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
@@ -315,13 +317,31 @@
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
+ bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
if (!base)
goto skip;
+ /*
+ * The specification mandates that the entry is masked
+ * when the message is modified:
+ *
+ * "If software changes the Address or Data value of an
+ * entry while the entry is unmasked, the result is
+ * undefined."
+ */
+ if (unmasked)
+ __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+ if (unmasked)
+ __pci_msix_desc_mask_irq(entry, 0);
+
+ /* Ensure that the writes are visible in the device */
+ readl(base + PCI_MSIX_ENTRY_DATA);
} else {
int pos = dev->msi_cap;
u16 msgctl;
@@ -342,6 +362,8 @@
pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
msg->data);
}
+ /* Ensure that the writes are visible in the device */
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
}
skip:
@@ -641,21 +663,21 @@
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
ret = msi_verify_entries(dev);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
ret = populate_msi_sysfs(dev);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
@@ -696,6 +718,7 @@
{
struct irq_affinity_desc *curmsk, *masks = NULL;
struct msi_desc *entry;
+ void __iomem *addr;
int ret, i;
int vec_count = pci_msix_vec_count(dev);
@@ -716,6 +739,7 @@
entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;
+
if (entries)
entry->msi_attrib.entry_nr = entries[i].entry;
else
@@ -727,6 +751,10 @@
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
+ addr = pci_msix_desc_addr(entry);
+ if (addr)
+ entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
if (masks)
curmsk++;
@@ -737,28 +765,30 @@
return ret;
}
-static void msix_program_entries(struct pci_dev *dev,
- struct msix_entry *entries)
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
struct msi_desc *entry;
- int i = 0;
- void __iomem *desc_addr;
for_each_pci_msi_entry(entry, dev) {
- if (entries)
- entries[i++].vector = entry->irq;
-
- desc_addr = pci_msix_desc_addr(entry);
- if (desc_addr)
- entry->masked = readl(desc_addr +
- PCI_MSIX_ENTRY_VECTOR_CTRL);
- else
- entry->masked = 0;
-
- msix_mask_irq(entry, 1);
+ if (entries) {
+ entries->vector = entry->irq;
+ entries++;
+ }
}
}
+static void msix_mask_all(void __iomem *base, int tsize)
+{
+ u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ int i;
+
+ if (pci_msi_ignore_mask)
+ return;
+
+ for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+ writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
+
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -773,22 +803,33 @@
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd)
{
- int ret;
- u16 control;
void __iomem *base;
+ int ret, tsize;
+ u16 control;
- /* Ensure MSI-X is disabled while it is set up */
- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ /*
+ * Some devices require MSI-X to be enabled before the MSI-X
+ * registers can be accessed. Mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+ PCI_MSIX_FLAGS_ENABLE);
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
- base = msix_map_region(dev, msix_table_size(control));
- if (!base)
- return -ENOMEM;
+ tsize = msix_table_size(control);
+ base = msix_map_region(dev, tsize);
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_disable;
+ }
+
+ /* Ensure that all table entries are masked. */
+ msix_mask_all(base, tsize);
ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
- return ret;
+ goto out_disable;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
@@ -799,15 +840,7 @@
if (ret)
goto out_free;
- /*
- * Some devices require MSI-X to be enabled before we can touch the
- * MSI-X registers. We need to mask all the vectors to prevent
- * interrupts coming in before they're fully set up.
- */
- pci_msix_clear_and_set_ctrl(dev, 0,
- PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
-
- msix_program_entries(dev, entries);
+ msix_update_entries(dev, entries);
ret = populate_msi_sysfs(dev);
if (ret)
@@ -841,6 +874,9 @@
out_free:
free_msi_irqs(dev);
+out_disable:
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+
return ret;
}
@@ -928,8 +964,7 @@
/* Return the device with MSI unmasked as initial states */
mask = msi_mask(desc->msi_attrib.multi_cap);
- /* Keep cached state to be restored */
- __pci_msi_desc_mask_irq(desc, mask, ~mask);
+ msi_mask_irq(desc, mask, 0);
/* Restore dev->irq to its default pin-assertion IRQ */
dev->irq = desc->msi_attrib.default_irq;
@@ -1014,10 +1049,8 @@
}
/* Return the device with MSI-X masked as initial states */
- for_each_pci_msi_entry(entry, dev) {
- /* Keep cached states to be restored */
+ for_each_pci_msi_entry(entry, dev)
__pci_msix_desc_mask_irq(entry, 1);
- }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
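
A condensed sketch of the locking shape the __pci_msi_desc_mask_irq() rework introduces, with a hypothetical descriptor: the cached mask value and the hardware write form one read-modify-write transaction, so both happen under a raw spinlock instead of returning the new value for the caller to cache later.

#include <linux/spinlock.h>

struct example_desc {
        raw_spinlock_t lock;     /* serializes all mask updates */
        u32 masked;              /* cached copy of the hardware mask bits */
};

static void example_write_mask(struct example_desc *desc)
{
        /* hardware (config space) write of desc->masked, omitted */
}

static void example_mask_irq(struct example_desc *desc, u32 mask, u32 flag)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->masked &= ~mask;           /* clear the bits being updated */
        desc->masked |= flag;            /* apply the requested state */
        example_write_mask(desc);        /* cache and hardware stay in sync */
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
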
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 0608aae..0153abd 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -292,10 +292,41 @@
{}
};
+/*
+ * This lookup function tries to find the PCI device corresponding to a given
+ * host bridge.
+ *
+ * It assumes the host bridge device is the first PCI device in the
+ * bus->devices list and that the devfn is 00.0. These assumptions should hold
+ * for all the devices in the whitelist above.
+ *
+ * This function is equivalent to pci_get_slot(host->bus, 0); however, it
+ * does not take the pci_bus_sem lock because __host_bridge_whitelist()
+ * must not sleep.
+ *
+ * For this to be safe, the caller should hold a reference to a device on the
+ * bridge, which should ensure the host_bridge device will not be freed
+ * or removed from the head of the devices list.
+ */
+static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
+{
+ struct pci_dev *root;
+
+ root = list_first_entry_or_null(&host->bus->devices,
+ struct pci_dev, bus_list);
+
+ if (!root)
+ return NULL;
+ if (root->devfn != PCI_DEVFN(0, 0))
+ return NULL;
+
+ return root;
+}
+
static bool __host_bridge_whitelist(struct pci_host_bridge *host,
bool same_host_bridge)
{
- struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
+ struct pci_dev *root = pci_host_bridge_dev(host);
const struct pci_p2pdma_whitelist_entry *entry;
unsigned short vendor, device;
@@ -304,7 +335,6 @@
vendor = root->vendor;
device = root->device;
- pci_dev_put(root);
for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
if (vendor != entry->vendor || device != entry->device)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0c02d50..e30c2a7 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -944,6 +944,16 @@
if (!dev->is_hotplug_bridge)
return false;
+ /* Assume D3 support if the bridge is power-manageable by ACPI. */
+ adev = ACPI_COMPANION(&dev->dev);
+ if (!adev && !pci_dev_is_added(dev)) {
+ adev = acpi_pci_find_companion(&dev->dev);
+ ACPI_COMPANION_SET(&dev->dev, adev);
+ }
+
+ if (adev && acpi_device_power_manageable(adev))
+ return true;
+
/*
* Look for a special _DSD property for the root port and if it
* is set we know the hierarchy behind it supports D3 just fine.
@@ -1050,7 +1060,7 @@
{
while (bus->parent) {
if (acpi_pm_device_can_wakeup(&bus->self->dev))
- return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
+ return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
bus = bus->parent;
}
@@ -1058,7 +1068,7 @@
/* We have reached the root bus. */
if (bus->bridge) {
if (acpi_pm_device_can_wakeup(bus->bridge))
- return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
+ return acpi_pm_set_device_wakeup(bus->bridge, enable);
}
return 0;
}
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 5fd9010..d3b6b9a 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -195,8 +195,8 @@
* RO, the rest is reserved
*/
.w1c = GENMASK(19, 16),
- .ro = GENMASK(20, 19),
- .rsvd = GENMASK(31, 21),
+ .ro = GENMASK(21, 20),
+ .rsvd = GENMASK(31, 22),
},
[PCI_EXP_LNKCAP / 4] = {
@@ -236,7 +236,7 @@
PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16,
.ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS |
PCI_EXP_SLTSTA_EIS) << 16,
- .rsvd = GENMASK(15, 12) | (GENMASK(15, 9) << 16),
+ .rsvd = GENMASK(15, 13) | (GENMASK(15, 9) << 16),
},
[PCI_EXP_RTCTL / 4] = {
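
For reference, GENMASK(h, l) covers bits h through l inclusive, so adjacent register-field masks must be disjoint; the old table let .w1c and .ro both claim bit 19. A small sketch of the corrected, non-overlapping layout:

#include <linux/bits.h>
#include <linux/build_bug.h>

#define EXAMPLE_W1C     GENMASK(19, 16)  /* write-1-to-clear bits */
#define EXAMPLE_RO      GENMASK(21, 20)  /* read-only bits */
#define EXAMPLE_RSVD    GENMASK(31, 22)  /* reserved bits */

/* each bit may belong to exactly one field class */
static_assert((EXAMPLE_W1C & EXAMPLE_RO) == 0);
static_assert((EXAMPLE_RO & EXAMPLE_RSVD) == 0);
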
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a8124e4..5ea612a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -919,6 +919,8 @@
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
+ pci_power_t prev_state = pci_dev->current_state;
+ bool skip_bus_pm = pci_dev->skip_bus_pm;
if (dev_pm_may_skip_resume(dev))
return 0;
@@ -937,16 +939,18 @@
* configuration here and attempting to put them into D0 again is
* pointless, so avoid doing that.
*/
- if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
+ if (!(skip_bus_pm && pm_suspend_no_platform()))
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
+ pcie_pme_root_status_cleanup(pci_dev);
+
+ if (!skip_bus_pm && prev_state == PCI_D3cold)
+ pci_bridge_wait_for_secondary_bus(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
- pcie_pme_root_status_cleanup(pci_dev);
-
if (drv && drv->pm && drv->pm->resume_noirq)
error = drv->pm->resume_noirq(dev);
@@ -1076,17 +1080,22 @@
return error;
}
- if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
-
/*
- * pci_restore_state() requires the device to be in D0 (because of MSI
- * restoration among other things), so force it into D0 in case the
- * driver's "freeze" callbacks put it into a low-power state directly.
+ * Both the legacy ->resume_early() and the new pm->thaw_noirq()
+ * callbacks assume the device has been returned to D0 and its
+ * config state has been restored.
+ *
+ * In addition, pci_restore_state() restores MSI-X state in MMIO
+ * space, which requires the device to be in D0, so return it to D0
+ * in case the driver's "freeze" callbacks put it into a low-power
+ * state.
*/
pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume_early(dev);
+
if (drv && drv->pm && drv->pm->thaw_noirq)
error = drv->pm->thaw_noirq(dev);
@@ -1329,6 +1338,7 @@
int rc = 0;
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_power_t prev_state = pci_dev->current_state;
/*
* Restoring config space is necessary even if the device is not bound
@@ -1344,6 +1354,9 @@
pci_enable_wake(pci_dev, PCI_D0, false);
pci_fixup_device(pci_fixup_resume, pci_dev);
+ if (prev_state == PCI_D3cold)
+ pci_bridge_wait_for_secondary_bus(pci_dev);
+
if (pm && pm->runtime_resume)
rc = pm->runtime_resume(dev);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index a5910f9..9fb4ef5 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -162,7 +162,7 @@
len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
obj->buffer.length,
UTF16_LITTLE_ENDIAN,
- buf, PAGE_SIZE);
+ buf, PAGE_SIZE - 1);
buf[len] = '\n';
}
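
A sketch of the off-by-one the pci-label hunk fixes: utf16s_to_utf8s() may fill the output buffer up to its maxout argument, so one byte must stay reserved for the '\n' appended afterwards (a PAGE_SIZE sysfs buffer is assumed).

#include <linux/mm.h>
#include <linux/nls.h>

static int example_format_label(const wchar_t *in, int in_len, u8 *buf)
{
        int len;

        /* reserve one byte so buf[len] below can never run past PAGE_SIZE */
        len = utf16s_to_utf8s(in, in_len, UTF16_LITTLE_ENDIAN, buf,
                              PAGE_SIZE - 1);
        buf[len] = '\n';

        return len + 1;
}
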
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 7934129..e401f04 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -464,7 +464,8 @@
}
return count;
}
-static DEVICE_ATTR_WO(dev_rescan);
+static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
+ dev_rescan_store);
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -501,7 +502,8 @@
}
return count;
}
-static DEVICE_ATTR_WO(bus_rescan);
+static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
+ bus_rescan_store);
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a97e257..b9550cd 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -224,7 +224,7 @@
*endptr = strchrnul(path, ';');
- wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
+ wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
if (!wpath)
return -ENOMEM;
@@ -802,7 +802,9 @@
static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
- return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
+ if (pci_platform_pm && pci_platform_pm->bridge_d3)
+ return pci_platform_pm->bridge_d3(dev);
+ return false;
}
/**
@@ -1019,8 +1021,6 @@
* because we have already delayed for the bridge.
*/
if (dev->runtime_d3cold) {
- if (dev->d3cold_delay && !dev->imm_ready)
- msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
* whole hierarchy may be powered on into
@@ -1672,11 +1672,7 @@
* so that things like MSI message writing will behave as expected
* (e.g. if the device really is in D0 at enable time).
*/
- if (dev->pm_cap) {
- u16 pmcsr;
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
- }
+ pci_update_current_state(dev, dev->current_state);
if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
@@ -2253,7 +2249,14 @@
if (enable) {
int error;
- if (pci_pme_capable(dev, state))
+ /*
+ * Enable PME signaling if the device can signal PME from
+ * D3cold regardless of whether or not it can signal PME from
+ * the current target state, because that will allow it to
+ * signal PME when the hierarchy above it goes into D3cold and
+ * the device itself ends up in D3cold as a result of that.
+ */
+ if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
pci_pme_active(dev, true);
else
ret = 1;
@@ -2357,16 +2360,20 @@
if (dev->current_state == PCI_D3cold)
target_state = PCI_D3cold;
- if (wakeup) {
+ if (wakeup && dev->pme_support) {
+ pci_power_t state = target_state;
+
/*
* Find the deepest state from which the device can generate
* PME#.
*/
- if (dev->pme_support) {
- while (target_state
- && !(dev->pme_support & (1 << target_state)))
- target_state--;
- }
+ while (state && !(dev->pme_support & (1 << state)))
+ state--;
+
+ if (state)
+ return state;
+ else if (dev->pme_support & 1)
+ return PCI_D0;
}
return target_state;
@@ -3471,7 +3478,14 @@
return 0;
pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
- return (cap & PCI_REBAR_CAP_SIZES) >> 4;
+ cap &= PCI_REBAR_CAP_SIZES;
+
+ /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+ bar == 0 && cap == 0x7000)
+ cap = 0x3f000;
+
+ return cap >> 4;
}
/**
@@ -3896,6 +3910,10 @@
ret = logic_pio_register_range(range);
if (ret)
kfree(range);
+
+ /* Ignore duplicates due to deferred probing */
+ if (ret == -EEXIST)
+ ret = 0;
#endif
return ret;
@@ -4605,14 +4623,17 @@
return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
+
/**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
+ int delay)
{
int timeout = 1000;
bool ret;
@@ -4620,10 +4641,10 @@
/*
* Some controllers might not implement link active reporting. In this
- * case, we wait for 1000 + 100 ms.
+ * case, we wait for 1000 ms + any delay requested by the caller.
*/
if (!pdev->link_active_reporting) {
- msleep(1100);
+ msleep(timeout + delay);
return true;
}
@@ -4649,13 +4670,144 @@
timeout -= 10;
}
if (active && ret)
- msleep(100);
+ msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
+/*
+ * Find maximum D3cold delay required by all the devices on the bus. The
+ * spec says 100 ms, but firmware can lower it and we allow drivers to
+ * increase it as well.
+ *
+ * Called with @pci_bus_sem locked for reading.
+ */
+static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+{
+ const struct pci_dev *pdev;
+ int min_delay = 100;
+ int max_delay = 0;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (pdev->d3cold_delay < min_delay)
+ min_delay = pdev->d3cold_delay;
+ if (pdev->d3cold_delay > max_delay)
+ max_delay = pdev->d3cold_delay;
+ }
+
+ return max(min_delay, max_delay);
+}
+
+/**
+ * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
+ * @dev: PCI bridge
+ *
+ * Handle the necessary delays before access to the devices on the
+ * secondary side of the bridge is permitted after a D3cold to D0
+ * transition.
+ *
+ * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
+ * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
+ * 4.3.2.
+ */
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+{
+ struct pci_dev *child;
+ int delay;
+
+ if (pci_dev_is_disconnected(dev))
+ return;
+
+ if (!pci_is_bridge(dev) || !dev->bridge_d3)
+ return;
+
+ down_read(&pci_bus_sem);
+
+ /*
+ * We only deal with devices that are present currently on the bus.
+ * For any hot-added devices the access delay is handled in pciehp
+ * board_added(). In case of ACPI hotplug the firmware is expected
+ * to configure the devices before OS is notified.
+ */
+ if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
+ up_read(&pci_bus_sem);
+ return;
+ }
+
+ /* Take d3cold_delay requirements into account */
+ delay = pci_bus_max_d3cold_delay(dev->subordinate);
+ if (!delay) {
+ up_read(&pci_bus_sem);
+ return;
+ }
+
+ child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+ bus_list);
+ up_read(&pci_bus_sem);
+
+ /*
+ * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
+ * accessing the device after reset (that is 1000 ms + 100 ms). In
+ * practice this should not be needed because we don't do power
+ * management for them (see pci_bridge_d3_possible()).
+ */
+ if (!pci_is_pcie(dev)) {
+ pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
+ msleep(1000 + delay);
+ return;
+ }
+
+ /*
+ * For PCIe downstream and root ports that do not support speeds
+ * greater than 5 GT/s, we need to wait a minimum of 100 ms. For
+ * higher speeds (gen3) we need to first wait for the data link
+ * layer to become active.
+ *
+ * However, 100 ms is the minimum, and the PCIe spec says software
+ * must allow at least 1 s before it can determine that a device
+ * that did not respond is broken. There is evidence that 100 ms is
+ * not always enough; for example, a certain Titan Ridge xHCI
+ * controller does not always respond to configuration requests if
+ * we wait for only 100 ms (see
+ * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
+ *
+ * Therefore we wait for 100 ms and check for the device presence.
+ * If it is still not present, give it an additional 100 ms.
+ */
+ if (!pcie_downstream_port(dev))
+ return;
+
+ if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+ pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+ msleep(delay);
+ } else {
+ pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
+ delay);
+ if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ /* Did not train, no need to wait any further */
+ return;
+ }
+ }
+
+ if (!pci_device_is_present(child)) {
+ pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+ msleep(delay);
+ }
+}
+
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
@@ -5854,10 +6006,29 @@
return 0;
}
+#ifdef CONFIG_ACPI
+bool pci_pr3_present(struct pci_dev *pdev)
+{
+ struct acpi_device *adev;
+
+ if (acpi_disabled)
+ return false;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return false;
+
+ return adev->power.flags.power_resources &&
+ acpi_has_method(adev->handle, "_PR3");
+}
+EXPORT_SYMBOL_GPL(pci_pr3_present);
+#endif
+
/**
* pci_add_dma_alias - Add a DMA devfn alias for a device
* @dev: the PCI device for which alias is added
- * @devfn: alias slot and function
+ * @devfn_from: alias slot and function
+ * @nr_devfns: number of subsequent devfns to alias
*
* This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
* which is used to program permissible bus-devfn source addresses for DMA
@@ -5873,18 +6044,29 @@
* cannot be left as a userspace activity). DMA aliases should therefore
* be configured via quirks, such as the PCI fixup header quirk.
*/
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
{
+ int devfn_to;
+
+ nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
+ devfn_to = devfn_from + nr_devfns - 1;
+
if (!dev->dma_alias_mask)
- dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL);
+ dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
if (!dev->dma_alias_mask) {
pci_warn(dev, "Unable to allocate DMA alias mask\n");
return;
}
- set_bit(devfn, dev->dma_alias_mask);
- pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
- PCI_SLOT(devfn), PCI_FUNC(devfn));
+ bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
+
+ if (nr_devfns == 1)
+ pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
+ PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
+ else if (nr_devfns > 1)
+ pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
+ PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
+ PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
@@ -5965,19 +6147,21 @@
while (*p) {
count = 0;
if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
- p[count] == '@') {
+ p[count] == '@') {
p += count + 1;
+ if (align_order > 63) {
+ pr_err("PCI: Invalid requested alignment (order %d)\n",
+ align_order);
+ align_order = PAGE_SHIFT;
+ }
} else {
- align_order = -1;
+ align_order = PAGE_SHIFT;
}
ret = pci_dev_str_match(dev, p, &p);
if (ret == 1) {
*resize = true;
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
+ align = 1ULL << align_order;
break;
} else if (ret < 0) {
pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
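
A sketch of the shift guard added to resource_alignment parsing above: the computed alignment is 64-bit, so any order above 63 would be an undefined shift in 1ULL << align_order; bad or missing orders now fall back to PAGE_SHIFT instead of a -1 sentinel.

#include <linux/types.h>
#include <asm/page.h>

static resource_size_t example_align_from_order(int align_order)
{
        /* orders outside 0..63 would shift out of a 64-bit value */
        if (align_order < 0 || align_order > 63)
                align_order = PAGE_SHIFT;

        return 1ULL << align_order;
}
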
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 3f6947e..572c2f0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,9 @@
#include <linux/pci.h>
+/* Number of possible devfns: 0.0 to 1f.7 inclusive */
+#define MAX_NR_DEVFNS 256
+
#define PCI_FIND_CAP_TTL 48
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
@@ -104,6 +107,7 @@
void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -584,6 +588,12 @@
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
+#else
+static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
+ u16 segment, struct resource *res)
+{
+ return -ENODEV;
+}
#endif
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index b45bc47..271aecf 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1387,6 +1387,7 @@
return -ENOMEM;
rpc->rpd = port;
+ INIT_KFIFO(rpc->aer_fifo);
set_service_data(dev, rpc);
status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
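
A minimal sketch of the rule behind the INIT_KFIFO() one-liner above: a kfifo embedded via DECLARE_KFIFO() in a dynamically allocated structure is not ready to use even when zeroed, because its mask and data pointer still need setting up (hypothetical example_rpc).

#include <linux/kfifo.h>
#include <linux/slab.h>

struct example_rpc {
        DECLARE_KFIFO(fifo, u32, 16);    /* capacity must be a power of two */
};

static struct example_rpc *example_rpc_alloc(void)
{
        struct example_rpc *rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);

        if (!rpc)
                return NULL;

        /* zeroed memory is not enough: mask and data pointer are set here */
        INIT_KFIFO(rpc->fifo);
        return rpc;
}
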
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 652ef23..7624c71 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -64,6 +64,7 @@
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ u32 clkpm_disable:1; /* Clock PM disabled */
/* Exit latencies */
struct aspm_latency latency_up; /* Upstream direction exit latency */
@@ -161,8 +162,11 @@
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- /* Don't enable Clock PM if the link is not Clock PM capable */
- if (!link->clkpm_capable)
+ /*
+ * Don't enable Clock PM if the link is not Clock PM capable
+ * or Clock PM is disabled
+ */
+ if (!link->clkpm_capable || link->clkpm_disable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
@@ -192,7 +196,8 @@
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- link->clkpm_capable = (blacklist) ? 0 : capable;
+ link->clkpm_capable = capable;
+ link->clkpm_disable = blacklist ? 1 : 0;
}
static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -623,16 +628,6 @@
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
- /*
- * If the downstream component has pci bridge function, don't
- * do ASPM for now.
- */
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
- link->aspm_disable = ASPM_STATE_ALL;
- break;
- }
- }
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
@@ -742,9 +737,9 @@
/* Enable what we need to enable */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
@@ -1097,10 +1092,9 @@
link->aspm_disable |= ASPM_STATE_L1;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
- if (state & PCIE_LINK_STATE_CLKPM) {
- link->clkpm_capable = 0;
- pcie_set_clkpm(link, 0);
- }
+ if (state & PCIE_LINK_STATE_CLKPM)
+ link->clkpm_disable = 1;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
@@ -1163,6 +1157,7 @@
cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
else
cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
+ cnt += sprintf(buffer + cnt, "\n");
return cnt;
}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 1b33012..8637f60 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -255,8 +255,13 @@
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- services |= PCIE_PORT_SERVICE_BWNOTIF;
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
+ u32 linkcap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
+ if (linkcap & PCI_EXP_LNKCAP_LBNC)
+ services |= PCIE_PORT_SERVICE_BWNOTIF;
+ }
return services;
}
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 98cfa30..357a454 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -21,7 +21,7 @@
snprintf(clock_desc, sizeof(clock_desc), ">254ns");
break;
default:
- snprintf(clock_desc, sizeof(clock_desc), "%udns",
+ snprintf(clock_desc, sizeof(clock_desc), "%uns",
dev->ptm_granularity);
break;
}
@@ -39,10 +39,6 @@
if (!pci_is_pcie(dev))
return;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return;
-
/*
* Enable PTM only on interior devices (root ports, switch ports,
* etc.) on the assumption that it causes no link traffic until an
@@ -52,6 +48,23 @@
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
return;
+ /*
+ * Switch Downstream Ports are not permitted to have a PTM
+ * capability; their PTM behavior is controlled by the Upstream
+ * Port (PCIe r5.0, sec 7.9.16).
+ */
+ ups = pci_upstream_bridge(dev);
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
+ ups && ups->ptm_enabled) {
+ dev->ptm_granularity = ups->ptm_granularity;
+ dev->ptm_enabled = 1;
+ return;
+ }
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return;
+
pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
@@ -61,7 +74,6 @@
* the spec recommendation (PCIe r3.1, sec 7.32.3), select the
* furthest upstream Time Source as the PTM Root.
*/
- ups = pci_upstream_bridge(dev);
if (ups && ups->ptm_enabled) {
ctrl = PCI_PTM_CTRL_ENABLE;
if (ups->ptm_granularity == 0)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3d5271a..f28213b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -572,6 +572,7 @@
bridge->release_fn(bridge);
pci_free_resource_list(&bridge->windows);
+ pci_free_resource_list(&bridge->dma_ranges);
}
static void pci_release_host_bridge_dev(struct device *dev)
@@ -866,9 +867,10 @@
goto free;
err = device_register(&bridge->dev);
- if (err)
+ if (err) {
put_device(&bridge->dev);
-
+ goto free;
+ }
bus->bridge = get_device(&bridge->dev);
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
@@ -1089,14 +1091,15 @@
* @sec: updated with secondary bus number from EA
* @sub: updated with subordinate bus number from EA
*
- * If @dev is a bridge with EA capability, update @sec and @sub with
- * fixed bus numbers from the capability and return true. Otherwise,
- * return false.
+ * If @dev is a bridge with EA capability that specifies valid secondary
+ * and subordinate bus numbers, return true with the bus numbers in @sec
+ * and @sub. Otherwise return false.
*/
static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
{
int ea, offset;
u32 dw;
+ u8 ea_sec, ea_sub;
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
return false;
@@ -1108,8 +1111,13 @@
offset = ea + PCI_EA_FIRST_ENT;
pci_read_config_dword(dev, offset, &dw);
- *sec = dw & PCI_EA_SEC_BUS_MASK;
- *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ ea_sec = dw & PCI_EA_SEC_BUS_MASK;
+ ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ if (ea_sec == 0 || ea_sub < ea_sec)
+ return false;
+
+ *sec = ea_sec;
+ *sub = ea_sub;
return true;
}
@@ -1770,7 +1778,7 @@
/* Device class may be changed after fixup */
class = dev->class >> 8;
- if (dev->non_compliant_bars) {
+ if (dev->non_compliant_bars && !dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
@@ -1882,13 +1890,33 @@
struct pci_dev *bridge = pci_upstream_bridge(dev);
int mps, mpss, p_mps, rc;
- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ if (!pci_is_pcie(dev))
return;
/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
if (dev->is_virtfn)
return;
+ /*
+ * For Root Complex Integrated Endpoints, program the maximum
+ * supported value unless limited by the PCIE_BUS_PEER2PEER case.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ mps = 128;
+ else
+ mps = 128 << dev->pcie_mpss;
+ rc = pcie_set_mps(dev, mps);
+ if (rc) {
+ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps);
+ }
+ return;
+ }
+
+ if (!bridge || !pci_is_pcie(bridge))
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
@@ -2271,6 +2299,7 @@
pci_set_of_node(dev);
if (pci_setup_device(dev)) {
+ pci_release_of_node(dev);
pci_bus_put(dev->bus);
kfree(dev);
return NULL;
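
A sketch of the MPS encoding the new RC_END branch relies on: pcie_mpss is the Max_Payload_Size_Supported encoding from Device Capabilities, where payload bytes = 128 << encoding, so 0 means 128 bytes and 5 means 4096 bytes.

#include <linux/pci.h>

static int example_set_max_mps(struct pci_dev *dev)
{
        int mps = 128 << dev->pcie_mpss;  /* decode: 0 -> 128B ... 5 -> 4096B */

        return pcie_set_mps(dev, mps);    /* rejects out-of-range values */
}
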
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 320255e..686298c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -27,6 +27,7 @@
#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <linux/switchtec.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -1871,19 +1872,41 @@
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
+static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
+{
+ if (dev->d3_delay >= delay)
+ return;
+
+ dev->d3_delay = delay;
+ pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
+ dev->d3_delay);
+}
+
static void quirk_radeon_pm(struct pci_dev *dev)
{
if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
- dev->subsystem_device == 0x00e2) {
- if (dev->d3_delay < 20) {
- dev->d3_delay = 20;
- pci_info(dev, "extending delay after power-on from D3 to %d msec\n",
- dev->d3_delay);
- }
- }
+ dev->subsystem_device == 0x00e2)
+ quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
+/*
+ * Ryzen 5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=205587
+ *
+ * The kernel attempts to transition these devices to D3cold, but that seems
+ * to be ineffective on the platforms in question; the PCI device appears to
+ * remain on in D3hot state. The D3hot-to-D0 transition then requires an
+ * extended delay in order to succeed.
+ */
+static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
+{
+ quirk_d3hot_delay(dev, 20);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
+
#ifdef CONFIG_X86_IO_APIC
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
@@ -1949,26 +1972,92 @@
/*
* IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no
* 300641-004US, section 5.7.3.
+ *
+ * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003.
+ * Core IO on Xeon E5 v2, see Intel order no 329188-003.
+ * Core IO on Xeon E7 v2, see Intel order no 329595-002.
+ * Core IO on Xeon E5 v3, see Intel order no 330784-003.
+ * Core IO on Xeon E7 v3, see Intel order no 332315-001US.
+ * Core IO on Xeon E5 v4, see Intel order no 333810-002US.
+ * Core IO on Xeon E7 v4, see Intel order no 332315-001US.
+ * Core IO on Xeon D-1500, see Intel order no 332051-001.
+ * Core IO on Xeon Scalable, see Intel order no 610950.
*/
-#define INTEL_6300_IOAPIC_ABAR 0x40
+#define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */
#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
+#define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */
+#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25)
+
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
u16 pci_config_word;
+ u32 pci_config_dword;
if (noioapicquirk)
return;
- pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
- pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
- pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
-
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_ESB_10:
+ pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
+ &pci_config_word);
+ pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
+ pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
+ pci_config_word);
+ break;
+ case 0x3c28: /* Xeon E5 1600/2600/4600 */
+ case 0x0e28: /* Xeon E5/E7 V2 */
+ case 0x2f28: /* Xeon E5/E7 V3,V4 */
+ case 0x6f28: /* Xeon D-1500 */
+ case 0x2034: /* Xeon Scalable Family */
+ pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
+ &pci_config_dword);
+ pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
+ pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
+ pci_config_dword);
+ break;
+ default:
+ return;
+ }
pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+/*
+ * Device 29 Func 5 Device IDs of IO-APIC
+ * containing the ABAR (APIC1 Alternate Base Address Register)
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
+ quirk_disable_intel_boot_interrupt);
+
+/*
+ * Device 5 Func 0 Device IDs of Core IO modules/hubs
+ * containing Coherent Interface Protocol Interrupt Control
+ *
+ * Device IDs obtained from the Volume 2 datasheets of the
+ * families referenced above.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
+ quirk_disable_intel_boot_interrupt);
/* Disable boot interrupts on HT-1000 */
#define BC_HT1000_FEATURE_REG 0x64
@@ -2243,6 +2332,19 @@
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
+{
+ pci_info(dev, "Disabling ASPM L0s/L1\n");
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+}
+
+/*
+ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
+ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
+ * disable both L0s and L1 for now to be safe.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
* Link bit cleared after starting the link retrain process to allow this
@@ -2381,32 +2483,6 @@
PCI_DEVICE_ID_TIGON3_5719,
quirk_brcm_5719_limit_mrrs);
-#ifdef CONFIG_PCIE_IPROC_PLATFORM
-static void quirk_paxc_bridge(struct pci_dev *pdev)
-{
- /*
- * The PCI config space is shared with the PAXC root port and the first
- * Ethernet device. So, we need to workaround this by telling the PCI
- * code that the bridge is not an Ethernet device.
- */
- if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
-
- /*
- * MPSS is not being set properly (as it is currently 0). This is
- * because that area of the PCI config space is hard coded to zero, and
- * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
- * so that the MPS can be set to the real max value.
- */
- pdev->pcie_mpss = 2;
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
-#endif
-
/*
* Originally in EDAC sources for i82875P: Intel tells BIOS developers to
* hide device 6 which configures the overflow device access containing the
@@ -3170,12 +3246,13 @@
{
dev->pcie_mpss = 1; /* 256 bytes */
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
/*
* Intel 5000 and 5100 Memory controllers have an erratum with read completion
@@ -3484,6 +3561,18 @@
}
/*
+ * Some NVIDIA GPU devices do not work with bus reset; Secondary Bus Reset
+ * (SBR) must be prevented for the affected devices.
+ */
+static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
+{
+ if ((dev->device & 0xffc0) == 0x2340)
+ quirk_no_bus_reset(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ quirk_nvidia_no_bus_reset);
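The vendor-wide hook above relies on a mask test instead of a device list: 0xffc0 clears the low six bits, so the comparison covers a block of 64 consecutive device IDs. A worked example:

/*
 * Worked example of the mask test in quirk_nvidia_no_bus_reset():
 *   0x2340 & 0xffc0 == 0x2340  -> first ID of the block, quirk applies
 *   0x237f & 0xffc0 == 0x2340  -> last ID of the block, quirk applies
 *   0x2380 & 0xffc0 == 0x2380  -> outside the block, quirk skipped
 */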
+
+/*
* Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
* The device will throw a Link Down error on AER-capable systems and
* regardless of AER, config space of the device is never accessible again
@@ -3503,6 +3592,16 @@
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+/*
+ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
+ * automatically disables LTSSM when Secondary Bus Reset is received and
+ * the device stops working. Prevent bus reset for these devices. With
+ * this change, the device can be assigned to VMs with VFIO, but it will
+ * leak state between VMs. Reference:
+ * https://e2e.ti.com/support/processors/f/791/t/954382
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
+
static void quirk_no_pm_reset(struct pci_dev *dev)
{
/*
@@ -3571,6 +3670,16 @@
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
return;
+
+ /*
+ * SXIO/SXFP/SXLF turns off power to the Thunderbolt controller.
+ * We don't know how to turn it back on again, but firmware does,
+ * so we can only use SXIO/SXFP/SXLF if we're suspending via
+ * firmware.
+ */
+ if (!pm_suspend_via_firmware())
+ return;
+
bridge = ACPI_HANDLE(&dev->dev);
if (!bridge)
return;
@@ -3895,6 +4004,69 @@
return 0;
}
+#define PCI_DEVICE_ID_HINIC_VF 0x375E
+#define HINIC_VF_FLR_TYPE 0x1000
+#define HINIC_VF_FLR_CAP_BIT (1UL << 30)
+#define HINIC_VF_OP 0xE80
+#define HINIC_VF_FLR_PROC_BIT (1UL << 18)
+#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
+
+/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
+static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+{
+ unsigned long timeout;
+ void __iomem *bar;
+ u32 val;
+
+ if (probe)
+ return 0;
+
+ bar = pci_iomap(pdev, 0, 0);
+ if (!bar)
+ return -ENOTTY;
+
+ /* Get and check firmware capabilities */
+ val = ioread32be(bar + HINIC_VF_FLR_TYPE);
+ if (!(val & HINIC_VF_FLR_CAP_BIT)) {
+ pci_iounmap(pdev, bar);
+ return -ENOTTY;
+ }
+
+ /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
+ val = ioread32be(bar + HINIC_VF_OP);
+ val = val | HINIC_VF_FLR_PROC_BIT;
+ iowrite32be(val, bar + HINIC_VF_OP);
+
+ pcie_flr(pdev);
+
+ /*
+ * The device must recapture its Bus and Device Numbers after FLR
+	 * in order to generate Completions. Issue a config write to let the
+ * device capture this information.
+ */
+ pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
+
+ /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
+ timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
+ do {
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+ msleep(20);
+ } while (time_before(jiffies, timeout));
+
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+
+ pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
+
+reset_complete:
+ pci_iounmap(pdev, bar);
+
+ return 0;
+}
+
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
@@ -3906,6 +4078,8 @@
{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
+ { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+ reset_hinic_vf_dev },
{ 0 }
};
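For reference, this table is consulted by pci_dev_specific_reset() when a device-specific reset is requested; a sketch of the assumed lookup, mirroring the helper of the same name in quirks.c:

int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
	const struct pci_dev_reset_methods *i;

	/* First matching vendor/device entry wins; PCI_ANY_ID wildcards */
	for (i = pci_dev_reset_methods; i->reset; i++) {
		if ((i->vendor == dev->vendor ||
		     i->vendor == (u16)PCI_ANY_ID) &&
		    (i->device == dev->device ||
		     i->device == (u16)PCI_ANY_ID))
			return i->reset(dev, probe);
	}

	return -ENOTTY;
}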
@@ -3932,7 +4106,7 @@
static void quirk_dma_func0_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 0)
- pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
}
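Throughout this patch, pci_add_dma_alias() gains a third parameter: the number of consecutive devfns to alias starting from the given one, so the existing single-alias callers now pass 1 (and the PLX NTB quirk later in the file passes 256 to alias every possible devfn). A sketch of the assumed updated helper, modeled on the corresponding mainline change:

void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	/* Clamp the range so it never runs past the last devfn */
	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	/* Mark the whole [devfn_from, devfn_to] range as aliases */
	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
}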
/*
@@ -3946,7 +4120,7 @@
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 1)
- pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
+ pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}
/*
@@ -3981,6 +4155,9 @@
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
quirk_dma_func1_alias);
@@ -4031,7 +4208,7 @@
id = pci_match_id(fixed_dma_alias_tbl, dev);
if (id)
- pci_add_dma_alias(dev, id->driver_data);
+ pci_add_dma_alias(dev, id->driver_data, 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
@@ -4073,14 +4250,43 @@
*/
static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
{
- pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
- pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
- pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
/*
+ * Intel Visual Compute Accelerator (VCA) is a family of PCIe add-in devices
+ * exposing computational units via Non Transparent Bridges (NTB, PEX 87xx).
+ *
+ * Similarly to MIC x200, we need to add DMA aliases to allow buffer access
+ * when IOMMU is enabled. These aliases allow computational unit access to
+ * host memory. These aliases mark the whole VCA device as one IOMMU
+ * group.
+ *
+ * All possible slot numbers (0x20) are used, since we are unable to tell
+ * which slot is used on the other side. This quirk is intended for both host
+ * and computational unit sides. The VCA devices have up to five functions
+ * (four for DMA channels and one additional).
+ */
+static void quirk_pex_vca_alias(struct pci_dev *pdev)
+{
+ const unsigned int num_pci_slots = 0x20;
+ unsigned int slot;
+
+ for (slot = 0; slot < num_pci_slots; slot++)
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
+
+/*
* The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are
* associated not at the root bus, but at a bridge below. This quirk avoids
* generating invalid DMA aliases.
@@ -4263,6 +4469,47 @@
quirk_chelsio_T5_disable_root_port_attributes);
/*
+ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
+ * by a device
+ * @acs_ctrl_req: Bitmask of desired ACS controls
+ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
+ * the hardware design
+ *
+ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
+ * in @acs_ctrl_ena, i.e., the device provides all the access controls the
+ * caller desires. Return 0 otherwise.
+ */
+static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
+{
+ if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
+ return 1;
+ return 0;
+}
+
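A quick worked example of the containment check, using ACS flag macros from include/uapi/linux/pci_regs.h:

	u16 ena = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	pci_acs_ctrl_enabled(PCI_ACS_SV | PCI_ACS_RR, ena); /* 1: both bits enabled  */
	pci_acs_ctrl_enabled(PCI_ACS_TB | PCI_ACS_RR, ena); /* 0: PCI_ACS_TB missing */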
+/*
+ * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability.
+ * However, the hardware implementation can block peer-to-peer transactions
+ * between them, providing ACS-like functionality.
+ */
+static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ if (!pci_is_pcie(dev) ||
+ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ return -ENOTTY;
+
+ switch (dev->device) {
+ case 0x0710 ... 0x071e:
+ case 0x0721:
+ case 0x0723 ... 0x0732:
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+
+ return false;
+}
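Note that the "case 0x0710 ... 0x071e:" labels use the GCC case-range extension, which kernel code relies on; each range label is shorthand for enumerating every ID in the interval:

	switch (dev->device) {
	case 0x0710 ... 0x071e: /* same as case 0x0710: case 0x0711: ... case 0x071e: */
		break;
	}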
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
@@ -4302,10 +4549,12 @@
if (ACPI_FAILURE(status))
return -ENODEV;
+ acpi_put_table(header);
+
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
- return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
return -ENODEV;
#endif
@@ -4313,33 +4562,38 @@
static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
{
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ switch (dev->device) {
/*
- * Effectively selects all downstream ports for whole ThunderX 1
- * family by 0xf800 mask (which represents 8 SoCs), while the lower
- * bits of device ID are used to indicate which subdevice is used
- * within the SoC.
+	 * Effectively selects all downstream ports of the whole ThunderX1
+	 * family (which comprises 8 SoCs).
*/
- return (pci_is_pcie(dev) &&
- (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
- ((dev->device & 0xf800) == 0xa000));
+ case 0xa000 ... 0xa7ff: /* ThunderX1 */
+ case 0xaf84: /* ThunderX2 */
+ case 0xb884: /* ThunderX3 */
+ return true;
+ default:
+ return false;
+ }
}
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
+ if (!pci_quirk_cavium_acs_match(dev))
+ return -ENOTTY;
+
/*
- * Cavium root ports don't advertise an ACS capability. However,
+ * Cavium Root Ports don't advertise an ACS capability. However,
* the RTL internally implements similar protection as if ACS had
- * Request Redirection, Completion Redirection, Source Validation,
+ * Source Validation, Request Redirection, Completion Redirection,
* and Upstream Forwarding features enabled. Assert that the
* hardware implements and enables equivalent ACS functionality for
* these flags.
*/
- acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
-
- if (!pci_quirk_cavium_acs_match(dev))
- return -ENOTTY;
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4349,13 +4603,12 @@
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
- * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
* into that category as provided by Intel in Red Hat bugzilla 1037684.
@@ -4403,37 +4656,44 @@
return false;
}
-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
-
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
- INTEL_PCH_ACS_FLAGS : 0;
-
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
- return acs_flags & ~flags ? 0 : 1;
+ if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+
+ return pci_acs_ctrl_enabled(acs_flags, 0);
}
/*
- * These QCOM root ports do provide ACS-like features to disable peer
+ * These QCOM Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. Hardware supports source validation but it
* will report the issue as Completer Abort instead of ACS Violation.
- * Hardware doesn't support peer-to-peer and each root port is a root
- * complex with unique segment numbers. It is not possible for one root
- * port to pass traffic to another root port. All PCIe transactions are
- * terminated inside the root port.
+ * Hardware doesn't support peer-to-peer and each Root Port is a Root
+ * Complex with unique segment numbers. It is not possible for one Root
+ * Port to pass traffic to another Root Port. All PCIe transactions are
+ * terminated inside the Root Port.
*/
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
- int ret = acs_flags & ~flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
- pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
-
- return ret;
+/*
+ * Each of these NXP Root Ports is in a Root Complex with a unique segment
+ * number and does provide isolation features to disable peer transactions
+ * and validate bus numbers in requests, but does not provide an ACS
+ * capability.
+ */
+static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4534,7 +4794,7 @@
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
- return acs_flags & ~ctrl ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4548,10 +4808,23 @@
* perform peer-to-peer with other functions, allowing us to mask out
* these bits as if they were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+}
- return acs_flags ? 0 : 1;
+static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+	 * Intel RCiEPs are required to allow p2p only on translated
+ * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16,
+ * "Root-Complex Peer to Peer Considerations".
+ */
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
+ return -ENOTTY;
+
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4562,9 +4835,8 @@
* Allow each Root Port to be in a separate IOMMU group by masking
* SV/RR/CR/UF bits.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static const struct pci_dev_acs_enabled {
@@ -4637,6 +4909,7 @@
/* I219 */
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
@@ -4649,6 +4922,10 @@
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
+ /* Cavium multi-function devices */
+ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
/* APM X-Gene */
{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
/* Ampere Computing */
@@ -4660,12 +4937,64 @@
{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+ /* Broadcom multi-function device */
+ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
+ /* Zhaoxin multi-function devices */
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
+ /* NXP root ports, xx=16, 12, or 08 cores */
+ /* LX2xx0A : without security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
+ /* LX2xx0C : security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
+ /* LX2xx0E : security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
+ /* LX2xx0N : without security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
+ /* LX2xx2A : without security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
+ /* LX2xx2C : security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
+ /* LX2xx2E : security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
+ /* LX2xx2N : without security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
+ /* Zhaoxin Root/Downstream Ports */
+ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
{ 0 }
};
+/*
+ * pci_dev_specific_acs_enabled - check whether device provides ACS controls
+ * @dev: PCI device
+ * @acs_flags: Bitmask of desired ACS controls
+ *
+ * Returns:
+ * -ENOTTY: No quirk applies to this device; we can't tell whether the
+ * device provides the desired controls
+ * 0: Device does not provide all the desired controls
+ * >0: Device provides all the controls in @acs_flags
+ */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
@@ -4706,7 +5035,7 @@
#define INTEL_BSPR_REG_BPPD (1 << 9)
/* Upstream Peer Decode Configuration Register */
-#define INTEL_UPDCR_REG 0x1114
+#define INTEL_UPDCR_REG 0x1014
/* 5:0 Peer Decode Enable bits */
#define INTEL_UPDCR_REG_MASK 0x3f
@@ -4985,13 +5314,25 @@
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
-/* FLR may cause some 82579 devices to hang */
-static void quirk_intel_no_flr(struct pci_dev *dev)
+/*
+ * FLR may cause the following devices to hang:
+ *
+ * AMD Starship/Matisse HD Audio Controller 0x1487
+ * AMD Starship USB 3.0 Host Controller 0x148c
+ * AMD Matisse USB 3.0 Host Controller 0x149c
+ * Intel 82579LM Gigabit Ethernet Controller 0x1502
+ * Intel 82579V Gigabit Ethernet Controller 0x1503
+ */
+static void quirk_no_flr(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
@@ -5015,18 +5356,30 @@
#ifdef CONFIG_PCI_ATS
/*
- * Some devices have a broken ATS implementation causing IOMMU stalls.
- * Don't use ATS for those devices.
+ * Some devices require additional driver setup to enable ATS. Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+ * chance to load and configure the device.
*/
-static void quirk_no_ats(struct pci_dev *pdev)
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- pci_info(pdev, "disabling ATS (broken on this device)\n");
+ if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
+ (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
+ (pdev->device == 0x7341 && pdev->revision != 0x00))
+ return;
+
+ pci_info(pdev, "disabling ATS\n");
pdev->ats_cap = 0;
}
/* AMD Stoney platform GPU */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+/* AMD Iceland dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi10 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
+/* AMD Navi14 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
@@ -5090,7 +5443,7 @@
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
/*
- * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * Create device link for GPUs with integrated USB xHCI Host
* controller to VGA.
*/
static void quirk_gpu_usb(struct pci_dev *usb)
@@ -5099,9 +5452,11 @@
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
/*
- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * Create device link for GPUs with integrated Type-C UCSI controller
* to VGA. Currently there is no class code defined for UCSI device over PCI
* so using UNKNOWN class for now and it will be updated when UCSI
* over PCI gets a class code.
@@ -5114,6 +5469,9 @@
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_UNKNOWN, 8,
quirk_gpu_usb_typec_ucsi);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN, 8,
+ quirk_gpu_usb_typec_ucsi);
/*
* Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
@@ -5273,7 +5631,7 @@
pci_dbg(pdev,
"Aliasing Partition %d Proxy ID %02x.%d\n",
pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
- pci_add_dma_alias(pdev, devfn);
+ pci_add_dma_alias(pdev, devfn, 1);
}
}
@@ -5316,6 +5674,21 @@
SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
/*
+ * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
+ * These IDs are used to forward responses to the originator on the other
+ * side of the NTB. Alias all possible IDs to the NTB to permit access when
+ * the IOMMU is turned on.
+ */
+static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
+{
+ pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");
+ /* PLX NTB may use all 256 devfns */
+ pci_add_dma_alias(pdev, 0, 256);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
+
+/*
 * On Lenovo Thinkpad P50 SKUs with an Nvidia Quadro M1000M, the BIOS does
* not always reset the secondary Nvidia GPU between reboots if the system
* is configured to use Hybrid Graphics mode. This results in the GPU
@@ -5372,3 +5745,34 @@
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
PCI_CLASS_DISPLAY_VGA, 8,
quirk_reset_lenovo_thinkpad_p50_nvgpu);
+
+/*
+ * Device [1b21:2142]
+ * When in D0, PME# is not asserted when a USB 3.0 device is plugged in.
+ */
+static void pci_fixup_no_d0_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# does not work under D0, disabling it\n");
+ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
+
+/*
+ * Devices [12d8:400e] and [12d8:400f]
+ * These devices advertise PME# support in all power states but don't
+ * reliably assert it.
+ */
+static void pci_fixup_no_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# is unreliable, disabling it\n");
+ dev->pme_support = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme);
+
+static void apex_pci_fixup_class(struct pci_dev *pdev)
+{
+ pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
+ PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 137bf0c..8fc9a4e 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -195,20 +195,3 @@
pci_disable_rom(pdev);
}
EXPORT_SYMBOL(pci_unmap_rom);
-
-/**
- * pci_platform_rom - provides a pointer to any ROM image provided by the
- * platform
- * @pdev: pointer to pci device struct
- * @size: pointer to receive size of pci window over ROM
- */
-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
-{
- if (pdev->rom && pdev->romlen) {
- *size = pdev->romlen;
- return phys_to_virt((phys_addr_t)pdev->rom);
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(pci_platform_rom);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index bade140..e4dbdef 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -41,9 +41,9 @@
* DMA, iterate over that too.
*/
if (unlikely(pdev->dma_alias_mask)) {
- u8 devfn;
+ unsigned int devfn;
- for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) {
+ for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS) {
ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
data);
if (ret)
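The point of the changed bound: for_each_set_bit() visits set bits strictly below its size argument, so the old U8_MAX (255) limit could never reach devfn 0xff. Assuming MAX_NR_DEVFNS is 256, a side-by-side illustration:

	unsigned int devfn;

	/* for_each_set_bit(bit, addr, size) visits set bits in [0, size) */
	for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX)
		;	/* old bound: 255, an alias at devfn 0xff was skipped */

	for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS)
		;	/* new bound: 256, every possible devfn is visited */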
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index e7dbe21..44f4866 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -752,24 +752,32 @@
}
/*
- * Helper function for sizing routines: find first available bus resource
- * of a given type. Note: we intentionally skip the bus resources which
- * have already been assigned (that is, have non-NULL parent resource).
+ * Helper function for sizing routines. Assigned resources have non-NULL
+ * parent resource.
+ *
+ * Return first unassigned resource of the correct type. If there is none,
+ * return first assigned resource of the correct type. If none of the
+ * above, return NULL.
+ *
+ * Returning an assigned resource of the correct type allows the caller to
+ * distinguish between already assigned and no resource of the correct type.
*/
-static struct resource *find_free_bus_resource(struct pci_bus *bus,
- unsigned long type_mask,
- unsigned long type)
+static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
+ unsigned long type_mask,
+ unsigned long type)
{
+ struct resource *r, *r_assigned = NULL;
int i;
- struct resource *r;
pci_bus_for_each_resource(bus, r, i) {
if (r == &ioport_resource || r == &iomem_resource)
continue;
if (r && (r->flags & type_mask) == type && !r->parent)
return r;
+ if (r && (r->flags & type_mask) == type && !r_assigned)
+ r_assigned = r;
}
- return NULL;
+ return r_assigned;
}
static resource_size_t calculate_iosize(resource_size_t size,
@@ -866,8 +874,8 @@
struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
- IORESOURCE_IO);
+ struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
+ IORESOURCE_IO);
resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
resource_size_t min_align, align;
@@ -875,6 +883,10 @@
if (!b_res)
return;
+ /* If resource is already assigned, nothing more to do */
+ if (b_res->parent)
+ return;
+
min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -978,7 +990,7 @@
resource_size_t min_align, align, size, size0, size1;
resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
int order, max_order;
- struct resource *b_res = find_free_bus_resource(bus,
+ struct resource *b_res = find_bus_resource_of_type(bus,
mask | IORESOURCE_PREFETCH, type);
resource_size_t children_add_size = 0;
resource_size_t children_add_align = 0;
@@ -987,6 +999,10 @@
if (!b_res)
return -ENOSPC;
+ /* If resource is already assigned, nothing more to do */
+ if (b_res->parent)
+ return 0;
+
memset(aligns, 0, sizeof(aligns));
max_order = 0;
size = 0;
@@ -1785,12 +1801,18 @@
/* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
+ int idx;
res->start = fail_res->start;
res->end = fail_res->end;
res->flags = fail_res->flags;
- if (fail_res->dev->subordinate)
- res->flags = 0;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
}
free_list(&fail_head);
@@ -2037,12 +2059,18 @@
/* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
+ int idx;
res->start = fail_res->start;
res->end = fail_res->end;
res->flags = fail_res->flags;
- if (fail_res->dev->subordinate)
- res->flags = 0;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
}
free_list(&fail_head);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index d8ca40a..da5023e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -409,10 +409,16 @@
int pci_resize_resource(struct pci_dev *dev, int resno, int size)
{
struct resource *res = dev->resource + resno;
+ struct pci_host_bridge *host;
int old, ret;
u32 sizes;
u16 cmd;
+ /* Check if we must preserve the firmware's resource assignment */
+ host = pci_find_host_bridge(dev->bus);
+ if (host->preserve_config)
+ return -ENOTSUPP;
+
/* Make sure the resource isn't assigned before resizing it. */
if (!(res->flags & IORESOURCE_UNSET))
return -EBUSY;
@@ -439,10 +445,11 @@
res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
/* Check if the new config works by trying to assign everything. */
- ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
- if (ret)
- goto error_resize;
-
+ if (dev->bus->self) {
+ ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ if (ret)
+ goto error_resize;
+ }
return 0;
error_resize:
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index ae4aa0e..1e3ed6e 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -304,17 +304,20 @@
slot_name = make_slot_name(name);
if (!slot_name) {
err = -ENOMEM;
+ kfree(slot);
goto err;
}
- err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
- "%s", slot_name);
- if (err)
- goto err;
-
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
+ err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
+ "%s", slot_name);
+ if (err) {
+ kobject_put(&slot->kobj);
+ goto err;
+ }
+
down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot_nr)
@@ -329,7 +332,6 @@
mutex_unlock(&pci_slot_mutex);
return slot;
err:
- kfree(slot);
slot = ERR_PTR(err);
goto out;
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 8c94cd3..2c9c306 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -175,7 +175,7 @@
kref_get(&stuser->kref);
stuser->read_len = sizeof(stuser->data);
stuser_set_state(stuser, MRPC_QUEUED);
- init_completion(&stuser->comp);
+ reinit_completion(&stuser->comp);
list_add_tail(&stuser->list, &stdev->mrpc_queue);
mrpc_cmd_submit(stdev);
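The init_completion() to reinit_completion() switch matters because this completion is reused across MRPC submissions: init_completion() reinitializes the whole object, including its internal wait queue, while reinit_completion() only resets the done counter. A minimal illustration of the intended pattern:

	struct completion comp;

	init_completion(&comp);		/* once, when the object is created */

	/* per submission cycle: */
	reinit_completion(&comp);	/* reset ->done, keep the wait queue */
	/* ... submit work that eventually calls complete(&comp) ... */
	wait_for_completion(&comp);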
@@ -675,7 +675,7 @@
return -ENOMEM;
s->global = ioread32(&stdev->mmio_sw_event->global_summary);
- s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
@@ -1276,7 +1276,7 @@
if (nvecs < 0)
return nvecs;
- event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
+ event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
if (event_irq < 0 || event_irq >= nvecs)
return -EFAULT;
@@ -1349,7 +1349,7 @@
if (rc)
return rc;
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
return rc;
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 31e3955..c9f0341 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -20,10 +20,12 @@
u16 word;
u32 dword;
long err;
- long cfg_ret;
+ int cfg_ret;
+ err = -EPERM;
+ dev = NULL;
if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ goto error;
err = -ENODEV;
dev = pci_get_domain_bus_and_slot(0, bus, dfn);
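Presetting err and dev matters because both early exits jump to the function's shared error label; a sketch of the assumed epilogue (not visible in this hunk), which both paths now reach safely:

error:
	/* dev may still be NULL here; pci_dev_put() accepts NULL */
	pci_dev_put(dev);
	return err;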
@@ -46,7 +48,7 @@
}
err = -EIO;
- if (cfg_ret != PCIBIOS_SUCCESSFUL)
+ if (cfg_ret)
goto error;
switch (len) {
@@ -105,7 +107,7 @@
if (err)
break;
err = pci_user_write_config_byte(dev, off, byte);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
@@ -114,7 +116,7 @@
if (err)
break;
err = pci_user_write_config_word(dev, off, word);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
@@ -123,7 +125,7 @@
if (err)
break;
err = pci_user_write_config_dword(dev, off, dword);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 7915d10..bd54907 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -570,7 +570,6 @@
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
/*
* The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
* device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.