// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Marvell Armada-8K SoCs
 *
 * Armada-8K PCIe Glue Layer Source Code
 *
 * Copyright (C) 2016 Marvell Technology Group Ltd.
 *
 * Author: Yehuda Yitshak <yehuday@marvell.com>
 * Author: Shadi Ammouri <shadi@marvell.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>

#include "pcie-designware.h"

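/* Driver-private state: the DesignWare PCIe core plus its two clocks. */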
struct armada8k_pcie {
	struct dw_pcie *pci;
	struct clk *clk;
	struct clk *clk_reg;
};

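/* Vendor-specific registers are reached through the DBI space at offset 0x8000. */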
#define PCIE_VENDOR_REGS_OFFSET		0x8000

#define PCIE_GLOBAL_CONTROL_REG		(PCIE_VENDOR_REGS_OFFSET + 0x0)
#define PCIE_APP_LTSSM_EN		BIT(2)
#define PCIE_DEVICE_TYPE_SHIFT		4
#define PCIE_DEVICE_TYPE_MASK		0xF
#define PCIE_DEVICE_TYPE_RC		0x4 /* Root complex */

#define PCIE_GLOBAL_STATUS_REG		(PCIE_VENDOR_REGS_OFFSET + 0x8)
#define PCIE_GLB_STS_RDLH_LINK_UP	BIT(1)
#define PCIE_GLB_STS_PHY_LINK_UP	BIT(9)

#define PCIE_GLOBAL_INT_CAUSE1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x1C)
#define PCIE_GLOBAL_INT_MASK1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x20)
#define PCIE_INT_A_ASSERT_MASK		BIT(9)
#define PCIE_INT_B_ASSERT_MASK		BIT(10)
#define PCIE_INT_C_ASSERT_MASK		BIT(11)
#define PCIE_INT_D_ASSERT_MASK		BIT(12)

#define PCIE_ARCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x50)
#define PCIE_AWCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x54)
#define PCIE_ARUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x5C)
#define PCIE_AWUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
 * AR/AW Cache defaults: Normal memory, Write-Back, Read/Write
 * allocate
 */
#define ARCACHE_DEFAULT_VALUE		0x3511
#define AWCACHE_DEFAULT_VALUE		0x5311

#define DOMAIN_OUTER_SHAREABLE		0x2
#define AX_USER_DOMAIN_MASK		0x3
#define AX_USER_DOMAIN_SHIFT		4

#define to_armada8k_pcie(x)	dev_get_drvdata((x)->dev)

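/*
 * The link is only reported up when both the data link layer (RDLH) and the
 * PHY flag link-up in the global status register.
 */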
static int armada8k_pcie_link_up(struct dw_pcie *pci)
{
	u32 reg;
	u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;

	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);

	if ((reg & mask) == mask)
		return 1;

	dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
	return 0;
}

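/*
 * Bring the link up: if it is not already up, disable the LTSSM so the port
 * can be reconfigured, set Root Complex mode, program the master
 * AxCache/AxDomain attributes, unmask INTA-INTD, then re-enable the LTSSM
 * and wait for the link.
 */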
static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 reg;

	if (!dw_pcie_link_up(pci)) {
		/* Disable LTSSM state machine to enable configuration */
		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
		reg &= ~(PCIE_APP_LTSSM_EN);
		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
	}

	/* Set the device to root complex mode */
	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
	reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
	reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);

	/* Set the PCIe master AxCache attributes */
	dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
	dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);

	/* Set the PCIe master AxDomain attributes */
	reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG);
	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg);

	reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG);
	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg);

	/* Enable INT A-D interrupts */
	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG);
	reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
	       PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);

	if (!dw_pcie_link_up(pci)) {
		/* Configuration done. Start LTSSM */
		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
		reg |= PCIE_APP_LTSSM_EN;
		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
	}

	/* Wait until the link becomes active again */
	if (dw_pcie_wait_for_link(pci))
		dev_err(pci->dev, "Link not up after reconfiguration\n");
}

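/* DesignWare host_init callback: set up the Root Complex and bring up the link. */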
static int armada8k_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);

	dw_pcie_setup_rc(pp);
	armada8k_pcie_establish_link(pcie);

	return 0;
}

static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
{
	struct armada8k_pcie *pcie = arg;
	struct dw_pcie *pci = pcie->pci;
	u32 val;

	/*
	 * Interrupts are directly handled by the device driver of the
	 * PCI device. However, they are also latched into the PCIe
	 * controller, so we simply discard them.
	 */
	val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG);
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val);

	return IRQ_HANDLED;
}

static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
	.host_init = armada8k_pcie_host_init,
};

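/*
 * Request the controller's summary interrupt and register the root port with
 * the DesignWare host core.
 */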
static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
				  struct platform_device *pdev)
{
	struct dw_pcie *pci = pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	pp->ops = &armada8k_pcie_host_ops;

	pp->irq = platform_get_irq(pdev, 0);
	if (pp->irq < 0) {
		dev_err(dev, "failed to get irq for port\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
			       IRQF_SHARED, "armada8k-pcie", pcie);
	if (ret) {
		dev_err(dev, "failed to request irq %d\n", pp->irq);
		return ret;
	}

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host: %d\n", ret);
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = armada8k_pcie_link_up,
};

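/*
 * Probe: allocate driver state, enable the functional and optional register
 * clocks, map the "ctrl" (DBI) register resource and register the root port.
 */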
static int armada8k_pcie_probe(struct platform_device *pdev)
{
	struct dw_pcie *pci;
	struct armada8k_pcie *pcie;
	struct device *dev = &pdev->dev;
	struct resource *base;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	pcie->pci = pci;

	pcie->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(pcie->clk))
		return PTR_ERR(pcie->clk);

	ret = clk_prepare_enable(pcie->clk);
	if (ret)
		return ret;

	pcie->clk_reg = devm_clk_get(dev, "reg");
	if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
		ret = -EPROBE_DEFER;
		goto fail;
	}
	if (!IS_ERR(pcie->clk_reg)) {
		ret = clk_prepare_enable(pcie->clk_reg);
		if (ret)
			goto fail_clkreg;
	}

	/* Get the dw-pcie unit configuration/control registers base. */
	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
	if (IS_ERR(pci->dbi_base)) {
		dev_err(dev, "couldn't remap regs base %p\n", base);
		ret = PTR_ERR(pci->dbi_base);
		goto fail_clkreg;
	}

	platform_set_drvdata(pdev, pcie);

	ret = armada8k_add_pcie_port(pcie, pdev);
	if (ret)
		goto fail_clkreg;

	return 0;

fail_clkreg:
	clk_disable_unprepare(pcie->clk_reg);
fail:
	clk_disable_unprepare(pcie->clk);

	return ret;
}

static const struct of_device_id armada8k_pcie_of_match[] = {
	{ .compatible = "marvell,armada8k-pcie", },
	{},
};

static struct platform_driver armada8k_pcie_driver = {
	.probe		= armada8k_pcie_probe,
	.driver = {
		.name	= "armada8k-pcie",
		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(armada8k_pcie_driver);