// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Intel Gateway SoCs
 *
 * Copyright (c) 2019 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/pci_regs.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PORT_AFR_N_FTS_GEN12_DFT	(SZ_128 - 1)
#define PORT_AFR_N_FTS_GEN3		180
#define PORT_AFR_N_FTS_GEN4		196

/* PCIe Application logic Registers */
#define PCIE_APP_CCR			0x10
#define PCIE_APP_CCR_LTSSM_ENABLE	BIT(0)

#define PCIE_APP_MSG_CR			0x30
#define PCIE_APP_MSG_XMT_PM_TURNOFF	BIT(0)

#define PCIE_APP_PMC			0x44
#define PCIE_APP_PMC_IN_L2		BIT(20)

#define PCIE_APP_IRNEN			0xF4
#define PCIE_APP_IRNCR			0xF8
#define PCIE_APP_IRN_AER_REPORT		BIT(0)
#define PCIE_APP_IRN_PME		BIT(2)
#define PCIE_APP_IRN_RX_VDM_MSG		BIT(4)
#define PCIE_APP_IRN_PM_TO_ACK		BIT(9)
#define PCIE_APP_IRN_LINK_AUTO_BW_STAT	BIT(11)
#define PCIE_APP_IRN_BW_MGT		BIT(12)
#define PCIE_APP_IRN_INTA		BIT(13)
#define PCIE_APP_IRN_INTB		BIT(14)
#define PCIE_APP_IRN_INTC		BIT(15)
#define PCIE_APP_IRN_INTD		BIT(16)
#define PCIE_APP_IRN_MSG_LTR		BIT(18)
#define PCIE_APP_IRN_SYS_ERR_RC		BIT(29)
#define PCIE_APP_INTX_OFST		12

#define PCIE_APP_IRN_INT \
	(PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
	PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
	PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
	PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
	PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
	PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)

#define BUS_IATU_OFFSET			SZ_256M
#define RESET_INTERVAL_MS		100

struct intel_pcie_soc {
	unsigned int		pcie_ver;
	unsigned int		pcie_atu_offset;
	u32			num_viewport;
};

struct intel_pcie_port {
	struct dw_pcie		pci;
	void __iomem		*app_base;
	struct gpio_desc	*reset_gpio;
	u32			rst_intrvl;
	struct clk		*core_clk;
	struct reset_control	*core_rst;
	struct phy		*phy;
};

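/* Read-modify-write helper that skips the MMIO write if the value is unchanged */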
static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
{
	u32 old;

	old = readl(base + ofs);
	val = (old & ~mask) | (val & mask);

	if (val != old)
		writel(val, base + ofs);
}

static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs)
{
	return readl(lpp->app_base + ofs);
}

static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
	writel(val, lpp->app_base + ofs);
}

static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
			     u32 mask, u32 val)
{
	pcie_update_bits(lpp->app_base, ofs, mask, val);
}

static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
{
	return dw_pcie_readl_dbi(&lpp->pci, ofs);
}

static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
	dw_pcie_writel_dbi(&lpp->pci, ofs, val);
}

static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
				u32 mask, u32 val)
{
	pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
}

static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
{
	pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
			 PCIE_APP_CCR_LTSSM_ENABLE);
}

static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
{
	pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
}

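/* Clear the link disable and ASPM control bits before link training */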
static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
{
	u32 val;
	u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);

	val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);

	val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
	pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
}

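/* Program the number of fast training sequences (N_FTS) for each link generation */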
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
	switch (pci->link_gen) {
	case 3:
		pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
		break;
	case 4:
		pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;
		break;
	default:
		pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;
		break;
	}
	pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
}

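/* Root complex setup: stop the LTSSM, then apply link and DWC RC configuration */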
static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
{
	intel_pcie_ltssm_disable(lpp);
	intel_pcie_link_setup(lpp);
	intel_pcie_init_n_fts(&lpp->pci);
	dw_pcie_setup_rc(&lpp->pci.pp);
	dw_pcie_upconfig_setup(&lpp->pci);
}

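/* Request the endpoint reset GPIO and give the initial reset time to settle */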
static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
{
	struct device *dev = lpp->pci.dev;
	int ret;

	lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(lpp->reset_gpio)) {
		ret = PTR_ERR(lpp->reset_gpio);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
		return ret;
	}

	/* Make initial reset last for 100us */
	usleep_range(100, 200);

	return 0;
}

static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
{
	reset_control_assert(lpp->core_rst);
}

static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
{
	/*
	 * One microsecond delay to make sure the reset pulse is wide
	 * enough for a clean core reset.
	 */
	udelay(1);
	reset_control_deassert(lpp->core_rst);

	/*
	 * On some SoCs the core reset also resets the PHY, so a longer
	 * delay is needed to make sure the reset process is done.
	 */
	usleep_range(1000, 2000);
}

static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
{
	gpiod_set_value_cansleep(lpp->reset_gpio, 1);
}

static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
{
	msleep(lpp->rst_intrvl);
	gpiod_set_value_cansleep(lpp->reset_gpio, 0);
}

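/* Bring the endpoint out of reset, start the LTSSM and wait for link up */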
static int intel_pcie_app_logic_setup(struct intel_pcie_port *lpp)
{
	intel_pcie_device_rst_deassert(lpp);
	intel_pcie_ltssm_enable(lpp);

	return dw_pcie_wait_for_link(&lpp->pci);
}

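/* Mask all application interrupts and acknowledge any that are pending */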
static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
{
	pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
	pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
}

static int intel_pcie_get_resources(struct platform_device *pdev)
{
	struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
	struct dw_pcie *pci = &lpp->pci;
	struct device *dev = pci->dev;
	int ret;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	lpp->core_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(lpp->core_clk)) {
		ret = PTR_ERR(lpp->core_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get clks: %d\n", ret);
		return ret;
	}

	lpp->core_rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(lpp->core_rst)) {
		ret = PTR_ERR(lpp->core_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get resets: %d\n", ret);
		return ret;
	}

	ret = device_property_read_u32(dev, "reset-assert-ms",
				       &lpp->rst_intrvl);
	if (ret)
		lpp->rst_intrvl = RESET_INTERVAL_MS;

	lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
	if (IS_ERR(lpp->app_base))
		return PTR_ERR(lpp->app_base);

	lpp->phy = devm_phy_get(dev, "pcie");
	if (IS_ERR(lpp->phy)) {
		ret = PTR_ERR(lpp->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
		return ret;
	}

	return 0;
}

static void intel_pcie_deinit_phy(struct intel_pcie_port *lpp)
{
	phy_exit(lpp->phy);
}

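/* Send the PME_Turn_Off message and poll until the link enters the L2 state */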
static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
{
	u32 value;
	int ret;
	struct dw_pcie *pci = &lpp->pci;

	if (pci->link_gen < 3)
		return 0;

	/* Send PME_TURN_OFF message */
	pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
			 PCIE_APP_MSG_XMT_PM_TURNOFF);

	/* Read PMC status and wait for falling into L2 link state */
	ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
				 value & PCIE_APP_PMC_IN_L2, 20,
				 jiffies_to_usecs(5 * HZ));
	if (ret)
		dev_err(lpp->pci.dev, "PCIe link enter L2 timeout!\n");

	return ret;
}

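/* Quiesce the link, then hold the endpoint in reset and disable memory space */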
static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
{
	if (dw_pcie_link_up(&lpp->pci))
		intel_pcie_wait_l2(lpp);

	/* Put endpoint device in reset state */
	intel_pcie_device_rst_assert(lpp);
	pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
}

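/* Full host bring-up: PHY init, core reset, clock enable, RC setup, link training */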
static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
{
	int ret;

	intel_pcie_core_rst_assert(lpp);
	intel_pcie_device_rst_assert(lpp);

	ret = phy_init(lpp->phy);
	if (ret)
		return ret;

	intel_pcie_core_rst_deassert(lpp);

	ret = clk_prepare_enable(lpp->core_clk);
	if (ret) {
		dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret);
		goto clk_err;
	}

	intel_pcie_rc_setup(lpp);
	ret = intel_pcie_app_logic_setup(lpp);
	if (ret)
		goto app_init_err;

	/* Enable integrated interrupts */
	pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
			 PCIE_APP_IRN_INT);

	return 0;

app_init_err:
	clk_disable_unprepare(lpp->core_clk);
clk_err:
	intel_pcie_core_rst_assert(lpp);
	intel_pcie_deinit_phy(lpp);

	return ret;
}

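/* Tear down in roughly the reverse order of intel_pcie_host_setup() */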
static void __intel_pcie_remove(struct intel_pcie_port *lpp)
{
	intel_pcie_core_irq_disable(lpp);
	intel_pcie_turn_off(lpp);
	clk_disable_unprepare(lpp->core_clk);
	intel_pcie_core_rst_assert(lpp);
	intel_pcie_deinit_phy(lpp);
}

static int intel_pcie_remove(struct platform_device *pdev)
{
	struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
	struct pcie_port *pp = &lpp->pci.pp;

	dw_pcie_host_deinit(pp);
	__intel_pcie_remove(lpp);

	return 0;
}

static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
{
	struct intel_pcie_port *lpp = dev_get_drvdata(dev);
	int ret;

	intel_pcie_core_irq_disable(lpp);
	ret = intel_pcie_wait_l2(lpp);
	if (ret)
		return ret;

	intel_pcie_deinit_phy(lpp);
	clk_disable_unprepare(lpp->core_clk);
	return ret;
}

static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
{
	struct intel_pcie_port *lpp = dev_get_drvdata(dev);

	return intel_pcie_host_setup(lpp);
}

static int intel_pcie_rc_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);

	return intel_pcie_host_setup(lpp);
}

/*
 * Dummy function so that DW core doesn't configure MSI
 */
static int intel_pcie_msi_init(struct pcie_port *pp)
{
	return 0;
}

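/* Fix up CPU addresses for the iATU: the PCIe bus view is offset by 256 MiB */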
static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
	return cpu_addr + BUS_IATU_OFFSET;
}

static const struct dw_pcie_ops intel_pcie_ops = {
	.cpu_addr_fixup = intel_pcie_cpu_addr,
};

static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
	.host_init = intel_pcie_rc_init,
	.msi_host_init = intel_pcie_msi_init,
};

static const struct intel_pcie_soc pcie_data = {
	.pcie_ver = 0x520A,
	.pcie_atu_offset = 0xC0000,
	.num_viewport = 3,
};

static int intel_pcie_probe(struct platform_device *pdev)
{
	const struct intel_pcie_soc *data;
	struct device *dev = &pdev->dev;
	struct intel_pcie_port *lpp;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	int ret;

	lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
	if (!lpp)
		return -ENOMEM;

	platform_set_drvdata(pdev, lpp);
	pci = &lpp->pci;
	pci->dev = dev;
	pp = &pci->pp;

	ret = intel_pcie_get_resources(pdev);
	if (ret)
		return ret;

	ret = intel_pcie_ep_rst_init(lpp);
	if (ret)
		return ret;

	data = device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	pci->ops = &intel_pcie_ops;
	pci->version = data->pcie_ver;
	pci->atu_base = pci->dbi_base + data->pcie_atu_offset;
	pp->ops = &intel_pcie_dw_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "Cannot initialize host\n");
		return ret;
	}

	/*
	 * The Intel PCIe core doesn't configure an I/O region, so limit
	 * the number of viewports so that no I/O region access is
	 * performed.
	 */
	pci->num_viewport = data->num_viewport;

	return 0;
}

static const struct dev_pm_ops intel_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
				      intel_pcie_resume_noirq)
};

static const struct of_device_id of_intel_pcie_match[] = {
	{ .compatible = "intel,lgm-pcie", .data = &pcie_data },
	{}
};

static struct platform_driver intel_pcie_driver = {
	.probe = intel_pcie_probe,
	.remove = intel_pcie_remove,
	.driver = {
		.name = "intel-gw-pcie",
		.of_match_table = of_intel_pcie_match,
		.pm = &intel_pcie_pm_ops,
	},
};
builtin_platform_driver(intel_pcie_driver);