// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_COMMAND_STATUS			0x04
#define CMD_BME_VAL				0x4
#define PCIE20_DEVICE_CONTROL2_STATUS2		0x98
#define PCIE_CAP_CPL_TIMEOUT_DISABLE		0x10

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE20_CAP				0x70
#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + 0xC)
#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT	(BIT(10) | BIT(11))
#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000

/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS			0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
struct qcom_pcie_resources_2_1_0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct clk *aux_clk;
	struct clk *ref_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

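/*
 * PERST# handling: asserting the "perst" GPIO holds the endpoint in
 * reset, deasserting it releases the endpoint; the delays give the
 * signal time to settle on the far side of the slot.
 */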
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

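/*
 * Bring the link up: kick the IP-specific LTSSM enable hook and let
 * the DesignWare core poll until it reports the link as trained.
 */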
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

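/*
 * Acquire the supplies, clocks and resets used by IP rev 2.1.0
 * (apq8064/ipq8064). The "aux"/"ref" clocks and the "ext" reset are
 * requested as optional because not every platform provides them.
 */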
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->aux_clk = devm_clk_get_optional(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(res->ref_clk))
		return PTR_ERR(res->ref_clk);

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

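/*
 * Undo qcom_pcie_init_2_1_0(): assert the resets, gate the clocks,
 * set PCIE20_PARF_PHY_CTRL bit 0 again (init clears it) and drop the
 * supplies.
 */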
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_disable_unprepare(res->phy_clk);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);
	clk_disable_unprepare(res->iface_clk);
	clk_disable_unprepare(res->core_clk);
	clk_disable_unprepare(res->aux_clk);
	clk_disable_unprepare(res->ref_clk);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

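/*
 * Power-up sequence for IP rev 2.1.0: park the controller in reset,
 * enable supplies and bus clocks, then release the resets in the
 * order the hardware expects (ahb, ext, phy, pci, por, axi) before
 * starting the PHY clock and tuning the bridge.
 */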
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->iface_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->core_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_core;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	ret = clk_prepare_enable(res->ref_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ref clock\n");
		goto err_clk_ref;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ahb;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);

		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->phy_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_deassert_ahb;
	}

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_deassert_ahb:
	clk_disable_unprepare(res->ref_clk);
err_clk_ref:
	clk_disable_unprepare(res->aux_clk);
err_clk_aux:
	clk_disable_unprepare(res->core_clk);
err_clk_core:
	clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

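/*
 * Later IP revisions start link training through the PARF LTSSM
 * register rather than the ELBI block used by rev 2.1.0.
 */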
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

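/*
 * IP rev 2.3.2 bring-up: supplies first, then the aux/cfg/bus clocks;
 * the "pipe" clock is deliberately left to qcom_pcie_post_init_2_3_2().
 */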
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

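/*
 * Runs from qcom_pcie_host_init() only after phy_power_on(), so the
 * "pipe" clock (typically sourced from the PHY) starts once the PHY
 * is up.
 */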
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks,
		 * but are controlled here for IPQ4019.
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

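/*
 * IP rev 2.4.0 bring-up: assert every reset, give the hardware time
 * to settle, deassert them in the order the IP expects, and only then
 * enable the bulk clocks and program the PARF defaults.
 */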
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

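/*
 * IP rev 2.3.3 (ipq8074) bring-up: pulse all seven named resets, start
 * the clocks, then program PARF/DBI defaults: force Bus Master Enable,
 * make the read-only DBI registers writable, and clear the advertised
 * ASPM support bits.
 */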
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);

	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);

	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
		PCIE20_DEVICE_CONTROL2_STATUS2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

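/*
 * Link state comes straight from the Link Status register in the root
 * port's PCIe capability: DLLLA set means the data link layer is active.
 */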
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

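/*
 * Host bring-up: hold the endpoint in PERST#, run the IP-specific init,
 * power on the PHY, configure the DesignWare root complex, release
 * PERST# and train the link; unwind in reverse on failure.
 */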
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

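/*
 * Probe: select the per-IP ops via OF match data, map the "parf", "dbi"
 * and "elbi" regions, grab the optional PERST# GPIO and PHY, then hand
 * the port to the DesignWare host core. The runtime PM reference taken
 * here is released on every error path.
 */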
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_disable(dev);
		return ret;
	}

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_pm_runtime_put;
	}

	return 0;

err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ }
};

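/*
 * The root ports on these SoCs report a non-bridge class code; force
 * the PCI-to-PCI bridge class so the core enumerates devices below them.
 */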
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);