// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS			0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};

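/*
 * Only one of these resource layouts is live at a time; the variant is
 * selected by the qcom_pcie_ops matched against the compatible string.
 */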
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

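/*
 * Per-IP-revision hooks: get_resources() acquires clocks, resets and
 * regulators at probe time; init()/post_init() run before and after the
 * PHY is powered on; deinit()/post_deinit() undo them; ltssm_enable()
 * kicks off link training.
 */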
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

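/*
 * PERST# handling: the "perst" GPIO is requested with GPIOD_OUT_HIGH in
 * probe, so a logical 1 here asserts PERST# and holds the endpoint in
 * reset.
 */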
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

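/* Start link training if the link is not already up, then wait for it. */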
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

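/*
 * All resources below are looked up by name in the device tree node:
 * three "vdda*" supplies, the clocks listed in res->clks (the first
 * three required, the last two optional) and the named reset lines.
 */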
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

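/*
 * Power-up order for the 2.1.0 core: assert every reset (the bootloader
 * may have left the block half-initialized), enable the regulators,
 * release the resets in sequence, clear bit 0 of PCIE20_PARF_PHY_CTRL to
 * release the PHY, enable the clocks and finally apply the per-SoC PHY
 * tuning and reference-clock setup.
 */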
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Failures are not checked here; the original failure
	 * is returned in 'ret' anyway.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "cfg";
	res->clks[2].id = "bus_master";
	res->clks[3].id = "bus_slave";
	res->clks[4].id = "slave_q2a";
	res->clks[5].id = "tbu";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

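/*
 * 2.7.0 power-up: enable regulators and bulk clocks, pulse the "pci"
 * reset, then select root-complex mode through PCIE20_PARF_DEVICE_TYPE
 * before the usual PARF setup.
 */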
static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	return clk_prepare_enable(res->pipe_clk);
}

static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_disable_unprepare(res->pipe_clk);
}

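/*
 * Link state is taken from the Data Link Layer Link Active flag in the
 * Link Status register of the standard PCIe capability.
 */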
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

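/*
 * Host bring-up: hold the endpoint in reset, run the per-revision init,
 * power on the PHY (and post_init), configure the root complex and MSI,
 * release PERST# and wait for the link to train.
 */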
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);
	dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

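/*
 * Probe maps the "parf", "dbi" and "elbi" regions named in the device
 * tree, takes the optional PERST# GPIO and PHY, acquires the per-revision
 * resources and registers the DesignWare host.
 */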
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ }
};

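/*
 * The controller's root ports advertise a non-bridge class code, so force
 * the PCI-to-PCI bridge class early for the known device IDs below so the
 * PCI core enumerates the hierarchy correctly.
 */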
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);