blob: 5cf1ef12fb9b05f892cfed3aa84db05e99dc7bcd [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCIe host controller driver for Freescale i.MX6 SoCs
4 *
5 * Copyright (C) 2013 Kosagi
Olivier Deprez157378f2022-04-04 15:47:50 +02006 * https://www.kosagi.com
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007 *
8 * Author: Sean Cross <xobs@kosagi.com>
9 */
10
David Brazdil0f672f62019-12-10 10:32:29 +000011#include <linux/bitfield.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000012#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/gpio.h>
15#include <linux/kernel.h>
16#include <linux/mfd/syscon.h>
17#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
18#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
19#include <linux/module.h>
20#include <linux/of_gpio.h>
21#include <linux/of_device.h>
David Brazdil0f672f62019-12-10 10:32:29 +000022#include <linux/of_address.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000023#include <linux/pci.h>
24#include <linux/platform_device.h>
25#include <linux/regmap.h>
26#include <linux/regulator/consumer.h>
27#include <linux/resource.h>
28#include <linux/signal.h>
29#include <linux/types.h>
30#include <linux/interrupt.h>
31#include <linux/reset.h>
David Brazdil0f672f62019-12-10 10:32:29 +000032#include <linux/pm_domain.h>
33#include <linux/pm_runtime.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000034
35#include "pcie-designware.h"
36
/*
 * i.MX8MQ-specific IOMUXC GPR bits and the MMIO base address of the
 * second PCIe controller (used to tell the two instances apart).
 */
#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000

/* Fetch the driver state stashed in the dw_pcie device's drvdata */
#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)
44
/* Supported SoC variants; selected via drvdata at probe time. */
enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
};

/* Bits for imx6_pcie_drvdata.flags */
#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)	/* PHY accessed via PCIE_PHY_CTRL bridge */
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)	/* must poll directed speed change done */
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)

/* Per-variant constants bound to the DT compatible string */
struct imx6_pcie_drvdata {
	enum imx6_pcie_variants variant;
	u32 flags;
	int dbi_length;
};
62
/* Per-controller driver state wrapping the DesignWare core handle. */
struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;	/* optional reset GPIO; some boards lack it */
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;	/* used on i.MX6SX only */
	struct clk		*pcie;
	struct clk		*pcie_aux;		/* used on i.MX8MQ only */
	struct regmap		*iomuxc_gpr;
	u32			controller_id;	/* instance index (i.MX8MQ has two controllers) */
	struct reset_control	*pciephy_reset;
	struct reset_control	*apps_reset;
	struct reset_control	*turnoff_reset;	/* optional PME_Turn_Off reset from DT */
	/* TX tuning values written into IOMUXC GPR8 on i.MX6 variants */
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	struct regulator	*vpcie;
	void __iomem		*phy_base;	/* i.MX7D PHY regs for ERR010728 workaround */

	/* power domain for pcie */
	struct device		*pd_pcie;
	/* power domain for pcie phy */
	struct device		*pd_pcie_phy;
	const struct imx6_pcie_drvdata *drvdata;
};
91
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700

/* Control register driving the serial PHY register access handshake */
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x)	FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR	BIT(16)
#define PCIE_PHY_CTRL_CAP_DAT	BIT(17)
#define PCIE_PHY_CTRL_WR	BIT(18)
#define PCIE_PHY_CTRL_RD	BIT(19)

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK	BIT(16)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define PCIE_PHY_ATEOVRD_EN			BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4		0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29

#define PCIE_PHY_CMN_REG15	        0x54
#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)

#define PCIE_PHY_CMN_REG24		0x90
#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)

#define PCIE_PHY_CMN_REG26		0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)
143
/*
 * Poll the PHY control bridge ack bit until it matches @exp_val.
 * Polls up to 10 times with a 1us delay; returns 0 on match or
 * -ETIMEDOUT when the PHY never responds.
 */
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	bool val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		/* any non-zero masked value converts to true */
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}
164
/*
 * Latch @addr into the PHY control interface: write the address, pulse
 * CAP_ADR, and run the ack-up/ack-down handshake. Returns 0 or -ETIMEDOUT.
 */
static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	/* assert address-capture strobe */
	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* drop CAP_ADR again and wait for the ack to de-assert */
	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, false);
}
186
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 phy_ctl;
	int ret;

	/* latch the register address first */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* read data; the u16 destination keeps only the low 16 bits */
	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	/* wait for the ack to drop before the next transaction */
	return pcie_phy_poll_ack(imx6_pcie, false);
}
213
/*
 * Write @data to the 16-bit PHY register @addr via the control bridge.
 * Sequence: capture address, capture data, then pulse WR; each step is
 * confirmed by polling the ack bit up and back down.
 */
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	/* return the control register to idle */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}
268
/*
 * Bounce the receiver override bits (set, wait ~2ms, clear) to reset the
 * PHY RX path. No-op on variants without IMX6_PCIE_FLAG_IMX6_PHY.
 */
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u16 tmp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
288
David Brazdil0f672f62019-12-10 10:32:29 +0000289#ifdef CONFIG_ARM
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000290/* Added for PCI abort handling */
291static int imx6q_pcie_abort_handler(unsigned long addr,
292 unsigned int fsr, struct pt_regs *regs)
293{
294 unsigned long pc = instruction_pointer(regs);
295 unsigned long instr = *(unsigned long *)pc;
296 int reg = (instr >> 12) & 15;
297
298 /*
299 * If the instruction being executed was a read,
300 * make it look like it read all-ones.
301 */
302 if ((instr & 0x0c100000) == 0x04100000) {
303 unsigned long val;
304
305 if (instr & 0x00400000)
306 val = 255;
307 else
308 val = -1;
309
310 regs->uregs[reg] = val;
311 regs->ARM_pc += 4;
312 return 0;
313 }
314
315 if ((instr & 0x0e100090) == 0x00100090) {
316 regs->uregs[reg] = -1;
317 regs->ARM_pc += 4;
318 return 0;
319 }
320
321 return 1;
322}
David Brazdil0f672f62019-12-10 10:32:29 +0000323#endif
324
/*
 * Attach the "pcie" and "pcie_phy" power domains and link them to @dev so
 * runtime PM keeps them powered while the controller is active. Does
 * nothing when the device already sits in a single power domain.
 * Returns 0 on success or a negative errno.
 */
static int imx6_pcie_attach_pd(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx6_pcie->pd_pcie))
		return PTR_ERR(imx6_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx6_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx6_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd.\n");
		return -EINVAL;
	}

	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pd_pcie_phy))
		return PTR_ERR(imx6_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
		return -EINVAL;
	}

	return 0;
}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000364
/*
 * Put the PCIe core/PHY into reset using the variant-specific mechanism,
 * then switch off the vpcie regulator if it is currently enabled.
 */
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->drvdata->variant) {
	case IMX7D:
	case IMX8MQ:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/* power down the test-PD bit and gate the ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}
405
David Brazdil0f672f62019-12-10 10:32:29 +0000406static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
407{
408 WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
409 return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
410}
411
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000412static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
413{
414 struct dw_pcie *pci = imx6_pcie->pci;
415 struct device *dev = pci->dev;
David Brazdil0f672f62019-12-10 10:32:29 +0000416 unsigned int offset;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000417 int ret = 0;
418
David Brazdil0f672f62019-12-10 10:32:29 +0000419 switch (imx6_pcie->drvdata->variant) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000420 case IMX6SX:
421 ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
422 if (ret) {
423 dev_err(dev, "unable to enable pcie_axi clock\n");
424 break;
425 }
426
427 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
428 IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
429 break;
Olivier Deprez157378f2022-04-04 15:47:50 +0200430 case IMX6QP:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000431 case IMX6Q:
432 /* power up core phy and enable ref clock */
433 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
434 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
435 /*
436 * the async reset input need ref clock to sync internally,
437 * when the ref clock comes after reset, internal synced
438 * reset time is too short, cannot meet the requirement.
439 * add one ~10us delay here.
440 */
David Brazdil0f672f62019-12-10 10:32:29 +0000441 usleep_range(10, 100);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000442 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
443 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
444 break;
445 case IMX7D:
446 break;
David Brazdil0f672f62019-12-10 10:32:29 +0000447 case IMX8MQ:
448 ret = clk_prepare_enable(imx6_pcie->pcie_aux);
449 if (ret) {
450 dev_err(dev, "unable to enable pcie_aux clock\n");
451 break;
452 }
453
454 offset = imx6_pcie_grp_offset(imx6_pcie);
455 /*
456 * Set the over ride low and enabled
457 * make sure that REF_CLK is turned on.
458 */
459 regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
460 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
461 0);
462 regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
463 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
464 IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
465 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000466 }
467
468 return ret;
469}
470
/*
 * Poll IOMUXC GPR22 until the i.MX7D PHY PLL reports lock; log an error
 * on timeout (the caller proceeds regardless).
 */
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	struct device *dev = imx6_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
				     IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}
483
/*
 * Bring the PCIe core out of reset: enable the regulator and all clocks,
 * pulse the optional reset GPIO, then release the variant-specific
 * resets. On failure the clocks/regulator acquired so far are rolled
 * back via the goto-cleanup chain.
 */
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		break;
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);

		/* Workaround for ERR010728, failure of PCI-e PLL VCO to
		 * oscillate, especially when cold. This turns off "Duty-cycle
		 * Corrector" and other mysterious undocumented things.
		 */
		if (likely(imx6_pcie->phy_base)) {
			/* De-assert DCC_FB_EN */
			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
			/* Assert RX_EQS and RX_EQS_SEL */
			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
				| PCIE_PHY_CMN_REG24_RX_EQ,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
			/* Assert ATT_MODE */
			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
		} else {
			dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
		}

		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}
593
David Brazdil0f672f62019-12-10 10:32:29 +0000594static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
595{
596 unsigned int mask, val;
597
598 if (imx6_pcie->drvdata->variant == IMX8MQ &&
599 imx6_pcie->controller_id == 1) {
600 mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
601 val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
602 PCI_EXP_TYPE_ROOT_PORT);
603 } else {
604 mask = IMX6Q_GPR12_DEVICE_TYPE;
605 val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
606 PCI_EXP_TYPE_ROOT_PORT);
607 }
608
609 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
610}
611
/*
 * One-time PHY/IOMUXC-GPR setup before the core leaves reset: select the
 * reference clock source and, for i.MX6-family parts, program the TX
 * de-emphasis/swing values taken from DT. Finishes by selecting RC mode.
 */
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		/*
		 * TODO: Currently this code assumes external
		 * oscillator is being used
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr,
				   imx6_pcie_grp_offset(imx6_pcie),
				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		fallthrough;
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	/* select Root Complex mode in GPR12 */
	imx6_pcie_configure_type(imx6_pcie);
}
662
/*
 * Reprogram the PHY MPLL multiplier/divider for reference clock rates
 * other than the power-on default of 125MHz. Only applies to variants
 * with IMX6_PCIE_FLAG_IMX6_PHY. Returns 0 on success or -EINVAL for an
 * unsupported clock rate.
 */
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
	int mult, div;
	u16 val;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return 0;

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx6_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	/* program the multiplier with the override bit set */
	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	/* program the reference clock divider, also via override */
	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}
709
710static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
711{
712 struct dw_pcie *pci = imx6_pcie->pci;
713 struct device *dev = pci->dev;
714 u32 tmp;
715 unsigned int retries;
716
717 for (retries = 0; retries < 200; retries++) {
718 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
719 /* Test if the speed change finished. */
720 if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
721 return 0;
722 usleep_range(100, 1000);
723 }
724
725 dev_err(dev, "Speed change timeout\n");
David Brazdil0f672f62019-12-10 10:32:29 +0000726 return -ETIMEDOUT;
727}
728
/* Kick the LTSSM so link training starts (variant-specific mechanism). */
static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		/* i.MX6 family: app_ltssm_enable lives in GPR12 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
	case IMX8MQ:
		/* newer parts: releasing the apps reset starts the LTSSM */
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	}
}
747
/*
 * Start the LTSSM and train the link: force Gen1 first, wait for link
 * up, then (if pci->link_gen == 2) raise the capability and trigger a
 * directed speed change. On failure the PHY is reset before returning
 * the negative errno.
 */
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	tmp &= ~PCI_EXP_LNKCAP_SLS;
	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto err_reset_phy;

	if (pci->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
		tmp &= ~PCI_EXP_LNKCAP_SLS;
		tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->drvdata->flags &
		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = dw_pcie_wait_for_link(pci);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	/* report the negotiated speed from the Link Status register */
	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}
827
/*
 * DWC host_init callback: reset and power the core, set up the PHY PLL,
 * configure the Root Complex, bring the link up, then init MSI.
 * The call order here is significant.
 */
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	imx6_setup_phy_mpll(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);
	dw_pcie_msi_init(pp);

	return 0;
}
843
/* Host ops handed to the DesignWare core; only host_init is needed. */
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};
847
848static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
849 struct platform_device *pdev)
850{
851 struct dw_pcie *pci = imx6_pcie->pci;
852 struct pcie_port *pp = &pci->pp;
853 struct device *dev = &pdev->dev;
854 int ret;
855
856 if (IS_ENABLED(CONFIG_PCI_MSI)) {
857 pp->msi_irq = platform_get_irq_byname(pdev, "msi");
Olivier Deprez157378f2022-04-04 15:47:50 +0200858 if (pp->msi_irq < 0)
859 return pp->msi_irq;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000860 }
861
862 pp->ops = &imx6_pcie_host_ops;
863
864 ret = dw_pcie_host_init(pp);
865 if (ret) {
866 dev_err(dev, "failed to initialize host\n");
867 return ret;
868 }
869
870 return 0;
871}
872
static const struct dw_pcie_ops dw_pcie_ops = {
	/* No special ops needed, but pcie-designware still expects this struct */
};
876
David Brazdil0f672f62019-12-10 10:32:29 +0000877#ifdef CONFIG_PM_SLEEP
/*
 * Stop the LTSSM (inverse of imx6_pcie_ltssm_enable) on variants that
 * support it; logs an error for unsupported variants.
 */
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}
895
/*
 * Broadcast PME_Turn_Off before suspend so downstream components can
 * prepare for power removal.
 *
 * Variants with a "turnoff" reset line in DT pulse that reset; i.MX6SX
 * instead toggles the PM_TURN_OFF bit in IOMUXC GPR12.  Variants with
 * neither mechanism only log an error and return without sleeping.
 */
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	/* Some variants have a turnoff reset in DT */
	if (imx6_pcie->turnoff_reset) {
		reset_control_assert(imx6_pcie->turnoff_reset);
		reset_control_deassert(imx6_pcie->turnoff_reset);
		goto pm_turnoff_sleep;
	}

	/* Others poke directly at IOMUXC registers */
	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
		break;
	default:
		dev_err(dev, "PME_Turn_Off not implemented\n");
		return;
	}

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack but we can't check.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
pm_turnoff_sleep:
	usleep_range(1000, 10000);
}
931
/*
 * Gate all PCIe-related clocks for suspend.
 *
 * The three common clocks are stopped first, then variant-specific
 * extras: the inbound AXI clock on i.MX6SX and the aux clock on
 * i.MX8MQ.  On i.MX7D the PHY refclk is switched (via GPR12) rather
 * than a clock being gated.
 */
static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	clk_disable_unprepare(imx6_pcie->pcie);
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
		break;
	case IMX8MQ:
		clk_disable_unprepare(imx6_pcie->pcie_aux);
		break;
	default:
		break;
	}
}
954
/*
 * noirq suspend hook: send PME_Turn_Off, gate clocks and stop the
 * LTSSM.  A no-op (returning 0) on variants that do not advertise
 * IMX6_PCIE_FLAG_SUPPORTS_SUSPEND.
 */
static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx6_pcie_pm_turnoff(imx6_pcie);
	imx6_pcie_clk_disable(imx6_pcie);
	imx6_pcie_ltssm_disable(dev);

	return 0;
}
968
/*
 * noirq resume hook: redo the full bring-up (reset, PHY, RC setup) and
 * retrain the link.  A failed link train is logged but deliberately not
 * returned as an error, so resume completes even with no endpoint.
 *
 * NOTE(review): probe() sets PCI_MSI_FLAGS_ENABLE by hand after host
 * init, but this resume path does not repeat that write after the core
 * reset — verify MSIs still work after suspend/resume on affected SoCs.
 */
static int imx6_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);

	ret = imx6_pcie_establish_link(imx6_pcie);
	if (ret < 0)
		dev_info(dev, "pcie link is down after resume.\n");

	return 0;
}
989#endif
990
/* Only noirq-phase callbacks are used; defined under CONFIG_PM_SLEEP. */
static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};
995
/*
 * Probe: gather all platform resources (DBI registers, optional i.MX7D
 * PHY region, reset GPIO, clocks, resets, IOMUXC GPR regmap, PHY Tx
 * tuning values, optional vpcie regulator), then register the host
 * bridge and finally force the MSI enable bit in the RC's MSI
 * capability.  All resources are devm-managed, so error paths simply
 * return a negative errno.
 */
static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct device_node *np;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;
	u16 val;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	/* Variant data (IMX6Q/6SX/6QP/7D/8MQ) selected by compatible */
	imx6_pcie->drvdata = of_device_get_match_data(dev);

	/* Find the PHY if one is defined, only imx7d uses it */
	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
	if (np) {
		struct resource res;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return ret;
		}
		imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
		if (IS_ERR(imx6_pcie->phy_base)) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return PTR_ERR(imx6_pcie->phy_base);
		}
	}

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		/* GPIO controller not ready yet; retry probe later */
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy))
		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
				     "pcie_phy clock source missing or invalid\n");

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus))
		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
				     "pcie_bus clock source missing or invalid\n");

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie))
		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
				     "pcie clock source missing or invalid\n");

	/* Variant-specific clocks and reset lines */
	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi))
			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
					     "pcie_inbound_axi clock missing or invalid\n");
		break;
	case IMX8MQ:
		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
		if (IS_ERR(imx6_pcie->pcie_aux))
			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
					     "pcie_aux clock source missing or invalid\n");
		/* IMX8MQ also needs the IMX7D reset lines below */
		fallthrough;
	case IMX7D:
		/* Second i.MX8MQ controller is identified by its DBI address */
		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
			imx6_pcie->controller_id = 1;

		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab turnoff reset (optional: NULL when absent from DT) */
	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
	if (IS_ERR(imx6_pcie->turnoff_reset)) {
		dev_err(dev, "Failed to get TURNOFF reset control\n");
		return PTR_ERR(imx6_pcie->turnoff_reset);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings (fall back to defaults when absent) */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	pci->link_gen = 1;
	/* Optional property; on failure link_gen keeps the default above */
	ret = of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);

	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		/* -ENODEV means no regulator in DT, which is fine */
		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
			return PTR_ERR(imx6_pcie->vpcie);
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_pcie_attach_pd(dev);
	if (ret)
		return ret;

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	/* Set the MSI-enable bit in the RC's own MSI capability */
	if (pci_msi_enabled()) {
		u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);

		val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
		val |= PCI_MSI_FLAGS_ENABLE;
		dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
	}

	return 0;
}
1179
/* Shutdown hook: quiesce the controller before reboot/kexec. */
static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}
1187
/*
 * Per-variant configuration, indexed by enum imx6_pcie_variants and
 * referenced from the of_device_id table below.  dbi_length (i.MX6Q
 * only) is used by imx6_pcie_quirk() to cap config-space reads.
 */
static const struct imx6_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
		.dbi_length = 0x200,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
	},
};
1214
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001215static const struct of_device_id imx6_pcie_of_match[] = {
David Brazdil0f672f62019-12-10 10:32:29 +00001216 { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
1217 { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
1218 { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
1219 { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
1220 { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001221 {},
1222};
1223
/*
 * Platform driver.  No .remove: the driver is built-in and bind/unbind
 * via sysfs is suppressed, so the device is never unbound.
 */
static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};
1235
/*
 * PCI header fixup for the Synopsys root port (vendor 0x16c3, device
 * 0xabcd): on variants with a non-zero drvdata->dbi_length (i.MX6Q),
 * limit the root port's visible config space so reads past the DBI
 * register set don't trigger an abort.
 */
static void imx6_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pcie_port *pp = bus->sysdata;

	/* Bus parent is the PCI bridge, its parent is this platform driver */
	if (!bus->dev.parent || !bus->dev.parent->parent)
		return;

	/* Make sure we only quirk devices associated with this driver */
	if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
		return;

	if (pci_is_root_bus(bus)) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

		/*
		 * Limit config length to avoid the kernel reading beyond
		 * the register set and causing an abort on i.MX 6Quad
		 */
		if (imx6_pcie->drvdata->dbi_length) {
			dev->cfg_size = imx6_pcie->drvdata->dbi_length;
			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
					dev->cfg_size);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
			PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
1266
/*
 * Driver entry point (device_initcall, not module_init: this driver is
 * built-in only).  On ARM it also installs the imprecise-abort handler
 * before registering the driver.
 */
static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);