// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

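/*
 * Note (added for clarity): the AFI exposes eight 32-bit MSI vector
 * registers (AFI_MSI_VEC0..7 below), so 8 * 32 = 256 distinct MSI
 * vectors can be decoded.
 */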
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0	0x6c
#define AFI_MSI_VEC1	0x70
#define AFI_MSI_VEC2	0x74
#define AFI_MSI_VEC3	0x78
#define AFI_MSI_VEC4	0x7c
#define AFI_MSI_VEC5	0x80
#define AFI_MSI_VEC6	0x84
#define AFI_MSI_VEC7	0x88

#define AFI_MSI_EN_VEC0	0x8c
#define AFI_MSI_EN_VEC1	0x90
#define AFI_MSI_EN_VEC2	0x94
#define AFI_MSI_EN_VEC3	0x98
#define AFI_MSI_EN_VEC4	0x9c
#define AFI_MSI_EN_VEC5	0xa0
#define AFI_MSI_EN_VEC6	0xa4
#define AFI_MSI_EN_VEC7	0xa8

#define AFI_CONFIGURATION			0xac
#define AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define AFI_INTR_MASK_INT_MASK	(1 << 0)
#define AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define AFI_INTR_CODE_MASK		0xf
#define AFI_INTR_INI_SLAVE_ERROR	1
#define AFI_INTR_INI_DECODE_ERROR	2
#define AFI_INTR_TARGET_ABORT		3
#define AFI_INTR_MASTER_ABORT		4
#define AFI_INTR_INVALID_WRITE		5
#define AFI_INTR_LEGACY			6
#define AFI_INTR_FPCI_DECODE_ERROR	7
#define AFI_INTR_AXI_DECODE_ERROR	8
#define AFI_INTR_FPCI_TIMEOUT		9
#define AFI_INTR_PE_PRSNT_SENSE		10
#define AFI_INTR_PE_CLKREQ_SENSE	11
#define AFI_INTR_CLKCLAMP_SENSE		12
#define AFI_INTR_RDY4PD_SENSE		13
#define AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define AFI_SM_INTR_INTA_ASSERT		(1 << 0)
#define AFI_SM_INTR_INTB_ASSERT		(1 << 1)
#define AFI_SM_INTR_INTC_ASSERT		(1 << 2)
#define AFI_SM_INTR_INTD_ASSERT		(1 << 3)
#define AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define AFI_INTR_EN_INI_DECERR		(1 << 1)
#define AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define AFI_INTR_EN_PRSNT_SENSE		(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define AFI_PCIE_CONFIG_PCIE_DISABLE(x)			(1 << ((x) + 1))
#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420		(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401		(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222		(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211		(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411		(0x2 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111		(0x2 << 20)
#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX_CTRL_RST		(1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL				0x160
#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL	(1 << 9)
#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN		(1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_ECTL_2_R1	0x00000e84
#define RP_ECTL_2_R1_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define RP_VEND_XP_DL_UP			(1 << 30)
#define RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2	0x00000fa8
#define RP_VEND_CTL2_PCA_ENABLE	(1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT			(0xe << 0)
#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT			(0xf << 0)
#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2	0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define PADS_CTL_IDDQ_1L	(1 << 0)
#define PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define PADS_PLL_CTL_LOCKDET			(1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0	0x000000c8
#define PADS_REFCLK_CFG1	0x000000cc
#define PADS_REFCLK_BIAS	0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
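/*
 * Illustrative decode (hypothetical value, not taken from NVIDIA or the
 * TRM): a 16-bit entry of 0xfa5c would program DRVI = 0xf (bits 15:12),
 * PREDI = 0xa (bits 11:8), E_TERM = 0 (bit 7) and TERM = 0x17 (bits 6:2).
 */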

#define PME_ACK_TIMEOUT 10000
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;
	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;

	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

struct tegra_pcie_bus {
	struct list_head list;
	unsigned int nr;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint configuration space mapping, to
 * make sure that the address (access to which generates the correct
 * configuration transaction) falls in this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
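/*
 * Worked example (illustrative only): bus 1, device 2, function 0,
 * register 0x104 yields
 *   ((0x104 & 0xf00) << 16) | (1 << 16) | (2 << 11) | (0 << 8) | 0x04
 *   = 0x01000000 | 0x00010000 | 0x00001000 | 0x00000004
 *   = 0x01011004
 * with the extended register nibble landing in bits [27:24].
 */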

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages which results in receiver errors
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
			   RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			 RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
		 RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
		 RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds in Tegra.
	 * Hence, the strategy followed here is to initially advertise only
	 * Gen-1 and, after the link is up, retrain the link to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIe require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct device *dev = pcie->dev;
	int err;

	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
	pci_add_resource(windows, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0) {
		pci_free_resource_list(windows);
		return err;
	}

	pci_remap_iospace(&pcie->pio, pcie->io.start);

	return 0;
}

static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;

	pci_unmap_iospace(&pcie->pio);
	pci_free_resource_list(windows);
}

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, " FPCI address: %10llx\n", address);
		else
			dev_err(dev, " FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
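/*
 * The AFI_FPCI_BAR* values programmed below carry FPCI address bits
 * [39:12] in register bits [31:4] (the memory BARs additionally set
 * bit 0, whose exact meaning is not documented here). Illustrative
 * example: a prefetchable window at AXI address 0x20000000 encodes to
 * (((0x20000000 >> 12) & 0x0fffffff) << 4) | 0x1 = 0x200001.
 */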
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}

static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_get(dev, np, name);
	kfree(name);

	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}


static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto phys_put;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);
	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

static int tegra_msi_alloc(struct tegra_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
{
	struct device *dev = chip->chip.dev;

	mutex_lock(&chip->lock);

	if (!test_bit(irq, chip->used))
		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
	else
		clear_bit(irq, chip->used);

	mutex_unlock(&chip->lock);
}

static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct device *dev = pcie->dev;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird: who triggered this?
				 * just clear it
				 */
1647 dev_info(dev, "unexpected MSI\n");
1648 }
1649
1650 /* see if there's any more pending in this vector */
1651 reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1652
1653 processed++;
1654 }
1655 }
1656
1657 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1658}
1659
1660static int tegra_msi_setup_irq(struct msi_controller *chip,
1661 struct pci_dev *pdev, struct msi_desc *desc)
1662{
1663 struct tegra_msi *msi = to_tegra_msi(chip);
1664 struct msi_msg msg;
1665 unsigned int irq;
1666 int hwirq;
1667
1668 hwirq = tegra_msi_alloc(msi);
1669 if (hwirq < 0)
1670 return hwirq;
1671
1672 irq = irq_create_mapping(msi->domain, hwirq);
1673 if (!irq) {
1674 tegra_msi_free(msi, hwirq);
1675 return -EINVAL;
1676 }
1677
1678 irq_set_msi_desc(irq, desc);
1679
1680 msg.address_lo = lower_32_bits(msi->phys);
1681 msg.address_hi = upper_32_bits(msi->phys);
1682 msg.data = hwirq;
1683
1684 pci_write_msi_msg(irq, &msg);
1685
1686 return 0;
1687}
1688
1689static void tegra_msi_teardown_irq(struct msi_controller *chip,
1690 unsigned int irq)
1691{
1692 struct tegra_msi *msi = to_tegra_msi(chip);
1693 struct irq_data *d = irq_get_irq_data(irq);
1694 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1695
1696 irq_dispose_mapping(irq);
1697 tegra_msi_free(msi, hwirq);
1698}
1699
1700static struct irq_chip tegra_msi_irq_chip = {
1701 .name = "Tegra PCIe MSI",
1702 .irq_enable = pci_msi_unmask_irq,
1703 .irq_disable = pci_msi_mask_irq,
1704 .irq_mask = pci_msi_mask_irq,
1705 .irq_unmask = pci_msi_unmask_irq,
1706};
1707
1708static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1709 irq_hw_number_t hwirq)
1710{
1711 irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1712 irq_set_chip_data(irq, domain->host_data);
1713
1714 tegra_cpuidle_pcie_irqs_in_use();
1715
1716 return 0;
1717}
1718
1719static const struct irq_domain_ops msi_domain_ops = {
1720 .map = tegra_msi_map,
1721};
1722
1723static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1724{
1725 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1726 struct platform_device *pdev = to_platform_device(pcie->dev);
1727 struct tegra_msi *msi = &pcie->msi;
1728 struct device *dev = pcie->dev;
1729 int err;
1730
1731 mutex_init(&msi->lock);
1732
1733 msi->chip.dev = dev;
1734 msi->chip.setup_irq = tegra_msi_setup_irq;
1735 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1736
1737 msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1738 &msi_domain_ops, &msi->chip);
1739 if (!msi->domain) {
1740 dev_err(dev, "failed to create IRQ domain\n");
1741 return -ENOMEM;
1742 }
1743
1744 err = platform_get_irq_byname(pdev, "msi");
1745 if (err < 0) {
1746 dev_err(dev, "failed to get IRQ: %d\n", err);
David Brazdil0f672f62019-12-10 10:32:29 +00001747 goto free_irq_domain;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001748 }
1749
1750 msi->irq = err;
1751
1752 err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1753 tegra_msi_irq_chip.name, pcie);
1754 if (err < 0) {
1755 dev_err(dev, "failed to request IRQ: %d\n", err);
David Brazdil0f672f62019-12-10 10:32:29 +00001756 goto free_irq_domain;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001757 }
1758
David Brazdil0f672f62019-12-10 10:32:29 +00001759 /* Though the PCIe controller can address >32-bit address space, to
1760 * facilitate endpoints that support only 32-bit MSI target address,
1761 * the mask is set to 32-bit to make sure that MSI target address is
1762 * always a 32-bit address
1763 */
1764 err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1765 if (err < 0) {
1766 dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1767 goto free_irq;
1768 }
1769
1770 msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1771 DMA_ATTR_NO_KERNEL_MAPPING);
1772 if (!msi->virt) {
1773 dev_err(dev, "failed to allocate DMA memory for MSI\n");
1774 err = -ENOMEM;
1775 goto free_irq;
1776 }
1777
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001778 host->msi = &msi->chip;
1779
1780 return 0;
1781
David Brazdil0f672f62019-12-10 10:32:29 +00001782free_irq:
1783 free_irq(msi->irq, pcie);
1784free_irq_domain:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001785 irq_domain_remove(msi->domain);
1786 return err;
1787}
1788
1789static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1790{
1791 const struct tegra_pcie_soc *soc = pcie->soc;
1792 struct tegra_msi *msi = &pcie->msi;
1793 u32 reg;
1794
1795 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1796 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1797 /* this register is in 4K increments */
1798 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
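	/*
	 * AFI_MSI_BAR_SZ is in units of 4 KiB, so the value 1 written above
	 * maps a single 4 KiB window at msi->phys, matching the one page
	 * allocated in tegra_pcie_msi_setup() (assuming a 4 KiB PAGE_SIZE).
	 */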
1799
1800 /* enable all MSI vectors */
1801 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1802 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1803 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1804 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1805 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1806 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1807 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1808 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1809
1810 /* and unmask the MSI interrupt */
1811 reg = afi_readl(pcie, AFI_INTR_MASK);
1812 reg |= AFI_INTR_MASK_MSI_MASK;
1813 afi_writel(pcie, reg, AFI_INTR_MASK);
1814}
1815
1816static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1817{
1818 struct tegra_msi *msi = &pcie->msi;
1819 unsigned int i, irq;
1820
1821 dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1822 DMA_ATTR_NO_KERNEL_MAPPING);
1823
1824 if (msi->irq > 0)
1825 free_irq(msi->irq, pcie);
1826
1827 for (i = 0; i < INT_PCI_MSI_NR; i++) {
1828 irq = irq_find_mapping(msi->domain, i);
1829 if (irq > 0)
1830 irq_dispose_mapping(irq);
1831 }
1832
1833 irq_domain_remove(msi->domain);
1834}
1835
1836static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1837{
1838 u32 value;
1839
1840 /* mask the MSI interrupt */
1841 value = afi_readl(pcie, AFI_INTR_MASK);
1842 value &= ~AFI_INTR_MASK_MSI_MASK;
1843 afi_writel(pcie, value, AFI_INTR_MASK);
1844
1845 /* disable all MSI vectors */
1846 afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1847 afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1848 afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1849 afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1850 afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1851 afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1852 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1853 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1854
1855 return 0;
1856}
1857
1858static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1859{
1860 u32 value;
1861
1862 value = afi_readl(pcie, AFI_INTR_MASK);
1863 value &= ~AFI_INTR_MASK_INT_MASK;
1864 afi_writel(pcie, value, AFI_INTR_MASK);
1865}
1866
1867static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1868 u32 *xbar)
1869{
1870 struct device *dev = pcie->dev;
1871 struct device_node *np = dev->of_node;
1872
1873 if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1874 switch (lanes) {
1875 case 0x010004:
1876 dev_info(dev, "4x1, 1x1 configuration\n");
1877 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1878 return 0;
1879
1880 case 0x010102:
1881 dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
1882 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1883 return 0;
1884
1885 case 0x010101:
1886 dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1887 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1888 return 0;
1889
1890 default:
1891 dev_info(dev, "invalid lane configuration in DT, "
1892 "falling back to default 2x1, 1x1, 1x1 "
1893 "configuration\n");
1894 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1895 return 0;
1896 }
1897 } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1898 of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1899 switch (lanes) {
1900 case 0x0000104:
1901 dev_info(dev, "4x1, 1x1 configuration\n");
1902 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1903 return 0;
1904
1905 case 0x0000102:
1906 dev_info(dev, "2x1, 1x1 configuration\n");
1907 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1908 return 0;
1909 }
1910 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1911 switch (lanes) {
1912 case 0x00000204:
1913 dev_info(dev, "4x1, 2x1 configuration\n");
1914 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1915 return 0;
1916
1917 case 0x00020202:
1918 dev_info(dev, "2x3 configuration\n");
1919 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1920 return 0;
1921
1922 case 0x00010104:
1923 dev_info(dev, "4x1, 1x2 configuration\n");
1924 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1925 return 0;
1926 }
1927 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1928 switch (lanes) {
1929 case 0x00000004:
1930 dev_info(dev, "single-mode configuration\n");
1931 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1932 return 0;
1933
1934 case 0x00000202:
1935 dev_info(dev, "dual-mode configuration\n");
1936 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1937 return 0;
1938 }
1939 }
1940
1941 return -EINVAL;
1942}
1943
1944/*
1945 * Check whether a given set of supplies is available in a device tree node.
1946 * This is used to check whether the new or the legacy device tree bindings
1947 * should be used.
1948 */
1949static bool of_regulator_bulk_available(struct device_node *np,
1950 struct regulator_bulk_data *supplies,
1951 unsigned int num_supplies)
1952{
1953 char property[32];
1954 unsigned int i;
1955
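	/*
	 * For example, a bulk entry with .supply = "avdd-pex" requires an
	 * "avdd-pex-supply" property in the device tree node.
	 */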
1956 for (i = 0; i < num_supplies; i++) {
1957 snprintf(property, 32, "%s-supply", supplies[i].supply);
1958
1959 if (of_find_property(np, property, NULL) == NULL)
1960 return false;
1961 }
1962
1963 return true;
1964}
1965
1966/*
1967 * Old versions of the device tree binding for this device used a set of power
1968 * supplies that didn't match the hardware inputs. This happened to work for a
1969 * number of cases but is not future-proof. However, to preserve backwards-
1970 * compatibility with old device trees, this function will try to use the old
1971 * set of supplies.
1972 */
1973static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1974{
1975 struct device *dev = pcie->dev;
1976 struct device_node *np = dev->of_node;
1977
1978 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1979 pcie->num_supplies = 3;
1980 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1981 pcie->num_supplies = 2;
1982
1983 if (pcie->num_supplies == 0) {
1984 dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1985 return -ENODEV;
1986 }
1987
1988 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1989 sizeof(*pcie->supplies),
1990 GFP_KERNEL);
1991 if (!pcie->supplies)
1992 return -ENOMEM;
1993
1994 pcie->supplies[0].supply = "pex-clk";
1995 pcie->supplies[1].supply = "vdd";
1996
1997 if (pcie->num_supplies > 2)
1998 pcie->supplies[2].supply = "avdd";
1999
2000 return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
2001}
2002
2003/*
2004 * Obtains the list of regulators required for a particular generation of the
2005 * IP block.
2006 *
2007 * This would've been nice to do simply by providing static tables for use
2008 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
2009 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
2010 * and either seems to be optional depending on which ports are being used.
2011 */
2012static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
2013{
2014 struct device *dev = pcie->dev;
2015 struct device_node *np = dev->of_node;
2016 unsigned int i = 0;
2017
2018 if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2019 pcie->num_supplies = 4;
2020
2021 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2022 sizeof(*pcie->supplies),
2023 GFP_KERNEL);
2024 if (!pcie->supplies)
2025 return -ENOMEM;
2026
2027 pcie->supplies[i++].supply = "dvdd-pex";
2028 pcie->supplies[i++].supply = "hvdd-pex-pll";
2029 pcie->supplies[i++].supply = "hvdd-pex";
2030 pcie->supplies[i++].supply = "vddio-pexctl-aud";
2031 } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2032 pcie->num_supplies = 6;
2033
2034 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2035 sizeof(*pcie->supplies),
2036 GFP_KERNEL);
2037 if (!pcie->supplies)
2038 return -ENOMEM;
2039
2040 pcie->supplies[i++].supply = "avdd-pll-uerefe";
2041 pcie->supplies[i++].supply = "hvddio-pex";
2042 pcie->supplies[i++].supply = "dvddio-pex";
2043 pcie->supplies[i++].supply = "dvdd-pex-pll";
2044 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
2045 pcie->supplies[i++].supply = "vddio-pex-ctl";
2046 } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2047 pcie->num_supplies = 7;
2048
2049 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2050 sizeof(*pcie->supplies),
2051 GFP_KERNEL);
2052 if (!pcie->supplies)
2053 return -ENOMEM;
2054
2055 pcie->supplies[i++].supply = "avddio-pex";
2056 pcie->supplies[i++].supply = "dvddio-pex";
2057 pcie->supplies[i++].supply = "avdd-pex-pll";
2058 pcie->supplies[i++].supply = "hvdd-pex";
2059 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
2060 pcie->supplies[i++].supply = "vddio-pex-ctl";
2061 pcie->supplies[i++].supply = "avdd-pll-erefe";
2062 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2063 bool need_pexa = false, need_pexb = false;
2064
2065 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2066 if (lane_mask & 0x0f)
2067 need_pexa = true;
2068
2069 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2070 if (lane_mask & 0x30)
2071 need_pexb = true;
2072
2073 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2074 (need_pexb ? 2 : 0);
2075
2076 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2077 sizeof(*pcie->supplies),
2078 GFP_KERNEL);
2079 if (!pcie->supplies)
2080 return -ENOMEM;
2081
2082 pcie->supplies[i++].supply = "avdd-pex-pll";
2083 pcie->supplies[i++].supply = "hvdd-pex";
2084 pcie->supplies[i++].supply = "vddio-pex-ctl";
2085 pcie->supplies[i++].supply = "avdd-plle";
2086
2087 if (need_pexa) {
2088 pcie->supplies[i++].supply = "avdd-pexa";
2089 pcie->supplies[i++].supply = "vdd-pexa";
2090 }
2091
2092 if (need_pexb) {
2093 pcie->supplies[i++].supply = "avdd-pexb";
2094 pcie->supplies[i++].supply = "vdd-pexb";
2095 }
2096 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2097 pcie->num_supplies = 5;
2098
2099 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2100 sizeof(*pcie->supplies),
2101 GFP_KERNEL);
2102 if (!pcie->supplies)
2103 return -ENOMEM;
2104
2105 pcie->supplies[0].supply = "avdd-pex";
2106 pcie->supplies[1].supply = "vdd-pex";
2107 pcie->supplies[2].supply = "avdd-pex-pll";
2108 pcie->supplies[3].supply = "avdd-plle";
2109 pcie->supplies[4].supply = "vddio-pex-clk";
2110 }
2111
2112 if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2113 pcie->num_supplies))
2114 return devm_regulator_bulk_get(dev, pcie->num_supplies,
2115 pcie->supplies);
2116
2117 /*
2118 * If not all regulators are available for this new scheme, assume
2119 * that the device tree complies with an older version of the device
2120 * tree binding.
2121 */
2122 dev_info(dev, "using legacy DT binding for power supplies\n");
2123
2124 devm_kfree(dev, pcie->supplies);
2125 pcie->num_supplies = 0;
2126
2127 return tegra_pcie_get_legacy_regulators(pcie);
2128}
2129
2130static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2131{
2132 struct device *dev = pcie->dev;
2133 struct device_node *np = dev->of_node, *port;
2134 const struct tegra_pcie_soc *soc = pcie->soc;
2135 struct of_pci_range_parser parser;
2136 struct of_pci_range range;
2137 u32 lanes = 0, mask = 0;
2138 unsigned int lane = 0;
2139 struct resource res;
2140 int err;
2141
2142 if (of_pci_range_parser_init(&parser, np)) {
2143 dev_err(dev, "missing \"ranges\" property\n");
2144 return -EINVAL;
2145 }
2146
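	/*
	 * Each "ranges" entry translates a PCI bus address range to a CPU
	 * physical range. An illustrative (not board-accurate) entry:
	 *
	 *   ranges = <0x02000000 0 0x13000000 0x13000000 0 0x0d000000>;
	 *
	 * where the 0x02000000 flags word marks a 32-bit non-prefetchable
	 * memory window (0x42000000 would mark it prefetchable, 0x01000000
	 * an I/O window).
	 */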
2147 for_each_of_pci_range(&parser, &range) {
2148 err = of_pci_range_to_resource(&range, np, &res);
2149 if (err < 0)
2150 return err;
2151
2152 switch (res.flags & IORESOURCE_TYPE_BITS) {
2153 case IORESOURCE_IO:
2154 /* Track the bus -> CPU I/O mapping offset. */
2155 pcie->offset.io = res.start - range.pci_addr;
2156
2157 memcpy(&pcie->pio, &res, sizeof(res));
2158 pcie->pio.name = np->full_name;
2159
2160 /*
2161 * The Tegra PCIe host bridge uses this to program the
2162 * mapping of the I/O space to the physical address,
2163 * so we override the .start and .end fields here that
2164 * of_pci_range_to_resource() converted to I/O space.
2165 * We also set the IORESOURCE_MEM type to clarify that
2166 * the resource is in the physical memory space.
2167 */
2168 pcie->io.start = range.cpu_addr;
2169 pcie->io.end = range.cpu_addr + range.size - 1;
2170 pcie->io.flags = IORESOURCE_MEM;
2171 pcie->io.name = "I/O";
2172
2173 memcpy(&res, &pcie->io, sizeof(res));
2174 break;
2175
2176 case IORESOURCE_MEM:
2177 /*
2178 * Track the bus -> CPU memory mapping offset. This
2179 * assumes that the prefetchable and non-prefetchable
2180 * regions will be the last of type IORESOURCE_MEM in
2181 * the ranges property.
2182 */
2183 pcie->offset.mem = res.start - range.pci_addr;
2184
2185 if (res.flags & IORESOURCE_PREFETCH) {
2186 memcpy(&pcie->prefetch, &res, sizeof(res));
2187 pcie->prefetch.name = "prefetchable";
2188 } else {
2189 memcpy(&pcie->mem, &res, sizeof(res));
2190 pcie->mem.name = "non-prefetchable";
2191 }
2192 break;
2193 }
2194 }
2195
2196 err = of_pci_parse_bus_range(np, &pcie->busn);
2197 if (err < 0) {
2198 dev_err(dev, "failed to parse bus-range property: %d\n", err);
2199 pcie->busn.name = np->name;
2200 pcie->busn.start = 0;
2201 pcie->busn.end = 0xff;
2202 pcie->busn.flags = IORESOURCE_BUS;
2203 }
2204
2205 /* parse root ports */
2206 for_each_child_of_node(np, port) {
2207 struct tegra_pcie_port *rp;
2208 unsigned int index;
2209 u32 value;
2210 char *label;
2211
2212 err = of_pci_get_devfn(port);
2213 if (err < 0) {
2214 dev_err(dev, "failed to parse address: %d\n", err);
2215 goto err_node_put;
2216 }
2217
2218 index = PCI_SLOT(err);
2219
2220 if (index < 1 || index > soc->num_ports) {
2221 dev_err(dev, "invalid port number: %d\n", index);
2222 err = -EINVAL;
2223 goto err_node_put;
2224 }
2225
2226 index--;
2227
2228 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2229 if (err < 0) {
2230 dev_err(dev, "failed to parse # of lanes: %d\n",
2231 err);
2232 goto err_node_put;
2233 }
2234
2235 if (value > 16) {
2236 dev_err(dev, "invalid # of lanes: %u\n", value);
2237 err = -EINVAL;
2238 goto err_node_put;
2239 }
2240
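		/*
		 * Pack this port's lane count into one byte of "lanes",
		 * indexed by root port number. For example (illustrative),
		 * a Tegra186 device tree whose root ports 0 and 2 use
		 * nvidia,num-lanes = <4> and <1> respectively yields
		 * lanes = 0x010004, which tegra_pcie_get_xbar_config()
		 * maps to the "4x1, 1x1" crossbar setting.
		 */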
2241 lanes |= value << (index << 3);
2242
2243 if (!of_device_is_available(port)) {
2244 lane += value;
2245 continue;
2246 }
2247
2248 mask |= ((1 << value) - 1) << lane;
2249 lane += value;
2250
2251 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2252 if (!rp) {
2253 err = -ENOMEM;
2254 goto err_node_put;
2255 }
2256
2257 err = of_address_to_resource(port, 0, &rp->regs);
2258 if (err < 0) {
2259 dev_err(dev, "failed to parse address: %d\n", err);
2260 goto err_node_put;
2261 }
2262
2263 INIT_LIST_HEAD(&rp->list);
2264 rp->index = index;
2265 rp->lanes = value;
2266 rp->pcie = pcie;
2267 rp->np = port;
2268
2269 rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2270 if (IS_ERR(rp->base))
2271 return PTR_ERR(rp->base);
2272
2273 label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2274 if (!label) {
2275 dev_err(dev, "failed to create reset GPIO label\n");
2276 return -ENOMEM;
2277 }
2278
2279 /*
2280 * Returns -ENOENT if reset-gpios property is not populated
2281 * and in this case fall back to using AFI per port register
2282 * to toggle PERST# SFIO line.
2283 */
2284 rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
2285 "reset-gpios", 0,
2286 GPIOD_OUT_LOW,
2287 label);
2288 if (IS_ERR(rp->reset_gpio)) {
2289 if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2290 rp->reset_gpio = NULL;
2291 } else {
2292 dev_err(dev, "failed to get reset GPIO: %ld\n",
2293 PTR_ERR(rp->reset_gpio));
2294 return PTR_ERR(rp->reset_gpio);
2295 }
2296 }
2297
2298 list_add_tail(&rp->list, &pcie->ports);
2299 }
2300
2301 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2302 if (err < 0) {
2303 dev_err(dev, "invalid lane configuration\n");
2304 return err;
2305 }
2306
2307 err = tegra_pcie_get_regulators(pcie, mask);
2308 if (err < 0)
2309 return err;
2310
2311 return 0;
2312
2313err_node_put:
2314 of_node_put(port);
2315 return err;
2316}
2317
2318/*
2319 * FIXME: If there are no PCIe cards attached, then calling this function
2320 * can result in the increase of the bootup time as there are big timeout
2321 * loops.
2322 */
2323#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
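/*
 * Worst case with no device attached: 3 retries x 2 polling loops x 200
 * iterations of usleep_range(1000, 2000), i.e. on the order of seconds
 * per port (hence the FIXME above).
 */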
2324static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2325{
2326 struct device *dev = port->pcie->dev;
2327 unsigned int retries = 3;
2328 unsigned long value;
2329
2330 /* override presence detection */
2331 value = readl(port->base + RP_PRIV_MISC);
2332 value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2333 value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2334 writel(value, port->base + RP_PRIV_MISC);
2335
2336 do {
2337 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2338
2339 do {
2340 value = readl(port->base + RP_VEND_XP);
2341
2342 if (value & RP_VEND_XP_DL_UP)
2343 break;
2344
2345 usleep_range(1000, 2000);
2346 } while (--timeout);
2347
2348 if (!timeout) {
2349 dev_dbg(dev, "link %u down, retrying\n", port->index);
2350 goto retry;
2351 }
2352
2353 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2354
2355 do {
2356 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2357
2358 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2359 return true;
2360
2361 usleep_range(1000, 2000);
2362 } while (--timeout);
2363
2364retry:
2365 tegra_pcie_port_reset(port);
2366 } while (--retries);
2367
2368 return false;
2369}
2370
2371static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2372{
2373 struct device *dev = pcie->dev;
2374 struct tegra_pcie_port *port;
2375 ktime_t deadline;
2376 u32 value;
2377
2378 list_for_each_entry(port, &pcie->ports, list) {
2379 /*
2380 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2381 * is not supported by Tegra. tegra_pcie_change_link_speed()
2382 * is called only for Tegra chips which support Gen2.
2383 * So there is no harm if the supported link speed is not verified.
2384 */
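		/*
		 * Program the target link speed field of Link Control 2 to
		 * 5.0 GT/s (Gen2) before triggering retraining below; the
		 * PCI_EXP_LNKSTA_CLS_* values share the field's encoding.
		 */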
2385 value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2386 value &= ~PCI_EXP_LNKSTA_CLS;
2387 value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2388 writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2389
2390 /*
2391 * Poll until link comes back from recovery to avoid race
2392 * condition.
2393 */
2394 deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2395
2396 while (ktime_before(ktime_get(), deadline)) {
2397 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2398 if ((value & PCI_EXP_LNKSTA_LT) == 0)
2399 break;
2400
2401 usleep_range(2000, 3000);
2402 }
2403
2404 if (value & PCI_EXP_LNKSTA_LT)
2405 dev_warn(dev, "PCIe port %u link is in recovery\n",
2406 port->index);
2407
2408 /* Retrain the link */
2409 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2410 value |= PCI_EXP_LNKCTL_RL;
2411 writel(value, port->base + RP_LINK_CONTROL_STATUS);
2412
2413 deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2414
2415 while (ktime_before(ktime_get(), deadline)) {
2416 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2417 if ((value & PCI_EXP_LNKSTA_LT) == 0)
2418 break;
2419
2420 usleep_range(2000, 3000);
2421 }
2422
2423 if (value & PCI_EXP_LNKSTA_LT)
2424 dev_err(dev, "failed to retrain link of port %u\n",
2425 port->index);
2426 }
2427}
2428
2429static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2430{
2431 struct device *dev = pcie->dev;
2432 struct tegra_pcie_port *port, *tmp;
2433
2434 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2435 dev_info(dev, "probing port %u, using %u lanes\n",
2436 port->index, port->lanes);
2437
2438 tegra_pcie_port_enable(port);
2439 }
2440
2441 /* Start LTSSM from Tegra side */
2442 reset_control_deassert(pcie->pcie_xrst);
2443
2444 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2445 if (tegra_pcie_port_check_link(port))
2446 continue;
2447
2448 dev_info(dev, "link %u down, ignoring\n", port->index);
2449
2450 tegra_pcie_port_disable(port);
2451 tegra_pcie_port_free(port);
2452 }
2453
2454 if (pcie->soc->has_gen2)
2455 tegra_pcie_change_link_speed(pcie);
2456}
2457
2458static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2459{
2460 struct tegra_pcie_port *port, *tmp;
2461
2462 reset_control_assert(pcie->pcie_xrst);
2463
2464 list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2465 tegra_pcie_port_disable(port);
2466}
2467
2468static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2469 { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
2470 { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2471};
2472
2473static const struct tegra_pcie_soc tegra20_pcie = {
2474 .num_ports = 2,
2475 .ports = tegra20_pcie_ports,
2476 .msi_base_shift = 0,
2477 .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2478 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2479 .pads_refclk_cfg0 = 0xfa5cfa5c,
2480 .has_pex_clkreq_en = false,
2481 .has_pex_bias_ctrl = false,
2482 .has_intr_prsnt_sense = false,
2483 .has_cml_clk = false,
2484 .has_gen2 = false,
2485 .force_pca_enable = false,
2486 .program_uphy = true,
2487 .update_clamp_threshold = false,
2488 .program_deskew_time = false,
2489 .update_fc_timer = false,
2490 .has_cache_bars = true,
2491 .ectl.enable = false,
2492};
2493
2494static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2495 { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
2496 { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2497 { .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2498};
2499
2500static const struct tegra_pcie_soc tegra30_pcie = {
2501 .num_ports = 3,
2502 .ports = tegra30_pcie_ports,
2503 .msi_base_shift = 8,
2504 .afi_pex2_ctrl = 0x128,
2505 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2506 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2507 .pads_refclk_cfg0 = 0xfa5cfa5c,
2508 .pads_refclk_cfg1 = 0xfa5cfa5c,
2509 .has_pex_clkreq_en = true,
2510 .has_pex_bias_ctrl = true,
2511 .has_intr_prsnt_sense = true,
2512 .has_cml_clk = true,
2513 .has_gen2 = false,
2514 .force_pca_enable = false,
2515 .program_uphy = true,
2516 .update_clamp_threshold = false,
2517 .program_deskew_time = false,
2518 .update_fc_timer = false,
2519 .has_cache_bars = false,
2520 .ectl.enable = false,
2521};
2522
2523static const struct tegra_pcie_soc tegra124_pcie = {
2524 .num_ports = 2,
2525 .ports = tegra20_pcie_ports,
2526 .msi_base_shift = 8,
2527 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2528 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2529 .pads_refclk_cfg0 = 0x44ac44ac,
2530 .has_pex_clkreq_en = true,
2531 .has_pex_bias_ctrl = true,
2532 .has_intr_prsnt_sense = true,
2533 .has_cml_clk = true,
2534 .has_gen2 = true,
2535 .force_pca_enable = false,
2536 .program_uphy = true,
2537 .update_clamp_threshold = true,
2538 .program_deskew_time = false,
2539 .update_fc_timer = false,
2540 .has_cache_bars = false,
2541 .ectl.enable = false,
2542};
2543
2544static const struct tegra_pcie_soc tegra210_pcie = {
2545 .num_ports = 2,
2546 .ports = tegra20_pcie_ports,
2547 .msi_base_shift = 8,
2548 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2549 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2550 .pads_refclk_cfg0 = 0x90b890b8,
2551 /* FC threshold is bit[25:18] */
2552 .update_fc_threshold = 0x01800000,
2553 .has_pex_clkreq_en = true,
2554 .has_pex_bias_ctrl = true,
2555 .has_intr_prsnt_sense = true,
2556 .has_cml_clk = true,
2557 .has_gen2 = true,
2558 .force_pca_enable = true,
2559 .program_uphy = true,
2560 .update_clamp_threshold = true,
2561 .program_deskew_time = true,
2562 .update_fc_timer = true,
2563 .has_cache_bars = false,
2564 .ectl = {
2565 .regs = {
2566 .rp_ectl_2_r1 = 0x0000000f,
2567 .rp_ectl_4_r1 = 0x00000067,
2568 .rp_ectl_5_r1 = 0x55010000,
2569 .rp_ectl_6_r1 = 0x00000001,
2570 .rp_ectl_2_r2 = 0x0000008f,
2571 .rp_ectl_4_r2 = 0x000000c7,
2572 .rp_ectl_5_r2 = 0x55010000,
2573 .rp_ectl_6_r2 = 0x00000001,
2574 },
2575 .enable = true,
2576 },
2577};
2578
2579static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2580 { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
2581 { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2582 { .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2583};
2584
2585static const struct tegra_pcie_soc tegra186_pcie = {
2586 .num_ports = 3,
2587 .ports = tegra186_pcie_ports,
2588 .msi_base_shift = 8,
2589 .afi_pex2_ctrl = 0x19c,
2590 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2591 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2592 .pads_refclk_cfg0 = 0x80b880b8,
2593 .pads_refclk_cfg1 = 0x000480b8,
2594 .has_pex_clkreq_en = true,
2595 .has_pex_bias_ctrl = true,
2596 .has_intr_prsnt_sense = true,
2597 .has_cml_clk = false,
2598 .has_gen2 = true,
2599 .force_pca_enable = false,
2600 .program_uphy = false,
2601 .update_clamp_threshold = false,
2602 .program_deskew_time = false,
2603 .update_fc_timer = false,
2604 .has_cache_bars = false,
2605 .ectl.enable = false,
2606};
2607
2608static const struct of_device_id tegra_pcie_of_match[] = {
2609 { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2610 { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2611 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2612 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2613 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2614 { },
2615};
2616MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2617
2618static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2619{
2620 struct tegra_pcie *pcie = s->private;
2621
2622 if (list_empty(&pcie->ports))
2623 return NULL;
2624
2625 seq_printf(s, "Index Status\n");
2626
2627 return seq_list_start(&pcie->ports, *pos);
2628}
2629
2630static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2631{
2632 struct tegra_pcie *pcie = s->private;
2633
2634 return seq_list_next(v, &pcie->ports, pos);
2635}
2636
2637static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2638{
2639}
2640
2641static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2642{
2643 bool up = false, active = false;
2644 struct tegra_pcie_port *port;
2645 unsigned int value;
2646
2647 port = list_entry(v, struct tegra_pcie_port, list);
2648
2649 value = readl(port->base + RP_VEND_XP);
2650
2651 if (value & RP_VEND_XP_DL_UP)
2652 up = true;
2653
2654 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2655
2656 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2657 active = true;
2658
2659 seq_printf(s, "%2u ", port->index);
2660
2661 if (up)
2662 seq_printf(s, "up");
2663
2664 if (active) {
2665 if (up)
2666 seq_printf(s, ", ");
2667
2668 seq_printf(s, "active");
2669 }
2670
2671 seq_printf(s, "\n");
2672 return 0;
2673}
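/*
 * Illustrative output of the resulting debugfs file
 * (/sys/kernel/debug/pcie/ports), for one port up and active and one
 * port down:
 *
 *   Index Status
 *    0 up, active
 *    1
 */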
2674
2675static const struct seq_operations tegra_pcie_ports_seq_ops = {
2676 .start = tegra_pcie_ports_seq_start,
2677 .next = tegra_pcie_ports_seq_next,
2678 .stop = tegra_pcie_ports_seq_stop,
2679 .show = tegra_pcie_ports_seq_show,
2680};
2681
2682static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2683{
2684 struct tegra_pcie *pcie = inode->i_private;
2685 struct seq_file *s;
2686 int err;
2687
2688 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2689 if (err)
2690 return err;
2691
2692 s = file->private_data;
2693 s->private = pcie;
2694
2695 return 0;
2696}
2697
2698static const struct file_operations tegra_pcie_ports_ops = {
2699 .owner = THIS_MODULE,
2700 .open = tegra_pcie_ports_open,
2701 .read = seq_read,
2702 .llseek = seq_lseek,
2703 .release = seq_release,
2704};
2705
2706static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2707{
2708 debugfs_remove_recursive(pcie->debugfs);
2709 pcie->debugfs = NULL;
2710}
2711
2712static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2713{
2714 struct dentry *file;
2715
2716 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2717 if (!pcie->debugfs)
2718 return -ENOMEM;
2719
2720 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2721 pcie, &tegra_pcie_ports_ops);
2722 if (!file)
2723 goto remove;
2724
2725 return 0;
2726
2727remove:
2728 tegra_pcie_debugfs_exit(pcie);
2729 return -ENOMEM;
2730}
2731
2732static int tegra_pcie_probe(struct platform_device *pdev)
2733{
2734 struct device *dev = &pdev->dev;
2735 struct pci_host_bridge *host;
2736 struct tegra_pcie *pcie;
2737 struct pci_bus *child;
2738 int err;
2739
2740 host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2741 if (!host)
2742 return -ENOMEM;
2743
2744 pcie = pci_host_bridge_priv(host);
2745 host->sysdata = pcie;
2746 platform_set_drvdata(pdev, pcie);
2747
2748 pcie->soc = of_device_get_match_data(dev);
2749 INIT_LIST_HEAD(&pcie->ports);
2750 pcie->dev = dev;
2751
2752 err = tegra_pcie_parse_dt(pcie);
2753 if (err < 0)
2754 return err;
2755
2756 err = tegra_pcie_get_resources(pcie);
2757 if (err < 0) {
2758 dev_err(dev, "failed to request resources: %d\n", err);
2759 return err;
2760 }
2761
2762 err = tegra_pcie_msi_setup(pcie);
2763 if (err < 0) {
2764 dev_err(dev, "failed to enable MSI support: %d\n", err);
2765 goto put_resources;
2766 }
2767
2768 pm_runtime_enable(pcie->dev);
2769 err = pm_runtime_get_sync(pcie->dev);
2770 if (err < 0) {
2771 dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2772 goto pm_runtime_put;
2773 }
2774
2775 err = tegra_pcie_request_resources(pcie);
2776 if (err)
2777 goto pm_runtime_put;
2778
2779 host->busnr = pcie->busn.start;
2780 host->dev.parent = &pdev->dev;
2781 host->ops = &tegra_pcie_ops;
2782 host->map_irq = tegra_pcie_map_irq;
2783 host->swizzle_irq = pci_common_swizzle;
2784
2785 err = pci_scan_root_bus_bridge(host);
2786 if (err < 0) {
2787 dev_err(dev, "failed to register host: %d\n", err);
2788 goto free_resources;
2789 }
2790
2791 pci_bus_size_bridges(host->bus);
2792 pci_bus_assign_resources(host->bus);
2793
2794 list_for_each_entry(child, &host->bus->children, node)
2795 pcie_bus_configure_settings(child);
2796
2797 pci_bus_add_devices(host->bus);
2798
2799 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2800 err = tegra_pcie_debugfs_init(pcie);
2801 if (err < 0)
2802 dev_err(dev, "failed to setup debugfs: %d\n", err);
2803 }
2804
2805 return 0;
2806
2807free_resources:
2808 tegra_pcie_free_resources(pcie);
2809pm_runtime_put:
2810 pm_runtime_put_sync(pcie->dev);
2811 pm_runtime_disable(pcie->dev);
2812 tegra_pcie_msi_teardown(pcie);
2813put_resources:
2814 tegra_pcie_put_resources(pcie);
2815 return err;
2816}
2817
2818static int tegra_pcie_remove(struct platform_device *pdev)
2819{
2820 struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2821 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2822 struct tegra_pcie_port *port, *tmp;
2823
2824 if (IS_ENABLED(CONFIG_DEBUG_FS))
2825 tegra_pcie_debugfs_exit(pcie);
2826
2827 pci_stop_root_bus(host->bus);
2828 pci_remove_root_bus(host->bus);
2829 tegra_pcie_free_resources(pcie);
2830 pm_runtime_put_sync(pcie->dev);
2831 pm_runtime_disable(pcie->dev);
2832
2833 if (IS_ENABLED(CONFIG_PCI_MSI))
2834 tegra_pcie_msi_teardown(pcie);
2835
2836 tegra_pcie_put_resources(pcie);
2837
2838 list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2839 tegra_pcie_port_free(port);
2840
2841 return 0;
2842}
2843
2844static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
2845{
2846 struct tegra_pcie *pcie = dev_get_drvdata(dev);
2847 struct tegra_pcie_port *port;
2848 int err;
2849
2850 list_for_each_entry(port, &pcie->ports, list)
2851 tegra_pcie_pme_turnoff(port);
2852
2853 tegra_pcie_disable_ports(pcie);
2854
2855 /*
2856 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2857 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2858 */
2859 tegra_pcie_disable_interrupts(pcie);
2860
2861 if (pcie->soc->program_uphy) {
2862 err = tegra_pcie_phy_power_off(pcie);
2863 if (err < 0)
2864 dev_err(dev, "failed to power off PHY(s): %d\n", err);
2865 }
2866
2867 reset_control_assert(pcie->pex_rst);
2868 clk_disable_unprepare(pcie->pex_clk);
2869
2870 if (IS_ENABLED(CONFIG_PCI_MSI))
2871 tegra_pcie_disable_msi(pcie);
2872
2873 pinctrl_pm_select_idle_state(dev);
2874 tegra_pcie_power_off(pcie);
2875
2876 return 0;
2877}
2878
2879static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
2880{
2881 struct tegra_pcie *pcie = dev_get_drvdata(dev);
2882 int err;
2883
2884 err = tegra_pcie_power_on(pcie);
2885 if (err) {
2886 dev_err(dev, "failed to power on Tegra PCIe: %d\n", err);
2887 return err;
2888 }
2889
2890 err = pinctrl_pm_select_default_state(dev);
2891 if (err < 0) {
2892 dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002893 goto poweroff;
2894 }
2895
2896 tegra_pcie_enable_controller(pcie);
2897 tegra_pcie_setup_translations(pcie);
2898
2899 if (IS_ENABLED(CONFIG_PCI_MSI))
2900 tegra_pcie_enable_msi(pcie);
2901
2902 err = clk_prepare_enable(pcie->pex_clk);
2903 if (err) {
2904 dev_err(dev, "failed to enable PEX clock: %d\n", err);
2905 goto pex_dpd_enable;
2906 }
2907
2908 reset_control_deassert(pcie->pex_rst);
2909
2910 if (pcie->soc->program_uphy) {
2911 err = tegra_pcie_phy_power_on(pcie);
2912 if (err < 0) {
2913 dev_err(dev, "failed to power on PHY(s): %d\n", err);
2914 goto disable_pex_clk;
2915 }
2916 }
2917
2918 tegra_pcie_apply_pad_settings(pcie);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002919 tegra_pcie_enable_ports(pcie);
2920
2921 return 0;
2922
2923disable_pex_clk:
2924 reset_control_assert(pcie->pex_rst);
2925 clk_disable_unprepare(pcie->pex_clk);
2926pex_dpd_enable:
2927 pinctrl_pm_select_idle_state(dev);
2928poweroff:
2929 tegra_pcie_power_off(pcie);
2930
2931 return err;
2932}
2933
2934static const struct dev_pm_ops tegra_pcie_pm_ops = {
2935 SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2936 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
2937 tegra_pcie_pm_resume)
2938};
2939
2940static struct platform_driver tegra_pcie_driver = {
2941 .driver = {
2942 .name = "tegra-pcie",
2943 .of_match_table = tegra_pcie_of_match,
2944 .suppress_bind_attrs = true,
2945 .pm = &tegra_pcie_pm_ops,
2946 },
2947 .probe = tegra_pcie_probe,
2948 .remove = tegra_pcie_remove,
2949};
2950module_platform_driver(tegra_pcie_driver);
2951MODULE_LICENSE("GPL");