Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Synopsys DesignWare PCIe host controller driver |
| 4 | * |
| 5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6 | * https://www.samsung.com |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7 | * |
| 8 | * Author: Jingoo Han <jg1.han@samsung.com> |
| 9 | */ |
| 10 | |
| 11 | #include <linux/irqchip/chained_irq.h> |
| 12 | #include <linux/irqdomain.h> |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 13 | #include <linux/msi.h> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 14 | #include <linux/of_address.h> |
| 15 | #include <linux/of_pci.h> |
| 16 | #include <linux/pci_regs.h> |
| 17 | #include <linux/platform_device.h> |
| 18 | |
| 19 | #include "../../pci.h" |
| 20 | #include "pcie-designware.h" |
| 21 | |
| 22 | static struct pci_ops dw_pcie_ops; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 23 | static struct pci_ops dw_child_pcie_ops; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 24 | |
/* Ack an MSI vector by forwarding the ack to the parent (DWC MSI ctrl) chip. */
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
| 29 | |
/*
 * Mask an MSI at both levels: the endpoint's MSI capability (PCI core)
 * and the parent DWC MSI controller interrupt chip.
 */
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
| 35 | |
/*
 * Unmask an MSI at both levels: the endpoint's MSI capability (PCI core)
 * and the parent DWC MSI controller interrupt chip.
 */
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
| 41 | |
/* Top-level irq_chip exposed to the PCI core through the MSI domain. */
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};
| 48 | |
/* MSI domain info: supports MSI-X and multi-vector MSI with default ops. */
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
| 54 | |
| 55 | /* MSI int handler */ |
| 56 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) |
| 57 | { |
| 58 | int i, pos, irq; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 59 | unsigned long val; |
| 60 | u32 status, num_ctrls; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 61 | irqreturn_t ret = IRQ_NONE; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 62 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 63 | |
| 64 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; |
| 65 | |
| 66 | for (i = 0; i < num_ctrls; i++) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 67 | status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + |
| 68 | (i * MSI_REG_CTRL_BLOCK_SIZE)); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 69 | if (!status) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 70 | continue; |
| 71 | |
| 72 | ret = IRQ_HANDLED; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 73 | val = status; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 74 | pos = 0; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 75 | while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 76 | pos)) != MAX_MSI_IRQS_PER_CTRL) { |
| 77 | irq = irq_find_mapping(pp->irq_domain, |
| 78 | (i * MAX_MSI_IRQS_PER_CTRL) + |
| 79 | pos); |
| 80 | generic_handle_irq(irq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 81 | pos++; |
| 82 | } |
| 83 | } |
| 84 | |
| 85 | return ret; |
| 86 | } |
| 87 | |
/*
 * Chained MSI service routine: invoked for the single upstream interrupt
 * line that aggregates all MSI vectors; delegates to dw_handle_msi_irq().
 */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct pcie_port *pp = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	dw_handle_msi_irq(pp);
	chained_irq_exit(chip, desc);
}
| 101 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 102 | static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 103 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 104 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 105 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 106 | u64 msi_target; |
| 107 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 108 | msi_target = (u64)pp->msi_data; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 109 | |
| 110 | msg->address_lo = lower_32_bits(msi_target); |
| 111 | msg->address_hi = upper_32_bits(msi_target); |
| 112 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 113 | msg->data = d->hwirq; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 114 | |
| 115 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 116 | (int)d->hwirq, msg->address_hi, msg->address_lo); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 117 | } |
| 118 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 119 | static int dw_pci_msi_set_affinity(struct irq_data *d, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 120 | const struct cpumask *mask, bool force) |
| 121 | { |
| 122 | return -EINVAL; |
| 123 | } |
| 124 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 125 | static void dw_pci_bottom_mask(struct irq_data *d) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 126 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 127 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 128 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 129 | unsigned int res, bit, ctrl; |
| 130 | unsigned long flags; |
| 131 | |
| 132 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 133 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 134 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
| 135 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
| 136 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 137 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 138 | pp->irq_mask[ctrl] |= BIT(bit); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 139 | dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 140 | |
| 141 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| 142 | } |
| 143 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 144 | static void dw_pci_bottom_unmask(struct irq_data *d) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 145 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 146 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 147 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 148 | unsigned int res, bit, ctrl; |
| 149 | unsigned long flags; |
| 150 | |
| 151 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 152 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 153 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
| 154 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
| 155 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 156 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 157 | pp->irq_mask[ctrl] &= ~BIT(bit); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 158 | dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 159 | |
| 160 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| 161 | } |
| 162 | |
| 163 | static void dw_pci_bottom_ack(struct irq_data *d) |
| 164 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 165 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 166 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 167 | unsigned int res, bit, ctrl; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 168 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 169 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
| 170 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
| 171 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 172 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 173 | dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 174 | } |
| 175 | |
/* Bottom-level irq_chip that drives the DWC MSI controller registers. */
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
| 184 | |
| 185 | static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, |
| 186 | unsigned int virq, unsigned int nr_irqs, |
| 187 | void *args) |
| 188 | { |
| 189 | struct pcie_port *pp = domain->host_data; |
| 190 | unsigned long flags; |
| 191 | u32 i; |
| 192 | int bit; |
| 193 | |
| 194 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 195 | |
| 196 | bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors, |
| 197 | order_base_2(nr_irqs)); |
| 198 | |
| 199 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| 200 | |
| 201 | if (bit < 0) |
| 202 | return -ENOSPC; |
| 203 | |
| 204 | for (i = 0; i < nr_irqs; i++) |
| 205 | irq_domain_set_info(domain, virq + i, bit + i, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 206 | pp->msi_irq_chip, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 207 | pp, handle_edge_irq, |
| 208 | NULL, NULL); |
| 209 | |
| 210 | return 0; |
| 211 | } |
| 212 | |
| 213 | static void dw_pcie_irq_domain_free(struct irq_domain *domain, |
| 214 | unsigned int virq, unsigned int nr_irqs) |
| 215 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 216 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 217 | struct pcie_port *pp = domain->host_data; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 218 | unsigned long flags; |
| 219 | |
| 220 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 221 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 222 | bitmap_release_region(pp->msi_irq_in_use, d->hwirq, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 223 | order_base_2(nr_irqs)); |
| 224 | |
| 225 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| 226 | } |
| 227 | |
/* Ops for the inner (nexus) IRQ domain backing the MSI vectors. */
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
| 232 | |
| 233 | int dw_pcie_allocate_domains(struct pcie_port *pp) |
| 234 | { |
| 235 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 236 | struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); |
| 237 | |
| 238 | pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, |
| 239 | &dw_pcie_msi_domain_ops, pp); |
| 240 | if (!pp->irq_domain) { |
| 241 | dev_err(pci->dev, "Failed to create IRQ domain\n"); |
| 242 | return -ENOMEM; |
| 243 | } |
| 244 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 245 | irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); |
| 246 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 247 | pp->msi_domain = pci_msi_create_irq_domain(fwnode, |
| 248 | &dw_pcie_msi_domain_info, |
| 249 | pp->irq_domain); |
| 250 | if (!pp->msi_domain) { |
| 251 | dev_err(pci->dev, "Failed to create MSI domain\n"); |
| 252 | irq_domain_remove(pp->irq_domain); |
| 253 | return -ENOMEM; |
| 254 | } |
| 255 | |
| 256 | return 0; |
| 257 | } |
| 258 | |
| 259 | void dw_pcie_free_msi(struct pcie_port *pp) |
| 260 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 261 | if (pp->msi_irq) { |
| 262 | irq_set_chained_handler(pp->msi_irq, NULL); |
| 263 | irq_set_handler_data(pp->msi_irq, NULL); |
| 264 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 265 | |
| 266 | irq_domain_remove(pp->msi_domain); |
| 267 | irq_domain_remove(pp->irq_domain); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 268 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 269 | if (pp->msi_data) { |
| 270 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 271 | struct device *dev = pci->dev; |
| 272 | |
| 273 | dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg), |
| 274 | DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); |
| 275 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 276 | } |
| 277 | |
| 278 | void dw_pcie_msi_init(struct pcie_port *pp) |
| 279 | { |
| 280 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 281 | u64 msi_target = (u64)pp->msi_data; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 282 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 283 | if (!IS_ENABLED(CONFIG_PCI_MSI)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 284 | return; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 285 | |
| 286 | /* Program the msi_data */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 287 | dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target)); |
| 288 | dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 289 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 290 | EXPORT_SYMBOL_GPL(dw_pcie_msi_init); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 291 | |
| 292 | int dw_pcie_host_init(struct pcie_port *pp) |
| 293 | { |
| 294 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 295 | struct device *dev = pci->dev; |
| 296 | struct device_node *np = dev->of_node; |
| 297 | struct platform_device *pdev = to_platform_device(dev); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 298 | struct resource_entry *win; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 299 | struct pci_host_bridge *bridge; |
| 300 | struct resource *cfg_res; |
| 301 | int ret; |
| 302 | |
| 303 | raw_spin_lock_init(&pci->pp.lock); |
| 304 | |
| 305 | cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); |
| 306 | if (cfg_res) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 307 | pp->cfg0_size = resource_size(cfg_res); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 308 | pp->cfg0_base = cfg_res->start; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 309 | } else if (!pp->va_cfg0_base) { |
| 310 | dev_err(dev, "Missing *config* reg space\n"); |
| 311 | } |
| 312 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 313 | bridge = devm_pci_alloc_host_bridge(dev, 0); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 314 | if (!bridge) |
| 315 | return -ENOMEM; |
| 316 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 317 | pp->bridge = bridge; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 318 | |
| 319 | /* Get the I/O and memory ranges from DT */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 320 | resource_list_for_each_entry(win, &bridge->windows) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 321 | switch (resource_type(win->res)) { |
| 322 | case IORESOURCE_IO: |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 323 | pp->io_size = resource_size(win->res); |
| 324 | pp->io_bus_addr = win->res->start - win->offset; |
| 325 | pp->io_base = pci_pio_to_address(win->res->start); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 326 | break; |
| 327 | case 0: |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 328 | dev_err(dev, "Missing *config* reg space\n"); |
| 329 | pp->cfg0_size = resource_size(win->res); |
| 330 | pp->cfg0_base = win->res->start; |
| 331 | if (!pci->dbi_base) { |
| 332 | pci->dbi_base = devm_pci_remap_cfgspace(dev, |
| 333 | pp->cfg0_base, |
| 334 | pp->cfg0_size); |
| 335 | if (!pci->dbi_base) { |
| 336 | dev_err(dev, "Error with ioremap\n"); |
| 337 | return -ENOMEM; |
| 338 | } |
| 339 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 340 | break; |
| 341 | } |
| 342 | } |
| 343 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 344 | if (!pp->va_cfg0_base) { |
| 345 | pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, |
| 346 | pp->cfg0_base, pp->cfg0_size); |
| 347 | if (!pp->va_cfg0_base) { |
| 348 | dev_err(dev, "Error with ioremap in function\n"); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 349 | return -ENOMEM; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 350 | } |
| 351 | } |
| 352 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 353 | ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport); |
| 354 | if (ret) |
| 355 | pci->num_viewport = 2; |
| 356 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 357 | if (pci->link_gen < 1) |
| 358 | pci->link_gen = of_pci_get_max_link_speed(np); |
| 359 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 360 | if (pci_msi_enabled()) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 361 | /* |
| 362 | * If a specific SoC driver needs to change the |
| 363 | * default number of vectors, it needs to implement |
| 364 | * the set_num_vectors callback. |
| 365 | */ |
| 366 | if (!pp->ops->set_num_vectors) { |
| 367 | pp->num_vectors = MSI_DEF_NUM_VECTORS; |
| 368 | } else { |
| 369 | pp->ops->set_num_vectors(pp); |
| 370 | |
| 371 | if (pp->num_vectors > MAX_MSI_IRQS || |
| 372 | pp->num_vectors == 0) { |
| 373 | dev_err(dev, |
| 374 | "Invalid number of vectors\n"); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 375 | return -EINVAL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 376 | } |
| 377 | } |
| 378 | |
| 379 | if (!pp->ops->msi_host_init) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 380 | pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; |
| 381 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 382 | ret = dw_pcie_allocate_domains(pp); |
| 383 | if (ret) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 384 | return ret; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 385 | |
| 386 | if (pp->msi_irq) |
| 387 | irq_set_chained_handler_and_data(pp->msi_irq, |
| 388 | dw_chained_msi_isr, |
| 389 | pp); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 390 | |
| 391 | pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg, |
| 392 | sizeof(pp->msi_msg), |
| 393 | DMA_FROM_DEVICE, |
| 394 | DMA_ATTR_SKIP_CPU_SYNC); |
| 395 | if (dma_mapping_error(pci->dev, pp->msi_data)) { |
| 396 | dev_err(pci->dev, "Failed to map MSI data\n"); |
| 397 | pp->msi_data = 0; |
| 398 | goto err_free_msi; |
| 399 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 400 | } else { |
| 401 | ret = pp->ops->msi_host_init(pp); |
| 402 | if (ret < 0) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 403 | return ret; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 404 | } |
| 405 | } |
| 406 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 407 | /* Set default bus ops */ |
| 408 | bridge->ops = &dw_pcie_ops; |
| 409 | bridge->child_ops = &dw_child_pcie_ops; |
| 410 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 411 | if (pp->ops->host_init) { |
| 412 | ret = pp->ops->host_init(pp); |
| 413 | if (ret) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 414 | goto err_free_msi; |
| 415 | } |
| 416 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 417 | bridge->sysdata = pp; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 418 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 419 | ret = pci_host_probe(bridge); |
| 420 | if (!ret) |
| 421 | return 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 422 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 423 | err_free_msi: |
| 424 | if (pci_msi_enabled() && !pp->ops->msi_host_init) |
| 425 | dw_pcie_free_msi(pp); |
| 426 | return ret; |
| 427 | } |
| 428 | EXPORT_SYMBOL_GPL(dw_pcie_host_init); |
| 429 | |
| 430 | void dw_pcie_host_deinit(struct pcie_port *pp) |
| 431 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 432 | pci_stop_root_bus(pp->bridge->bus); |
| 433 | pci_remove_root_bus(pp->bridge->bus); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 434 | if (pci_msi_enabled() && !pp->ops->msi_host_init) |
| 435 | dw_pcie_free_msi(pp); |
| 436 | } |
| 437 | EXPORT_SYMBOL_GPL(dw_pcie_host_deinit); |
| 438 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 439 | static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus, |
| 440 | unsigned int devfn, int where) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 441 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 442 | int type; |
| 443 | u32 busdev; |
| 444 | struct pcie_port *pp = bus->sysdata; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 445 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 446 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 447 | /* |
| 448 | * Checking whether the link is up here is a last line of defense |
| 449 | * against platforms that forward errors on the system bus as |
| 450 | * SError upon PCI configuration transactions issued when the link |
| 451 | * is down. This check is racy by definition and does not stop |
| 452 | * the system from triggering an SError if the link goes down |
| 453 | * after this check is performed. |
| 454 | */ |
| 455 | if (!dw_pcie_link_up(pci)) |
| 456 | return NULL; |
| 457 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 458 | busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | |
| 459 | PCIE_ATU_FUNC(PCI_FUNC(devfn)); |
| 460 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 461 | if (pci_is_root_bus(bus->parent)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 462 | type = PCIE_ATU_TYPE_CFG0; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 463 | else |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 464 | type = PCIE_ATU_TYPE_CFG1; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 465 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 466 | |
| 467 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 468 | type, pp->cfg0_base, |
| 469 | busdev, pp->cfg0_size); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 470 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 471 | return pp->va_cfg0_base + where; |
| 472 | } |
| 473 | |
| 474 | static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn, |
| 475 | int where, int size, u32 *val) |
| 476 | { |
| 477 | int ret; |
| 478 | struct pcie_port *pp = bus->sysdata; |
| 479 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 480 | |
| 481 | ret = pci_generic_config_read(bus, devfn, where, size, val); |
| 482 | |
| 483 | if (!ret && pci->num_viewport <= 2) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 484 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, |
| 485 | PCIE_ATU_TYPE_IO, pp->io_base, |
| 486 | pp->io_bus_addr, pp->io_size); |
| 487 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 488 | return ret; |
| 489 | } |
| 490 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 491 | static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn, |
| 492 | int where, int size, u32 val) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 493 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 494 | int ret; |
| 495 | struct pcie_port *pp = bus->sysdata; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 496 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 497 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 498 | ret = pci_generic_config_write(bus, devfn, where, size, val); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 499 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 500 | if (!ret && pci->num_viewport <= 2) |
| 501 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, |
| 502 | PCIE_ATU_TYPE_IO, pp->io_base, |
| 503 | pp->io_bus_addr, pp->io_size); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 504 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 505 | return ret; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 506 | } |
| 507 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 508 | static struct pci_ops dw_child_pcie_ops = { |
| 509 | .map_bus = dw_pcie_other_conf_map_bus, |
| 510 | .read = dw_pcie_rd_other_conf, |
| 511 | .write = dw_pcie_wr_other_conf, |
| 512 | }; |
| 513 | |
| 514 | void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 515 | { |
| 516 | struct pcie_port *pp = bus->sysdata; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 517 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 518 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 519 | if (PCI_SLOT(devfn) > 0) |
| 520 | return NULL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 521 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 522 | return pci->dbi_base + where; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 523 | } |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 524 | EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 525 | |
/* Config accessors for the root bus (DBI-backed, no ATU involved). */
static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
| 531 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 532 | void dw_pcie_setup_rc(struct pcie_port *pp) |
| 533 | { |
| 534 | u32 val, ctrl, num_ctrls; |
| 535 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 536 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 537 | /* |
| 538 | * Enable DBI read-only registers for writing/updating configuration. |
| 539 | * Write permission gets disabled towards the end of this function. |
| 540 | */ |
| 541 | dw_pcie_dbi_ro_wr_en(pci); |
| 542 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 543 | dw_pcie_setup(pci); |
| 544 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 545 | if (pci_msi_enabled() && !pp->ops->msi_host_init) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 546 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 547 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 548 | /* Initialize IRQ Status array */ |
| 549 | for (ctrl = 0; ctrl < num_ctrls; ctrl++) { |
| 550 | pp->irq_mask[ctrl] = ~0; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 551 | dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 552 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 553 | pp->irq_mask[ctrl]); |
| 554 | dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 555 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 556 | ~0); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 557 | } |
| 558 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 559 | |
| 560 | /* Setup RC BARs */ |
| 561 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); |
| 562 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); |
| 563 | |
| 564 | /* Setup interrupt pins */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 565 | val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); |
| 566 | val &= 0xffff00ff; |
| 567 | val |= 0x00000100; |
| 568 | dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 569 | |
| 570 | /* Setup bus numbers */ |
| 571 | val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); |
| 572 | val &= 0xff000000; |
| 573 | val |= 0x00ff0100; |
| 574 | dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); |
| 575 | |
| 576 | /* Setup command register */ |
| 577 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); |
| 578 | val &= 0xffff0000; |
| 579 | val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | |
| 580 | PCI_COMMAND_MASTER | PCI_COMMAND_SERR; |
| 581 | dw_pcie_writel_dbi(pci, PCI_COMMAND, val); |
| 582 | |
| 583 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 584 | * If the platform provides its own child bus config accesses, it means |
| 585 | * the platform uses its own address translation component rather than |
| 586 | * ATU, so we should not program the ATU here. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 587 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 588 | if (pp->bridge->child_ops == &dw_child_pcie_ops) { |
| 589 | struct resource_entry *tmp, *entry = NULL; |
| 590 | |
| 591 | /* Get last memory resource entry */ |
| 592 | resource_list_for_each_entry(tmp, &pp->bridge->windows) |
| 593 | if (resource_type(tmp->res) == IORESOURCE_MEM) |
| 594 | entry = tmp; |
| 595 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 596 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 597 | PCIE_ATU_TYPE_MEM, entry->res->start, |
| 598 | entry->res->start - entry->offset, |
| 599 | resource_size(entry->res)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 600 | if (pci->num_viewport > 2) |
| 601 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2, |
| 602 | PCIE_ATU_TYPE_IO, pp->io_base, |
| 603 | pp->io_bus_addr, pp->io_size); |
| 604 | } |
| 605 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 606 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 607 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 608 | /* Program correct class for RC */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 609 | dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 610 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 611 | val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 612 | val |= PORT_LOGIC_SPEED_CHANGE; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 613 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 614 | |
| 615 | dw_pcie_dbi_ro_wr_dis(pci); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 616 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 617 | EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); |