// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support of MSI, HPET and DMAR interrupts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 * Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Convert to hierarchical irqdomain
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
#include <linux/msi.h>
#include <asm/irqdomain.h>
#include <asm/msidef.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>

static struct irq_domain *msi_default_domain;

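/*
 * Compose the address/data pair of an MSI message from the vector and
 * destination APIC ID in @cfg. The address selects the target CPU and
 * the destination mode; the data selects fixed delivery, edge trigger
 * and the vector to raise.
 */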
static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg)
{
	msg->address_hi = MSI_ADDR_BASE_HI;

	if (x2apic_enabled())
		msg->address_hi |= MSI_ADDR_EXT_DEST_ID(cfg->dest_apicid);

	msg->address_lo =
		MSI_ADDR_BASE_LO |
		((apic->irq_dest_mode == 0) ?
			MSI_ADDR_DEST_MODE_PHYSICAL :
			MSI_ADDR_DEST_MODE_LOGICAL) |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID(cfg->dest_apicid);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
}

static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	__irq_msi_compose_msg(irqd_cfg(data), msg);
}

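/*
 * Recompose the MSI message for @cfg and push it out to the device via
 * the irq chip's irq_write_msi_msg() callback.
 */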
static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
{
	struct msi_msg msg[2] = { [1] = { }, };

	__irq_msi_compose_msg(cfg, msg);
	irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
}

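/*
 * Retarget an MSI interrupt to a new CPU/vector. For maskable or
 * remapped MSIs this boils down to writing the new message. For
 * non-maskable MSIs the device cannot observe the address/data update
 * atomically, so the migration goes through an intermediate step on the
 * current CPU; see the step-by-step comments below.
 */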
static int
msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
{
	struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd);
	struct irq_data *parent = irqd->parent_data;
	unsigned int cpu;
	int ret;

	/* Save the current configuration */
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
	old_cfg = *cfg;

	/* Allocate a new target vector */
	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * For non-maskable and non-remapped MSI interrupts the migration
	 * to a different destination CPU and a different vector has to be
	 * done carefully to handle the possible stray interrupt which can
	 * be caused by the non-atomic update of the address/data pair.
	 *
	 * Direct update is possible when:
	 * - The MSI is maskable (remapped MSI does not use this code path).
	 *   The quirk bit is not set in this case.
	 * - The new vector is the same as the old vector
	 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
	 * - The interrupt is not yet started up
	 * - The new destination CPU is the same as the old destination CPU
	 */
	if (!irqd_msi_nomask_quirk(irqd) ||
	    cfg->vector == old_cfg.vector ||
	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
	    !irqd_is_started(irqd) ||
	    cfg->dest_apicid == old_cfg.dest_apicid) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
	}

	/*
	 * Paranoia: Validate that the interrupt target is the local
	 * CPU.
	 */
	if (WARN_ON_ONCE(cpu != smp_processor_id())) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
	}

	/*
	 * Redirect the interrupt to the new vector on the current CPU
	 * first. This might cause a spurious interrupt on this vector if
	 * the device raises an interrupt right between this update and the
	 * update to the final destination CPU.
	 *
	 * If the vector is in use then the installed device handler will
	 * denote it as spurious, which is no harm as this is a rare event
	 * and interrupt handlers have to cope with spurious interrupts
	 * anyway. If the vector is unused, then it is marked so it won't
	 * trigger the 'No irq handler for vector' warning in do_IRQ().
	 *
	 * This requires holding the vector lock to prevent concurrent
	 * updates to the affected vector.
	 */
	lock_vector_lock();

	/*
	 * Mark the new target vector on the local CPU if it is currently
	 * unused. Reuse the VECTOR_RETRIGGERED state which is also used in
	 * the CPU hotplug path for a similar purpose. This cannot be
	 * undone here as the current CPU has interrupts disabled and
	 * cannot handle the interrupt before the whole set_affinity()
	 * section is done. In the CPU unplug case, the current CPU is
	 * about to vanish and will not handle any interrupts anymore. The
	 * vector is cleaned up when the CPU comes online again.
	 */
	if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector])))
		this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED);

	/* Redirect it to the new vector on the local CPU temporarily */
	old_cfg.vector = cfg->vector;
	irq_msi_update_msg(irqd, &old_cfg);

	/* Now transition it to the target CPU */
	irq_msi_update_msg(irqd, cfg);

	/*
	 * All interrupts after this point are now targeted at the new
	 * vector/CPU.
	 *
	 * Drop the vector lock before testing whether the temporary
	 * assignment to the local CPU was hit by an interrupt raised in
	 * the device, because the retrigger function acquires the vector
	 * lock again.
	 */
	unlock_vector_lock();

	/*
	 * Check whether the transition raced with a device interrupt and
	 * is pending in the local APIC's IRR. It is safe to do this outside
	 * of the vector lock as the irq_desc::lock of this interrupt is
	 * still held and interrupts are disabled: The check is not
	 * accessing the underlying vector store. It's just checking the
	 * local APIC's IRR.
	 */
	if (lapic_vector_set_in_irr(cfg->vector))
		irq_data_get_irq_chip(irqd)->irq_retrigger(irqd);

	return ret;
}

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express devices, which implement the
 * MSI or MSI-X Capability Structure.
 */
static struct irq_chip pci_msi_controller = {
	.name			= "PCI-MSI",
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_mask		= pci_msi_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg	= irq_msi_compose_msg,
	.irq_set_affinity	= msi_set_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_AFFINITY_PRE_STARTUP,
};

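/*
 * Allocate interrupts for a PCI device which enables MSI or MSI-X.
 * Prefer the interrupt remapping domain if one serves this device and
 * fall back to the default MSI domain otherwise.
 */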
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct irq_domain *domain;
	struct irq_alloc_info info;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_MSI;
	info.msi_dev = dev;

	domain = irq_remapping_get_irq_domain(&info);
	if (domain == NULL)
		domain = msi_default_domain;
	if (domain == NULL)
		return -ENOSYS;

	return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
}

void native_teardown_msi_irq(unsigned int irq)
{
	irq_domain_free_irqs(irq, 1);
}

static irq_hw_number_t pci_msi_get_hwirq(struct msi_domain_info *info,
					 msi_alloc_info_t *arg)
{
	return arg->msi_hwirq;
}

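/*
 * Set up the allocation info for a PCI device. Multi-MSI (unlike MSI-X)
 * signals the interrupt number in the low bits of the message data, so
 * all vectors of a multi-MSI block must be contiguous; hence
 * X86_IRQ_ALLOC_CONTIGUOUS_VECTORS for the MSI case.
 */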
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
		    msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct msi_desc *desc = first_pci_msi_entry(pdev);

	init_irq_alloc_info(arg, NULL);
	arg->msi_dev = pdev;
	if (desc->msi_attrib.is_msix) {
		arg->type = X86_IRQ_ALLOC_TYPE_MSIX;
	} else {
		arg->type = X86_IRQ_ALLOC_TYPE_MSI;
		arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_msi_prepare);

void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->msi_hwirq = pci_msi_domain_calc_hwirq(arg->msi_dev, desc);
}
EXPORT_SYMBOL_GPL(pci_msi_set_desc);

static struct msi_domain_ops pci_msi_domain_ops = {
	.get_hwirq	= pci_msi_get_hwirq,
	.msi_prepare	= pci_msi_prepare,
	.set_desc	= pci_msi_set_desc,
};

static struct msi_domain_info pci_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &pci_msi_domain_ops,
	.chip		= &pci_msi_controller,
	.handler	= handle_edge_irq,
	.handler_name	= "edge",
};

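/*
 * Create the default PCI-MSI irqdomain on top of the vector domain.
 * IRQ_DOMAIN_MSI_NOMASK_QUIRK makes the core flag non-maskable MSI
 * interrupts in this domain, which routes them through the careful
 * two-step migration in msi_set_affinity() above.
 */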
void __init arch_init_msi_domain(struct irq_domain *parent)
{
	struct fwnode_handle *fn;

	if (disable_apic)
		return;

	fn = irq_domain_alloc_named_fwnode("PCI-MSI");
	if (fn) {
		msi_default_domain =
			pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
						  parent);
	}
	if (!msi_default_domain) {
		irq_domain_free_fwnode(fn);
		pr_warn("failed to initialize irqdomain for MSI/MSI-X.\n");
	} else {
		msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
	}
}

#ifdef CONFIG_IRQ_REMAP
static struct irq_chip pci_msi_ir_controller = {
	.name			= "IR-PCI-MSI",
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_mask		= pci_msi_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_vcpu_affinity	= irq_chip_set_vcpu_affinity_parent,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct msi_domain_info pci_msi_ir_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
	.ops		= &pci_msi_domain_ops,
	.chip		= &pci_msi_ir_controller,
	.handler	= handle_edge_irq,
	.handler_name	= "edge",
};

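/*
 * Create a PCI-MSI irqdomain on top of an interrupt remapping unit.
 * Remapped MSIs are retargeted through the remapping table entry rather
 * than by rewriting the address/data pair, so this domain needs neither
 * the nomask quirk nor the special affinity setter, and it can offer
 * multi-PCI-MSI support.
 */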
struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
						    const char *name, int id)
{
	struct fwnode_handle *fn;
	struct irq_domain *d;

	fn = irq_domain_alloc_named_id_fwnode(name, id);
	if (!fn)
		return NULL;
	d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
	if (!d)
		irq_domain_free_fwnode(fn);
	return d;
}
#endif

#ifdef CONFIG_DMAR_TABLE
static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	dmar_msi_write(data->irq, msg);
}

static struct irq_chip dmar_msi_controller = {
	.name			= "DMAR-MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= irq_chip_ack_parent,
	.irq_set_affinity	= msi_domain_set_affinity,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg	= irq_msi_compose_msg,
	.irq_write_msi_msg	= dmar_msi_write_msg,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info,
					  msi_alloc_info_t *arg)
{
	return arg->dmar_id;
}

static int dmar_msi_init(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq,
			 irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
	irq_domain_set_info(domain, virq, arg->dmar_id, info->chip, NULL,
			    handle_edge_irq, arg->dmar_data, "edge");

	return 0;
}

static struct msi_domain_ops dmar_msi_domain_ops = {
	.get_hwirq	= dmar_msi_get_hwirq,
	.msi_init	= dmar_msi_init,
};

static struct msi_domain_info dmar_msi_domain_info = {
	.ops	= &dmar_msi_domain_ops,
	.chip	= &dmar_msi_controller,
};

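/*
 * Create the DMAR-MSI irqdomain lazily on first use. The mutex
 * serializes first-time callers; on failure the freshly allocated
 * fwnode is released again.
 */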
static struct irq_domain *dmar_get_irq_domain(void)
{
	static struct irq_domain *dmar_domain;
	static DEFINE_MUTEX(dmar_lock);
	struct fwnode_handle *fn;

	mutex_lock(&dmar_lock);
	if (dmar_domain)
		goto out;

	fn = irq_domain_alloc_named_fwnode("DMAR-MSI");
	if (fn) {
		dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
						    x86_vector_domain);
		if (!dmar_domain)
			irq_domain_free_fwnode(fn);
	}
out:
	mutex_unlock(&dmar_lock);
	return dmar_domain;
}

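/*
 * Allocate one interrupt for a DMAR unit. @id identifies the unit,
 * @node gives the NUMA hint for the vector allocation and @arg becomes
 * the handler data. Returns the Linux irq number or -1 if no irqdomain
 * is available. A call from a DMAR driver might look roughly like
 *
 *	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
 *
 * (illustrative sketch only; the field names are assumptions, not taken
 * from this file).
 */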
int dmar_alloc_hwirq(int id, int node, void *arg)
{
	struct irq_domain *domain = dmar_get_irq_domain();
	struct irq_alloc_info info;

	if (!domain)
		return -1;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_DMAR;
	info.dmar_id = id;
	info.dmar_data = arg;

	return irq_domain_alloc_irqs(domain, 1, node, &info);
}

void dmar_free_hwirq(int irq)
{
	irq_domain_free_irqs(irq, 1);
}
#endif

/*
 * HPET MSI support
 */
#ifdef CONFIG_HPET_TIMER
static inline int hpet_dev_id(struct irq_domain *domain)
{
	struct msi_domain_info *info = msi_get_domain_info(domain);

	return (int)(long)info->data;
}

static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
}

static struct irq_chip hpet_msi_controller __ro_after_init = {
	.name			= "HPET-MSI",
	.irq_unmask		= hpet_msi_unmask,
	.irq_mask		= hpet_msi_mask,
	.irq_ack		= irq_chip_ack_parent,
	.irq_set_affinity	= msi_domain_set_affinity,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg	= irq_msi_compose_msg,
	.irq_write_msi_msg	= hpet_msi_write_msg,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info,
					  msi_alloc_info_t *arg)
{
	return arg->hpet_index;
}

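/*
 * IRQ_MOVE_PCNTXT allows the affinity of this interrupt to be changed
 * directly from process context instead of deferring the move until the
 * next interrupt arrives.
 */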
static int hpet_msi_init(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq,
			 irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
	irq_domain_set_info(domain, virq, arg->hpet_index, info->chip, NULL,
			    handle_edge_irq, arg->hpet_data, "edge");

	return 0;
}

static void hpet_msi_free(struct irq_domain *domain,
			  struct msi_domain_info *info, unsigned int virq)
{
	irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
}

static struct msi_domain_ops hpet_msi_domain_ops = {
	.get_hwirq	= hpet_msi_get_hwirq,
	.msi_init	= hpet_msi_init,
	.msi_free	= hpet_msi_free,
};

static struct msi_domain_info hpet_msi_domain_info = {
	.ops	= &hpet_msi_domain_ops,
	.chip	= &hpet_msi_controller,
};

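/*
 * Create a per-HPET-block MSI irqdomain. The HPET block id is stashed
 * in a private copy of the domain info so that hpet_dev_id() can
 * retrieve it later. If an interrupt remapping unit serves this HPET,
 * the domain is stacked on top of it and renamed accordingly.
 */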
struct irq_domain *hpet_create_irq_domain(int hpet_id)
{
	struct msi_domain_info *domain_info;
	struct irq_domain *parent, *d;
	struct irq_alloc_info info;
	struct fwnode_handle *fn;

	if (x86_vector_domain == NULL)
		return NULL;

	domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL);
	if (!domain_info)
		return NULL;

	*domain_info = hpet_msi_domain_info;
	domain_info->data = (void *)(long)hpet_id;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_HPET;
	info.hpet_id = hpet_id;
	parent = irq_remapping_get_ir_irq_domain(&info);
	if (parent == NULL)
		parent = x86_vector_domain;
	else
		hpet_msi_controller.name = "IR-HPET-MSI";

	fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name,
					      hpet_id);
	if (!fn) {
		kfree(domain_info);
		return NULL;
	}

	d = msi_create_irq_domain(fn, domain_info, parent);
	if (!d) {
		irq_domain_free_fwnode(fn);
		kfree(domain_info);
	}
	return d;
}

int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc,
		    int dev_num)
{
	struct irq_alloc_info info;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_HPET;
	info.hpet_data = hc;
	info.hpet_id = hpet_dev_id(domain);
	info.hpet_index = dev_num;

	return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
}
#endif