Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef LINUX_MSI_H |
| 3 | #define LINUX_MSI_H |
| 4 | |
| 5 | #include <linux/kobject.h> |
| 6 | #include <linux/list.h> |
| 7 | |
/*
 * struct msi_msg - Cached copy of an MSI message as written to the device.
 * @address_lo: low 32 bits of msi message address
 * @address_hi: high 32 bits of msi message address
 * @data:       msi message data; the field is 32 bits wide even though
 *              classic PCI MSI only carries 16 bits of data — presumably
 *              kept at u32 for non-PCI users; confirm against callers
 */
struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* message data (PCI MSI uses low 16 bits) */
};
| 13 | |
| 14 | extern int pci_msi_ignore_mask; |
| 15 | /* Helper functions */ |
| 16 | struct irq_data; |
| 17 | struct msi_desc; |
| 18 | struct pci_dev; |
| 19 | struct platform_msi_priv_data; |
| 20 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
| 21 | #ifdef CONFIG_GENERIC_MSI_IRQ |
| 22 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); |
| 23 | #else |
/* Stub for !CONFIG_GENERIC_MSI_IRQ: no generic MSI core, nothing cached. */
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
| 27 | #endif |
| 28 | |
| 29 | typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, |
| 30 | struct msi_msg *msg); |
| 31 | |
| 32 | /** |
| 33 | * platform_msi_desc - Platform device specific msi descriptor data |
| 34 | * @msi_priv_data: Pointer to platform private data |
| 35 | * @msi_index: The index of the MSI descriptor for multi MSI |
| 36 | */ |
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;	/* platform private data */
	u16				msi_index;	/* index for multi MSI */
};
| 41 | |
| 42 | /** |
| 43 | * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data |
| 44 | * @msi_index: The index of the MSI descriptor |
| 45 | */ |
struct fsl_mc_msi_desc {
	u16				msi_index;	/* index of this MSI descriptor */
};
| 49 | |
| 50 | /** |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 51 | * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data |
| 52 | * @dev_index: TISCI device index |
| 53 | */ |
struct ti_sci_inta_msi_desc {
	u16	dev_index;	/* TISCI device index */
};
| 57 | |
/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_cookie: Optional IOMMU token; only present with CONFIG_IRQ_MSI_IOMMU
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI/X] True for a virtual vector — NOTE(review):
 *		inferred from the name (vector without a hardware entry
 *		behind it); confirm against the PCI MSI core
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI] Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform] Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc] FSL MC device specific msi descriptor data
 * @inta:	[INTA] TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	maskbit		: 1;
				u8	is_64		: 1;
				u8	is_virtual	: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};
| 132 | |
| 133 | /* Helpers to hide struct msi_desc implementation details */ |
| 134 | #define msi_desc_to_dev(desc) ((desc)->dev) |
| 135 | #define dev_to_msi_list(dev) (&(dev)->msi_list) |
| 136 | #define first_msi_entry(dev) \ |
| 137 | list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) |
| 138 | #define for_each_msi_entry(desc, dev) \ |
| 139 | list_for_each_entry((desc), dev_to_msi_list((dev)), list) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 140 | #define for_each_msi_entry_safe(desc, tmp, dev) \ |
| 141 | list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 142 | #define for_each_msi_vector(desc, __irq, dev) \ |
| 143 | for_each_msi_entry((desc), (dev)) \ |
| 144 | if ((desc)->irq) \ |
| 145 | for (__irq = (desc)->irq; \ |
| 146 | __irq < ((desc)->irq + (desc)->nvec_used); \ |
| 147 | __irq++) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 148 | |
| 149 | #ifdef CONFIG_IRQ_MSI_IOMMU |
/* Return the IOMMU cookie stored in @desc (CONFIG_IRQ_MSI_IOMMU=y). */
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}
| 154 | |
/* Store an opaque IOMMU cookie in @desc (CONFIG_IRQ_MSI_IOMMU=y). */
static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
| 160 | #else |
/* Stub for !CONFIG_IRQ_MSI_IOMMU: the cookie field does not exist. */
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}
| 165 | |
/* Stub for !CONFIG_IRQ_MSI_IOMMU: silently discard the cookie. */
static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
| 170 | #endif |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 171 | |
| 172 | #ifdef CONFIG_PCI_MSI |
| 173 | #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) |
| 174 | #define for_each_pci_msi_entry(desc, pdev) \ |
| 175 | for_each_msi_entry((desc), &(pdev)->dev) |
| 176 | |
| 177 | struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); |
| 178 | void *msi_desc_to_pci_sysdata(struct msi_desc *desc); |
| 179 | void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); |
| 180 | #else /* CONFIG_PCI_MSI */ |
/* Stub for !CONFIG_PCI_MSI: there is no PCI sysdata to return. */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
/* Stub for !CONFIG_PCI_MSI: writing an MSI message is a no-op. */
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
| 188 | #endif /* CONFIG_PCI_MSI */ |
| 189 | |
| 190 | struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 191 | const struct irq_affinity_desc *affinity); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 192 | void free_msi_entry(struct msi_desc *entry); |
| 193 | void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
| 194 | void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
| 195 | |
| 196 | u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 197 | void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 198 | void pci_msi_mask_irq(struct irq_data *data); |
| 199 | void pci_msi_unmask_irq(struct irq_data *data); |
| 200 | |
/*
 * The arch hooks to set up msi irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks must be enabled by the architecture or by
 * drivers which depend on them via msi_controller based MSI handling.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 210 | #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 211 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); |
| 212 | void arch_teardown_msi_irq(unsigned int irq); |
| 213 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
| 214 | void arch_teardown_msi_irqs(struct pci_dev *dev); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 215 | void default_teardown_msi_irqs(struct pci_dev *dev); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 216 | #else |
/*
 * Stub for !CONFIG_PCI_MSI_ARCH_FALLBACKS: reaching this means an arch
 * without the fallback hooks ended up in the legacy setup path — warn
 * once and fail the allocation with -ENODEV.
 */
static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	WARN_ON_ONCE(1);
	return -ENODEV;
}
| 222 | |
/* Stub counterpart of arch_setup_msi_irqs() above: warn once, do nothing. */
static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	WARN_ON_ONCE(1);
}
| 227 | #endif |
| 228 | |
| 229 | /* |
| 230 | * The restore hooks are still available as they are useful even |
| 231 | * for fully irq domain based setups. Courtesy to XEN/X86. |
| 232 | */ |
| 233 | void arch_restore_msi_irqs(struct pci_dev *dev); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 234 | void default_restore_msi_irqs(struct pci_dev *dev); |
| 235 | |
/**
 * struct msi_controller - MSI controller with per-device setup callbacks
 * @owner:	Owning module reference
 * @dev:	Device providing the controller
 * @of_node:	Associated device tree node
 * @list:	List head for controller registration
 * @setup_irq:	Set up a single MSI irq for @dev based on @desc
 * @setup_irqs:	Set up @nvec irqs of @type for @dev; optional —
 *		NOTE(review): which callbacks are mandatory is not visible
 *		here, confirm against the PCI MSI core
 * @teardown_irq: Tear down a previously set up irq
 */
struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
| 248 | |
| 249 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN |
| 250 | |
| 251 | #include <linux/irqhandler.h> |
| 252 | #include <asm/msi.h> |
| 253 | |
| 254 | struct irq_domain; |
| 255 | struct irq_domain_ops; |
| 256 | struct irq_chip; |
| 257 | struct device_node; |
| 258 | struct fwnode_handle; |
| 259 | struct msi_domain_info; |
| 260 | |
/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free a MSI interrupts
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 *
 * @domain_alloc_irqs, @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * is initially for a wrapper around XENs separate MSI universe which can't
 * be wrapped into the regular irq domains concepts by mere mortals. This
 * allows to universally use msi_domain_alloc/free_irqs without having to
 * special case XEN all over the place.
 *
 * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs
 * are set to the default implementation if NULL and even when
 * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and
 * because these callbacks are obviously mandatory.
 *
 * This is NOT meant to be abused, but it can be useful to build wrappers
 * for specialized MSI irq domains which need extra work before and after
 * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					    struct device *dev);
};
| 325 | |
/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};
| 347 | |
/* Flags for msi_domain_info, stored in msi_domain_info::flags */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};
| 374 | |
| 375 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 376 | bool force); |
| 377 | |
| 378 | struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, |
| 379 | struct msi_domain_info *info, |
| 380 | struct irq_domain *parent); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 381 | int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
| 382 | int nvec); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 383 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
| 384 | int nvec); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 385 | void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 386 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); |
| 387 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); |
| 388 | |
| 389 | struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, |
| 390 | struct msi_domain_info *info, |
| 391 | struct irq_domain *parent); |
| 392 | int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, |
| 393 | irq_write_msi_msg_t write_msi_msg); |
| 394 | void platform_msi_domain_free_irqs(struct device *dev); |
| 395 | |
| 396 | /* When an MSI domain is used as an intermediate domain */ |
| 397 | int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, |
| 398 | int nvec, msi_alloc_info_t *args); |
| 399 | int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, |
| 400 | int virq, int nvec, msi_alloc_info_t *args); |
| 401 | struct irq_domain * |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 402 | __platform_msi_create_device_domain(struct device *dev, |
| 403 | unsigned int nvec, |
| 404 | bool is_tree, |
| 405 | irq_write_msi_msg_t write_msi_msg, |
| 406 | const struct irq_domain_ops *ops, |
| 407 | void *host_data); |
| 408 | |
| 409 | #define platform_msi_create_device_domain(dev, nvec, write, ops, data) \ |
| 410 | __platform_msi_create_device_domain(dev, nvec, false, write, ops, data) |
| 411 | #define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \ |
| 412 | __platform_msi_create_device_domain(dev, nvec, true, write, ops, data) |
| 413 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 414 | int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, |
| 415 | unsigned int nr_irqs); |
| 416 | void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, |
| 417 | unsigned int nvec); |
| 418 | void *platform_msi_get_host_data(struct irq_domain *domain); |
| 419 | #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ |
| 420 | |
| 421 | #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN |
| 422 | void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); |
| 423 | struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, |
| 424 | struct msi_domain_info *info, |
| 425 | struct irq_domain *parent); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 426 | int pci_msi_domain_check_cap(struct irq_domain *domain, |
| 427 | struct msi_domain_info *info, struct device *dev); |
| 428 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); |
| 429 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 430 | bool pci_dev_has_special_msi_domain(struct pci_dev *pdev); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 431 | #else |
/* Stub for !CONFIG_PCI_MSI_IRQ_DOMAIN: no irq domain is associated. */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
| 436 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ |
| 437 | |
| 438 | #endif /* LINUX_MSI_H */ |