blob: 758e32f0d443474c9de3da307f21a93c324b8e63 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef LINUX_MSI_H
3#define LINUX_MSI_H
4
5#include <linux/kobject.h>
6#include <linux/list.h>
7
/**
 * struct msi_msg - Composed MSI message (address/data pair)
 * @address_lo:	Low 32 bits of the MSI message address
 * @address_hi:	High 32 bits of the MSI message address
 * @data:	MSI message data
 */
struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data
				 * NOTE(review): field is u32; "16 bits"
				 * reflects plain-MSI's 16-bit data register,
				 * MSI-X uses the full 32 — confirm */
};
13
/*
 * NOTE(review): presumably non-zero disables use of the MSI/MSI-X mask
 * bits (e.g. under hypervisors) — confirm against drivers/pci/msi.c.
 */
extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
/* Copy the MSI message cached in @entry into @msg */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
/* Look up the descriptor of @irq and copy its cached MSI message into @msg */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
/* No generic MSI support: reading the cached message is a no-op */
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif
28
/* Callback type used to write @msg for @desc into device registers */
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};
41
/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:	The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16	msi_index;
};
49
/**
 * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index:	TISCI device index
 */
struct ti_sci_inta_msi_desc {
	u16	dev_index;
};
57
/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_cookie: Opaque IOMMU token stored with the descriptor, only
 *		present with CONFIG_IRQ_MSI_IOMMU
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI/X] True if the entry is virtual, i.e. not
 *		hardware backed — NOTE(review): inferred; confirm
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI] Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform] Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc] FSL MC device specific msi descriptor data
 * @inta:	[INTA] TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	maskbit		: 1;
				u8	is_64		: 1;
				u8	is_virtual	: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};
132
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev)	\
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
/*
 * Walk every Linux irq number of every descriptor of @dev, skipping
 * descriptors that have no irq assigned yet ((desc)->irq == 0).
 */
#define for_each_msi_vector(desc, __irq, dev)		\
	for_each_msi_entry((desc), (dev))		\
		if ((desc)->irq)			\
			for (__irq = (desc)->irq;	\
			     __irq < ((desc)->irq + (desc)->nvec_used); \
			     __irq++)
David Brazdil0f672f62019-12-10 10:32:29 +0000148
#ifdef CONFIG_IRQ_MSI_IOMMU
/* Return the opaque IOMMU token stored in @desc */
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

/* Store an opaque IOMMU token in @desc for later retrieval */
static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
/* Without CONFIG_IRQ_MSI_IOMMU there is no cookie field: always NULL */
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

/* ... and storing a cookie is a no-op */
static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000171
#ifdef CONFIG_PCI_MSI
/* First MSI descriptor of a PCI device */
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
/* Iterate over all MSI descriptors of a PCI device */
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
/* Stub: without PCI MSI support there is no sysdata to look up */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
/* Stub: writing an MSI message is a no-op without PCI MSI support */
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */
189
/* Allocate/free a msi_desc for @dev covering @nvec vectors */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

/*
 * The arch hooks to set up msi irqs. Those functions are
 * implemented as weak symbols so that they /can/ be overridden by
 * architecture specific code if needed.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void arch_restore_msi_irqs(struct pci_dev *dev);

void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);
214
/**
 * struct msi_controller - legacy MSI chip abstraction
 * @owner:	Owning module
 * @dev:	Device backing this controller
 * @of_node:	Optional device tree node
 * @list:	List head for management
 * @setup_irq:	Set up a single irq for @desc of @dev
 * @setup_irqs:	Set up @nvec irqs of @type for @dev
 * @teardown_irq: Tear down a previously set up @irq
 */
struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
227
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>
#include <asm/msi.h>

/* Forward declarations to keep header dependencies minimal */
struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;
239
/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free a MSI interrupts
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};
280
/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};
302
/* Flags for msi_domain_info */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};
329
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

/* Convenience wrappers around the @is_tree argument above */
#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
372
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
					  struct msi_desc *desc);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
#else
/* Stub: without PCI MSI irq domains a device never has one */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
390
391#endif /* LINUX_MSI_H */