/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
 */

#ifndef __DMAR_H__
#define __DMAR_H__

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/irqreturn.h>
#include <linux/rwsem.h>
#include <linux/rculist.h>

struct acpi_dmar_header;

#ifdef CONFIG_X86
# define DMAR_UNITS_SUPPORTED   MAX_IO_APICS
#else
# define DMAR_UNITS_SUPPORTED   64
#endif

/* DMAR Flags */
#define DMAR_INTR_REMAP         0x1
#define DMAR_X2APIC_OPT_OUT     0x2
#define DMAR_PLATFORM_OPT_IN    0x4

struct intel_iommu;

struct dmar_dev_scope {
        struct device __rcu *dev;
        u8 bus;
        u8 devfn;
};

#ifdef CONFIG_DMAR_TABLE
extern struct acpi_table_header *dmar_tbl;
struct dmar_drhd_unit {
        struct list_head list;          /* list of drhd units    */
        struct acpi_dmar_header *hdr;   /* ACPI header           */
        u64     reg_base_addr;          /* register base address */
        struct  dmar_dev_scope *devices;/* target device array   */
        int     devices_cnt;            /* target device count   */
        u16     segment;                /* PCI domain            */
        u8      ignored:1;              /* ignore drhd           */
        u8      include_all:1;
        u8      gfx_dedicated:1;        /* graphic dedicated     */
        struct intel_iommu *iommu;
};

struct dmar_pci_path {
        u8 bus;
        u8 device;
        u8 function;
};

struct dmar_pci_notify_info {
        struct pci_dev *dev;
        unsigned long event;
        int bus;
        u16 seg;
        u16 level;
        struct dmar_pci_path path[];
} __attribute__((packed));

extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;

#define for_each_drhd_unit(drhd)                                        \
        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
                                dmar_rcu_check())

#define for_each_active_drhd_unit(drhd)                                 \
        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
                                dmar_rcu_check())                       \
        if (drhd->ignored) {} else

#define for_each_active_iommu(i, drhd)                                  \
        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
                                dmar_rcu_check())                       \
        if (i=drhd->iommu, drhd->ignored) {} else

#define for_each_iommu(i, drhd)                                         \
        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
                                dmar_rcu_check())                       \
        if (i=drhd->iommu, 0) {} else

static inline bool dmar_rcu_check(void)
{
        return rwsem_is_locked(&dmar_global_lock) ||
               system_state == SYSTEM_BOOTING;
}

#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())

#define for_each_dev_scope(devs, cnt, i, tmp)                           \
        for ((i) = 0; ((tmp) = (i) < (cnt) ?                            \
            dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \
            (i)++)

#define for_each_active_dev_scope(devs, cnt, i, tmp)                    \
        for_each_dev_scope((devs), (cnt), (i), (tmp))                   \
                if (!(tmp)) { continue; } else

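/*
 * Illustrative sketch (not part of the API): how a caller might walk the
 * active remapping units and their device scopes using the macros above.
 * Callers are expected to hold dmar_global_lock (or run during early boot)
 * so that dmar_rcu_check() is satisfied; the loop variables and the
 * dev_info() message below are hypothetical.
 *
 *	struct dmar_drhd_unit *drhd;
 *	struct intel_iommu *iommu;
 *	struct device *dev;
 *	int i;
 *
 *	down_read(&dmar_global_lock);
 *	for_each_active_iommu(iommu, drhd) {
 *		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt,
 *					  i, dev)
 *			dev_info(dev, "covered by DMAR segment %u\n",
 *				 drhd->segment);
 *	}
 *	up_read(&dmar_global_lock);
 */
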
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                                 void *start, void *end, u16 segment,
                                 struct dmar_dev_scope *devices,
                                 int devices_cnt);
extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
                                 u16 segment, struct dmar_dev_scope *devices,
                                 int count);
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);

static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
{
        return 0;
}

#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
static inline void intel_iommu_shutdown(void) { }

#define dmar_parse_one_rmrr             dmar_res_noop
#define dmar_parse_one_atsr             dmar_res_noop
#define dmar_check_one_atsr             dmar_res_noop
#define dmar_release_one_atsr           dmar_res_noop

static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
        return 0;
}

static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
        return 0;
}
#endif /* CONFIG_INTEL_IOMMU */

#ifdef CONFIG_IRQ_REMAP
extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
#else /* CONFIG_IRQ_REMAP */
static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */

extern bool dmar_platform_optin(void);

#else /* CONFIG_DMAR_TABLE */

static inline int dmar_device_add(void *handle)
{
        return 0;
}

static inline int dmar_device_remove(void *handle)
{
        return 0;
}

static inline bool dmar_platform_optin(void)
{
        return false;
}

#endif /* CONFIG_DMAR_TABLE */

struct irte {
        union {
                /* Shared between remapped and posted mode */
                struct {
                        __u64   present         : 1,  /*  0      */
                                fpd             : 1,  /*  1      */
                                __res0          : 6,  /*  2 -  7 */
                                avail           : 4,  /*  8 - 11 */
                                __res1          : 3,  /* 12 - 14 */
                                pst             : 1,  /* 15      */
                                vector          : 8,  /* 16 - 23 */
                                __res2          : 40; /* 24 - 63 */
                };

                /* Remapped mode */
                struct {
                        __u64   r_present       : 1,  /*  0      */
                                r_fpd           : 1,  /*  1      */
                                dst_mode        : 1,  /*  2      */
                                redir_hint      : 1,  /*  3      */
                                trigger_mode    : 1,  /*  4      */
                                dlvry_mode      : 3,  /*  5 -  7 */
                                r_avail         : 4,  /*  8 - 11 */
                                r_res0          : 4,  /* 12 - 15 */
                                r_vector        : 8,  /* 16 - 23 */
                                r_res1          : 8,  /* 24 - 31 */
                                dest_id         : 32; /* 32 - 63 */
                };

                /* Posted mode */
                struct {
                        __u64   p_present       : 1,  /*  0      */
                                p_fpd           : 1,  /*  1      */
                                p_res0          : 6,  /*  2 -  7 */
                                p_avail         : 4,  /*  8 - 11 */
                                p_res1          : 2,  /* 12 - 13 */
                                p_urgent        : 1,  /* 14      */
                                p_pst           : 1,  /* 15      */
                                p_vector        : 8,  /* 16 - 23 */
                                p_res2          : 14, /* 24 - 37 */
                                pda_l           : 26; /* 38 - 63 */
                };
                __u64 low;
        };

        union {
                /* Shared between remapped and posted mode */
                struct {
                        __u64   sid             : 16, /* 64 - 79  */
                                sq              : 2,  /* 80 - 81  */
                                svt             : 2,  /* 82 - 83  */
                                __res3          : 44; /* 84 - 127 */
                };

                /* Posted mode */
                struct {
                        __u64   p_sid           : 16, /* 64 - 79  */
                                p_sq            : 2,  /* 80 - 81  */
                                p_svt           : 2,  /* 82 - 83  */
                                p_res3          : 12, /* 84 - 95  */
                                pda_h           : 32; /* 96 - 127 */
                };
                __u64 high;
        };
};

static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
{
        dst->present    = src->present;
        dst->fpd        = src->fpd;
        dst->avail      = src->avail;
        dst->pst        = src->pst;
        dst->vector     = src->vector;
        dst->sid        = src->sid;
        dst->sq         = src->sq;
        dst->svt        = src->svt;
}

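/*
 * Illustrative sketch only: populating a remapped-mode IRTE and mirroring
 * the mode-independent fields into a second entry with
 * dmar_copy_shared_irte(). The vector, destination id, source id and the
 * SVT/SQ qualification chosen below are hypothetical values, not a
 * recommended configuration.
 *
 *	struct irte entry, copy;
 *
 *	memset(&entry, 0, sizeof(entry));
 *	entry.present	= 1;
 *	entry.pst	= 0;			// remapped, not posted
 *	entry.vector	= 0x41;			// hypothetical vector
 *	entry.dest_id	= 1;			// hypothetical APIC ID
 *	entry.sid	= PCI_DEVID(0, PCI_DEVFN(2, 0));
 *	entry.sq	= 0;			// compare all 16 SID bits
 *	entry.svt	= 1;			// verify requester by SID/SQ
 *
 *	dmar_copy_shared_irte(&copy, &entry);
 */
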
#define PDA_LOW_BIT     26
#define PDA_HIGH_BIT    32

/*
 * Can't use the common MSI interrupt functions
 * since DMAR is not a PCI device.
 */
struct irq_data;
extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int dmar_alloc_hwirq(int id, int node, void *arg);
extern void dmar_free_hwirq(int irq);

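/*
 * Rough sketch (illustrative only, not the exact upstream code) of how
 * these hooks fit together for a unit's fault-event interrupt; "id",
 * "node" and "iommu" are placeholders:
 *
 *	int irq = dmar_alloc_hwirq(id, node, iommu);
 *
 *	if (irq <= 0)
 *		return -EINVAL;
 *	if (request_irq(irq, dmar_fault, IRQF_NO_THREAD, "dmar_fault", iommu))
 *		return -EBUSY;
 *
 * dmar_set_interrupt() performs this kind of setup per remapping unit;
 * dmar_msi_mask()/dmar_msi_unmask() and dmar_msi_read()/dmar_msi_write()
 * then gate and program the unit's fault-event MSI registers, which live
 * in the IOMMU's MMIO space rather than in PCI config space.
 */
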
#endif /* __DMAR_H__ */