/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
 */

#ifndef __DMAR_H__
#define __DMAR_H__

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/irqreturn.h>
#include <linux/rwsem.h>
#include <linux/rculist.h>

struct acpi_dmar_header;

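/* Upper bound on the number of DMA remapping hardware units (DRHDs). */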
#ifdef CONFIG_X86
# define	DMAR_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	DMAR_UNITS_SUPPORTED	64
#endif

/* DMAR Flags */
#define DMAR_INTR_REMAP		0x1
#define DMAR_X2APIC_OPT_OUT	0x2
#define DMAR_PLATFORM_OPT_IN	0x4

struct intel_iommu;

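/*
 * One entry in a DRHD unit's device scope: an RCU-protected pointer to the
 * enumerated device together with its bus number and devfn.
 */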
struct dmar_dev_scope {
	struct device __rcu *dev;
	u8 bus;
	u8 devfn;
};

#ifdef CONFIG_DMAR_TABLE
extern struct acpi_table_header *dmar_tbl;
struct dmar_drhd_unit {
	struct list_head list;		/* list of drhd units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	reg_base_addr;		/* register base address */
	struct dmar_dev_scope *devices;	/* target device array	*/
	int	devices_cnt;		/* target device count	*/
	u16	segment;		/* PCI domain		*/
	u8	ignored:1;		/* ignore drhd		*/
	u8	include_all:1;
	u8	gfx_dedicated:1;	/* graphic dedicated	*/
	struct intel_iommu *iommu;
};

struct dmar_pci_path {
	u8 bus;
	u8 device;
	u8 function;
};

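/*
 * Describes a PCI device add/remove event for the DMAR bus notifier: 'seg'
 * and 'bus' identify the segment and its root bus and, for add events,
 * path[] holds the 'level' bus/device/function hops leading from that root
 * bus down to 'dev'.
 */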
struct dmar_pci_notify_info {
	struct pci_dev *dev;
	unsigned long event;
	int bus;
	u16 seg;
	u16 level;
	struct dmar_pci_path path[];
} __attribute__((packed));

extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;

#define for_each_drhd_unit(drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())

#define for_each_active_drhd_unit(drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (drhd->ignored) {} else

#define for_each_active_iommu(i, drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (i=drhd->iommu, drhd->ignored) {} else

#define for_each_iommu(i, drhd)						\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (i=drhd->iommu, 0) {} else

static inline bool dmar_rcu_check(void)
{
	return rwsem_is_locked(&dmar_global_lock) ||
	       system_state == SYSTEM_BOOTING;
}

#define	dmar_rcu_dereference(p)	rcu_dereference_check((p), dmar_rcu_check())

#define for_each_dev_scope(devs, cnt, i, tmp)				\
	for ((i) = 0; ((tmp) = (i) < (cnt) ?				\
	    dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt));	\
	    (i)++)

#define for_each_active_dev_scope(devs, cnt, i, tmp)			\
	for_each_dev_scope((devs), (cnt), (i), (tmp))			\
		if (!(tmp)) { continue; } else
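/*
 * Typical device-scope walk (a sketch; 'drhd' is assumed to come from one
 * of the DRHD iterators above): 'tmp' receives the RCU-dereferenced
 * struct device pointer for slot 'i', or NULL for an empty slot, which
 * for_each_active_dev_scope() skips:
 *
 *	struct device *tmp;
 *	int i;
 *
 *	for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, tmp) {
 *		// tmp is a valid device in this DRHD's scope
 *	}
 */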

extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
				 void *start, void *end, u16 segment,
				 struct dmar_dev_scope *devices,
				 int devices_cnt);
extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
				 u16 segment, struct dmar_dev_scope *devices,
				 int count);
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);

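/* No-op parse callback for ACPI DMAR sub-tables that need no handling. */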
static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
{
	return 0;
}

#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU */
static inline int intel_iommu_init(void) { return -ENODEV; }
static inline void intel_iommu_shutdown(void) { }

#define	dmar_parse_one_rmrr		dmar_res_noop
#define	dmar_parse_one_atsr		dmar_res_noop
#define	dmar_check_one_atsr		dmar_res_noop
#define	dmar_release_one_atsr		dmar_res_noop

static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	return 0;
}

static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	return 0;
}
#endif /* CONFIG_INTEL_IOMMU */

#ifdef CONFIG_IRQ_REMAP
extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
#else /* CONFIG_IRQ_REMAP */
static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */

extern bool dmar_platform_optin(void);

#else /* CONFIG_DMAR_TABLE */

static inline int dmar_device_add(void *handle)
{
	return 0;
}

static inline int dmar_device_remove(void *handle)
{
	return 0;
}

static inline bool dmar_platform_optin(void)
{
	return false;
}

#endif /* CONFIG_DMAR_TABLE */

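/*
 * 128-bit interrupt remapping table entry (IRTE) as defined by the VT-d
 * specification. The first anonymous struct in each union covers the fields
 * shared by both formats; the remaining structs give the remapped-mode and
 * posted-mode views of the same bits.
 */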
struct irte {
	union {
		/* Shared between remapped and posted mode */
		struct {
			__u64	present		: 1,  /*  0      */
				fpd		: 1,  /*  1      */
				__res0		: 6,  /*  2 -  7 */
				avail		: 4,  /*  8 - 11 */
				__res1		: 3,  /* 12 - 14 */
				pst		: 1,  /* 15      */
				vector		: 8,  /* 16 - 23 */
				__res2		: 40; /* 24 - 63 */
		};

		/* Remapped mode */
		struct {
			__u64	r_present	: 1,  /*  0      */
				r_fpd		: 1,  /*  1      */
				dst_mode	: 1,  /*  2      */
				redir_hint	: 1,  /*  3      */
				trigger_mode	: 1,  /*  4      */
				dlvry_mode	: 3,  /*  5 -  7 */
				r_avail		: 4,  /*  8 - 11 */
				r_res0		: 4,  /* 12 - 15 */
				r_vector	: 8,  /* 16 - 23 */
				r_res1		: 8,  /* 24 - 31 */
				dest_id		: 32; /* 32 - 63 */
		};

		/* Posted mode */
		struct {
			__u64	p_present	: 1,  /*  0      */
				p_fpd		: 1,  /*  1      */
				p_res0		: 6,  /*  2 -  7 */
				p_avail		: 4,  /*  8 - 11 */
				p_res1		: 2,  /* 12 - 13 */
				p_urgent	: 1,  /* 14      */
				p_pst		: 1,  /* 15      */
				p_vector	: 8,  /* 16 - 23 */
				p_res2		: 14, /* 24 - 37 */
				pda_l		: 26; /* 38 - 63 */
		};
		__u64 low;
	};

	union {
		/* Shared between remapped and posted mode */
		struct {
			__u64	sid		: 16,  /* 64 - 79  */
				sq		: 2,   /* 80 - 81  */
				svt		: 2,   /* 82 - 83  */
				__res3		: 44;  /* 84 - 127 */
		};

		/* Posted mode */
		struct {
			__u64	p_sid		: 16,  /* 64 - 79  */
				p_sq		: 2,   /* 80 - 81  */
				p_svt		: 2,   /* 82 - 83  */
				p_res3		: 12,  /* 84 - 95  */
				pda_h		: 32;  /* 96 - 127 */
		};
		__u64 high;
	};
};

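/*
 * Copy only the fields that have the same meaning in both the remapped and
 * posted IRTE formats; format-specific fields are left untouched.
 */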
static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
{
	dst->present	= src->present;
	dst->fpd	= src->fpd;
	dst->avail	= src->avail;
	dst->pst	= src->pst;
	dst->vector	= src->vector;
	dst->sid	= src->sid;
	dst->sq		= src->sq;
	dst->svt	= src->svt;
}

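/*
 * Widths of the pda_l and pda_h fields, which together hold the
 * posted-interrupt descriptor address within the IRTE.
 */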
#define PDA_LOW_BIT	26
#define PDA_HIGH_BIT	32

/*
 * The DMAR unit is not a PCI device, so the common PCI MSI helpers cannot
 * be used for its fault-reporting interrupt; dedicated helpers are provided
 * instead.
 */
struct irq_data;
extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int dmar_alloc_hwirq(int id, int node, void *arg);
extern void dmar_free_hwirq(int irq);

#endif /* __DMAR_H__ */