// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

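/* Set by the "usedac" option below; suppresses the VIA no-DAC quirk. */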
static bool disable_dac_quirk __read_mostly;

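/* Default DMA operations; IOMMU drivers may replace these at init time. */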
const struct dma_map_ops *dma_ops = &dma_direct_ops;
EXPORT_SYMBOL(dma_ops);

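/* With CONFIG_IOMMU_DEBUG, force use of the IOMMU and panic on overflow. */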
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

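/* Controlled by the "merge"/"nomerge"/"biomerge" options handled below. */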
int iommu_merge __read_mostly = 0;

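/* Set by "iommu=off"; checked by the detection/init code to skip the hardware IOMMU. */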
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 * Passthrough can also be enabled by default in the kernel config
 * (CONFIG_IOMMU_DEFAULT_PASSTHROUGH) and disabled again with iommu=nopt at
 * boot time.
 */
#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
int iommu_pass_through __read_mostly = 1;
#else
int iommu_pass_through __read_mostly;
#endif

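/* Detection entries collected by the linker into the .iommu_table section. */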
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

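/*
 * Walk the detection table early in boot: run each detect() hook and the
 * early_init() of any IOMMU that reports itself present.
 */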
void __init pci_iommu_alloc(void)
{
        struct iommu_table_entry *p;

        sort_iommu_table(__iommu_table, __iommu_table_end);
        check_iommu_entries(__iommu_table, __iommu_table_end);

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && p->detect && p->detect() > 0) {
                        p->flags |= IOMMU_DETECTED;
                        if (p->early_init)
                                p->early_init();
                        if (p->flags & IOMMU_FINISH_IF_DETECTED)
                                break;
                }
        }
}

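/*
 * Substitute the fallback device for a NULL dev and check that the device
 * is capable of DMA before an allocation is attempted.
 */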
bool arch_dma_alloc_attrs(struct device **dev)
{
        if (!*dev)
                *dev = &x86_dma_fallback_dev;

        if (!is_device_dma_capable(*dev))
                return false;
        return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        pr_warn("forcesac option ignored.\n");
                if (!strncmp(p, "allowdac", 8))
                        pr_warn("allowdac option ignored.\n");
                if (!strncmp(p, "nodac", 5))
                        pr_warn("nodac option ignored.\n");
                if (!strncmp(p, "usedac", 6)) {
                        disable_dac_quirk = true;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;
                if (!strncmp(p, "nopt", 4))
                        iommu_pass_through = 0;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

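/*
 * Late initialization: run the selected IOMMU implementation's init routine,
 * then the late_init() hook of every entry that was detected earlier.
 */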
static int __init pci_iommu_init(void)
{
        struct iommu_table_entry *p;

        x86_init.iommu.iommu_init();

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
                        p->late_init();
        }

        return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
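/* DAC (dual address cycle) allows 64-bit PCI addressing; limit devices behind these bridges to 32-bit DMA instead. */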

static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
        pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
        return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
        if (!disable_dac_quirk) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
        }
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
                              PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif