#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver dma translation.
 */
int iommu_pass_through __read_mostly;

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES	32768

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
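
/*
 * Example (not from this file): a typical driver probe path negotiates
 * its DMA mask with the helper above, falling back from a 64-bit (DAC)
 * to a 32-bit (SAC) mask.  "mydev" is a hypothetical device pointer:
 *
 *	if (dma_set_mask(&mydev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&mydev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */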

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
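
/*
 * Example (hypothetical command line): booting with "dma32_size=256M"
 * makes the reservation below use 256MB instead of the default 128MB;
 * memparse() accepts the usual K/M/G suffixes.
 */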

void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * check aperture_64.c allocate_aperture() for the reason the
	 * allocation goal below is 512M
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	/*
	 * Kmemleak should not scan this block as it may not be mapped via
	 * the kernel direct mapping.
	 */
	kmemleak_ignore(dma32_bootmem_ptr);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
#endif

void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();
#endif

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}
144
145void *dma_generic_alloc_coherent(struct device *dev, size_t size,
146 dma_addr_t *dma_addr, gfp_t flag)
147{
148 unsigned long dma_mask;
149 struct page *page;
150 dma_addr_t addr;
151
152 dma_mask = dma_alloc_coherent_mask(dev, flag);
153
154 flag |= __GFP_ZERO;
155again:
156 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
157 if (!page)
158 return NULL;
159
160 addr = page_to_phys(page);
161 if (addr + size > dma_mask) {
162 __free_pages(page, get_order(size));
163
164 if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
165 flag = (flag & ~GFP_DMA32) | GFP_DMA;
166 goto again;
167 }
168
169 return NULL;
170 }
171
172 *dma_addr = addr;
173 return page_address(page);
174}
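
/*
 * Example (sketch, not part of this file): an IOMMU backend with no
 * special coherent-allocation needs can plug the generic helper above
 * straight into its ops table.  "my_dma_ops" is a hypothetical name;
 * nommu_dma_ops in pci-nommu.c uses this pattern in-tree:
 *
 *	static struct dma_map_ops my_dma_ops = {
 *		.alloc_coherent = dma_generic_alloc_coherent,
 *		...
 *	};
 */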

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
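
/*
 * Example (hypothetical boot lines): the parser above walks a comma
 * separated list, so "iommu=force,nomerge" forces IOMMU use while
 * disabling scatter-gather merging, and "iommu=soft" selects the
 * software bounce-buffer path (swiotlb) when CONFIG_SWIOTLB is set.
 */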

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks below 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
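
/*
 * Example (hypothetical scenario): with "iommu=forcesac" on the command
 * line, a device requesting DMA_BIT_MASK(64) is refused here so the
 * stack falls back to a 32-bit SAC mask, while an aic79xx-style
 * DMA_BIT_MASK(39) request still succeeds.
 */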

static int __init pci_iommu_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif

	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();

	amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif