#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

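/*
 * Set by the VIA quirk at the bottom of this file and by the
 * "iommu=nodac"/"allowdac"/"usedac" options below; when positive,
 * dma_supported() refuses 64-bit (DAC) addressing for PCI devices.
 */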
static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access to whole physical memory. This is
 * useful if a user wants to use an IOMMU only for KVM device assignment to
 * guests and not for driver dma translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

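/*
 * Validate a new DMA mask against dma_supported() before committing it to
 * dev->dma_mask; returns -EIO if the device cannot use the requested mask.
 */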
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

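/*
 * On 64-bit non-NUMA systems with memory above 4GB, a chunk of low memory
 * (128MB by default, overridable with "dma32_size=") is reserved early so
 * that an IOMMU/swiotlb set up later in boot can still find memory below
 * 4GB; pci_iommu_alloc() frees the reservation again before detection runs.
 */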
#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * check aperture_64.c allocate_aperture() for reason about
	 * using 512M as goal
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	/*
	 * Kmemleak should not scan this block as it may not be mapped via the
	 * kernel direct mapping.
	 */
	kmemleak_ignore(dma32_bootmem_ptr);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
#else
void __init dma32_reserve_bootmem(void)
{
}
static void __init dma32_free_bootmem(void)
{
}
#endif

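/*
 * Early IOMMU detection: free the dma32 bootmem reservation, then walk the
 * sorted __iommu_table entries, run each ->detect() hook and early-initialize
 * whichever hardware reports itself present.
 */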
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
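
/*
 * Generic coherent-memory allocator: grab zeroed pages on the device's node;
 * if the physical address exceeds the device's coherent mask and that mask is
 * below 32 bits, retry once from ZONE_DMA before failing.
 */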
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
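
/*
 * Illustrative (hypothetical) driver-side usage: allocations normally reach
 * the routine above through the generic DMA API rather than directly, e.g.
 *
 *	dma_addr_t bus;
 *	void *vaddr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, vaddr, bus);
 */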

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
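
/*
 * Example (illustrative) command lines: "iommu=off" disables IOMMU use,
 * "iommu=soft" forces swiotlb, and options combine with commas, e.g.
 * "iommu=pt,nopanic", which the strcspn() loop above splits and applies
 * in order.
 */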

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

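/*
 * Late IOMMU initialization: set up DMA-API debugging, call the selected
 * x86_init.iommu.iommu_init() hook, then run ->late_init() for every
 * iommu_table entry whose ->detect() succeeded earlier in boot.
 */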
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif