#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

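/* Set to 1 when a hardware IOMMU has been detected in the system */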
int iommu_detected __read_mostly = 0;

/*
 * Set to 1 when "iommu=pt" is passed on the kernel command line.  In
 * pass-through mode the IOMMU implementations do no DMA translation for
 * devices, so every device can reach all of physical memory directly.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL struct device arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of preallocated dma-debug tracking entries */
#define PREALLOC_DMA_DEBUG_ENTRIES       65536

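/*
 * Early boot pass over the linker-generated IOMMU table: run each entry's
 * detect() hook and, for every IOMMU that is found, its early_init() hook.
 */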
void __init pci_iommu_alloc(void)
{
        struct iommu_table_entry *p;

        sort_iommu_table(__iommu_table, __iommu_table_end);
        check_iommu_entries(__iommu_table, __iommu_table_end);

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && p->detect && p->detect() > 0) {
                        p->flags |= IOMMU_DETECTED;
                        if (p->early_init)
                                p->early_init();
                        if (p->flags & IOMMU_FINISH_IF_DETECTED)
                                break;
                }
        }
}
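
/*
 * Generic coherent allocation: try CMA first (when the context may sleep),
 * then fall back to the page allocator, retrying with GFP_DMA if the
 * resulting address does not fit the device's coherent DMA mask.
 */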
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
                                 unsigned long attrs)
{
        unsigned long dma_mask;
        struct page *page;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag &= ~__GFP_ZERO;
again:
        page = NULL;

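        /* CMA can be used only in a context that permits sleeping */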
        if (gfpflags_allow_blocking(flag)) {
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 flag);
                if (page) {
                        addr = phys_to_dma(dev, page_to_phys(page));
                        if (addr + size > dma_mask) {
                                dma_release_from_contiguous(dev, page, count);
                                page = NULL;
                        }
                }
        }

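        /* Fall back to the normal page allocator */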
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = phys_to_dma(dev, page_to_phys(page));
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }
        memset(page_address(page), 0, size);
        *dma_addr = addr;
        return page_address(page);
}

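/* Release a buffer obtained from dma_generic_alloc_coherent() */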
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = virt_to_page(vaddr);

        if (!dma_release_from_contiguous(dev, page, count))
                free_pages((unsigned long)vaddr, get_order(size));
}

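/*
 * Substitute the fallback device for a NULL device argument and sanitize the
 * gfp flags before a coherent allocation; returns false if the device is not
 * DMA capable at all.
 */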
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
        if (!*dev)
                *dev = &x86_dma_fallback_dev;

        *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
        *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

        if (!is_device_dma_capable(*dev))
                return false;
        return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;

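                /* gart_parse_options() below has additional "force" handling */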
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

int x86_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        /*
         * Masks below 24 bits cannot be satisfied here; the caller has to
         * use GFP_DMA for such allocations instead.
         */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /*
         * Tell the device to use SAC (single address cycle, 32-bit
         * addressing) when IOMMU force is on.  This allows the driver to use
         * cheaper 32-bit accesses in some cases.
         *
         * The downside is that if the IOMMU area overflows and a DAC address
         * is returned as a fallback, the device may not handle it correctly.
         *
         * Some controllers have a 32-bit mode that is as efficient as DAC,
         * so SAC is only forced for masks of at least 40 bits.
         */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}

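/*
 * Late IOMMU initialization: set up DMA-API debugging, run the selected
 * iommu_init hook, then run late_init for every IOMMU detected earlier.
 */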
static int __init pci_iommu_init(void)
{
        struct iommu_table_entry *p;
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
        x86_init.iommu.iommu_init();

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
                        p->late_init();
        }

        return 0;
}

/* Must execute after the PCI subsystem has been initialized */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC, so disable it here. */

static void via_no_dac(struct pci_dev *dev)
{
        if (forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
                              PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif