#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

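/* Set by the IOMMU detection code when a hardware IOMMU is found. */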
int iommu_detected __read_mostly = 0;

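/*
 * Set by "iommu=pt": IOMMU implementations then use an identity (1:1)
 * mapping instead of per-device DMA translation.
 */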
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

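/* Dummy device used when drivers pass a NULL device to the DMA API. */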
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

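/* Number of dma-debug entries preallocated by dma_debug_init() below. */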
#define PREALLOC_DMA_DEBUG_ENTRIES	65536

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
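
/*
 * Typical use from a driver (illustrative sketch, not taken from this
 * file): try a 64-bit mask first and fall back to 32 bits if the wider
 * one is rejected.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * Note that dma_set_mask() only updates the streaming mask; the coherent
 * mask is set separately via dma_set_coherent_mask().
 */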

/*
 * Early IOMMU setup: sort the compile-time IOMMU detection table and run
 * each entry's detect() hook, calling early_init() for the entries that
 * report a hit.
 */
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
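
/*
 * Generic coherent DMA allocation: when sleeping is allowed, try the
 * contiguous memory allocator (CMA) first, otherwise (or on CMA failure)
 * fall back to alloc_pages_node(). If the pages end up above the device's
 * DMA mask and the mask is below 32 bits, retry from ZONE_DMA before
 * giving up.
 */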
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can only be used from a context that is allowed to sleep. */
	if (flag & __GFP_WAIT) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* Fall back to the normal page allocator. */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		/* Retry from ZONE_DMA if a narrow mask was not satisfied. */
		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}

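/*
 * Free memory obtained from dma_generic_alloc_coherent(): give it back to
 * CMA if it came from there, otherwise to the page allocator.
 */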
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}
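
/*
 * Parse the "iommu=" kernel parameter. Options are comma-separated and
 * handled below: off, force, noforce, biomerge, panic, nopanic, merge,
 * nomerge, forcesac, allowdac, nodac, usedac, soft (SWIOTLB), pt
 * (pass-through) and calgary, plus whatever gart_parse_options()
 * understands.
 */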
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options() has more fine-grained force support. */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif

		/* Advance to the next comma-separated option. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

/*
 * Check whether a device can reliably use the given DMA mask on this
 * platform, honouring the forbid_dac and iommu_sac_force overrides.
 */
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

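	/* 24 bits (the 16 MB ISA limit) is the least we can handle. */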
	if (mask < DMA_BIT_MASK(24))
		return 0;

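	/*
	 * With iommu=forcesac, report wide masks (>= 40 bits) as
	 * unsupported so that drivers fall back to 32-bit (SAC)
	 * addressing; smaller masks are left alone.
	 */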
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

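/*
 * Late IOMMU initialization: set up dma-debug, run the selected
 * x86_init.iommu.iommu_init() hook and then the late_init() hook of every
 * IOMMU that was detected in pci_iommu_alloc().
 */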
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}

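/* Must execute after the PCI subsystem has been initialized. */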
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI

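/*
 * Quirk: VIA PCI bridges are known to have trouble with DAC (64-bit
 * addressing), so disable it unless the user explicitly allowed it.
 */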
static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif