/*
 * Provide default implementations of the DMA mapping callbacks for
 * direct DMA and indirect DMA (via IOMMU).
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0.  The offset
 * is queried via get_dma_offset() and added to the physical address of the
 * buffer to form the bus address handed to the device.
 */
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

	/* Limit fits in the mask, we are good */
	if (mask >= limit)
		return 1;

#ifdef CONFIG_FSL_SOC
	/*
	 * Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
	 * that will have to be refined if/when they support iommus
	 */
	return 1;
#endif
	/* Sorry ... */
	return 0;
#else
	return 1;
#endif
}
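
/*
 * Illustrative note (not part of the original source): with 8 GiB of RAM
 * and a zero DMA offset, limit above is 0x1_FFFF_FFFF (33 bits).  A device
 * advertising a 64-bit mask passes the check and can use the direct ops; a
 * device limited to DMA_BIT_MASK(32) fails it and, on non-Freescale PPC64,
 * is expected to go through the IOMMU paths further down in this file.
 */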

void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flag,
				  struct dma_attrs *attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void __dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
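
/*
 * Illustrative note (not from the original source): the cache-coherent
 * branch above hands out whole pages from the buddy allocator, so a request
 * is rounded up to a power-of-two number of pages.  With 4 KiB pages, a
 * 16 KiB allocation is get_order(16384) == 2, i.e. four pages, and the
 * matching free_pages() call must use the same order, which is why both
 * sides recompute it from the size.
 */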

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs)
{
	struct iommu_table *iommu;

	/*
	 * The coherent mask may be smaller than the real mask, check if
	 * we can really use the direct ops
	 */
	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_direct_alloc_coherent(dev, size, dma_handle,
						   flag, attrs);

	/* Ok we can't ... do we have an iommu ? If not, fail */
	iommu = get_iommu_table_base(dev);
	if (!iommu)
		return NULL;

	/* Try to use the iommu */
	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
				    dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}
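
/*
 * Illustrative note (not part of the original source): a device whose
 * streaming dma_mask is 64-bit but whose coherent_dma_mask is only 32-bit
 * will use the direct path for dma_map_page()/dma_map_sg() yet fall back
 * to iommu_alloc_coherent() here whenever RAM extends above what the
 * coherent mask can address, because the check above is made against the
 * coherent mask only.
 */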

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs)
{
	struct iommu_table *iommu;

	/* See comments in dma_direct_alloc_coherent() */
	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
						  attrs);

	/* Maybe we used the iommu ... */
	iommu = get_iommu_table_base(dev);

	/*
	 * If we hit this we should have never allocated in the first
	 * place, so how come we are freeing?
	 */
	if (WARN_ON(!iommu))
		return;
	iommu_free_coherent(iommu, size, vaddr, dma_handle);
}

int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}
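
/*
 * Usage sketch (illustrative, not part of this file): a driver sees these
 * ops only through the generic DMA API, e.g.
 *
 *	nents = dma_map_sg(dev, sglist, count, DMA_TO_DEVICE);
 *
 * With the direct ops, each entry's dma_address is simply its physical
 * address plus the per-device offset; no bounce buffering or IOMMU entry
 * is created, so the mapping cannot fail for addressing reasons and nents
 * is returned unchanged.
 */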

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}
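
/*
 * Illustrative note (not from the original source): the computation above
 * rounds the highest bus address up to a power of two minus one.  With
 * 4 GiB of RAM and no offset, end = 0x1_0000_0000, fls64(end) = 33, so
 * mask = 1ULL << 32 and then mask += mask - 1 gives 0x1_FFFF_FFFF: the
 * device needs a 33-bit DMA mask to reach all of memory directly.
 */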

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.get_required_mask = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu = dma_direct_sync_single,
	.sync_single_for_device = dma_direct_sync_single,
	.sync_sg_for_cpu = dma_direct_sync_sg,
	.sync_sg_for_device = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
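
/*
 * Usage sketch (illustrative, not part of this file): platform or bus code
 * that wants a device to use these ops typically installs them and records
 * the bus offset before the driver probes, along the lines of
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *	set_dma_offset(&pdev->dev, PCI_DRAM_OFFSET);
 *
 * set_dma_ops() and set_dma_offset() are the powerpc helpers from
 * <asm/dma-mapping.h>; the exact call site is platform specific.
 */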

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask)) {
		/*
		 * We need to special case the direct DMA ops which can
		 * support a fallback for coherent allocations. There
		 * is no dma_op->set_coherent_mask() so we have to do
		 * things the hard way:
		 */
		if (get_dma_ops(dev) != &dma_direct_ops ||
		    get_iommu_table_base(dev) == NULL ||
		    !dma_iommu_dma_supported(dev, mask))
			return -EIO;
	}
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int __dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);

		if (phb->controller_ops.dma_set_mask)
			return phb->controller_ops.dma_set_mask(pdev, dma_mask);
	}

	return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);
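
/*
 * Usage sketch (illustrative, not part of this file): drivers call the
 * generic API, which lands here on powerpc, e.g.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 *
 * The machine- and PHB-level hooks get first refusal so that platforms
 * which support it can reconfigure a device (for instance between bypass
 * and IOMMU-translated windows) based on the requested mask.
 */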

u64 __dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static void arch_dma_devres_release(struct device *dev, void *res) { }

/* Allocate the per-device state used by the hybrid direct/iommu DMA setup */
int arch_dma_init(struct device *dev)
{
	if (!dev->archdata.hybrid_dma_data)
		dev->archdata.hybrid_dma_data =
			devres_alloc(arch_dma_devres_release,
				     sizeof(struct dev_arch_dmadata), GFP_KERNEL);

	if (!dev->archdata.hybrid_dma_data)
		return -ENOMEM;
	return 0;
}

static int __init arch_platform_init(void)
{
	platform_notify = arch_dma_init;
	return 0;
}
arch_initcall(arch_platform_init);

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);