#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
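
/*
 * Illustrative usage sketch (not part of the original file): drivers never
 * call dma_direct_alloc_coherent() directly; they go through the generic
 * DMA API, which dispatches to the .alloc/.free hooks registered below.
 * The device ("&pdev->dev") and size here are hypothetical:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle,
 *				       GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... program "handle" into the device, touch "buf" from the CPU ...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
 */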

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* direct mapping: the bus address is the physical address; sync each segment */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
			   sg->length, direction);
	}

	return nents;
}
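
/*
 * Illustrative only: this hook is reached through dma_map_sg().  A minimal
 * sketch, assuming a hypothetical scatterlist "sgl" with "nents" entries:
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	// hand sg_dma_address()/sg_dma_len() of each entry to the hardware
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */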

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	/*
	 * dma_address is already a physical address, which is exactly
	 * what __dma_sync() expects, so no conversion is needed before
	 * the cache maintenance below.
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}
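
/*
 * Illustrative only: streaming mappings enter through dma_map_single() or
 * dma_map_page() and end up in dma_direct_map_page() above.  A minimal
 * sketch, assuming a hypothetical buffer "buf" of "len" bytes:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -EIO;
 *	// ... let the device read the buffer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */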

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * Nothing to flush for a DMA_TO_DEVICE buffer: the segment is
	 * being handed back to the CPU, so only data written by the
	 * device (DMA_FROM_DEVICE) needs cache maintenance.
	 */

	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the region; only CPU-written data
	 * (DMA_TO_DEVICE) has to be pushed out before the transfer.
	 */

	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* sync only segments the device may have written (DMA_FROM_DEVICE) */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* flush out only segments the device is about to read (DMA_TO_DEVICE) */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
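
/*
 * Illustrative only: the .mmap hook above is reached via dma_mmap_coherent()
 * from a driver's own ->mmap file operation.  A hypothetical sketch, with
 * "buf" and "handle" obtained earlier from dma_alloc_coherent():
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, buf, handle,
 *					 vma->vm_end - vma->vm_start);
 *	}
 */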

const struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = dma_direct_map_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
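
/*
 * Illustrative only: nothing in this file attaches the table to devices.
 * It is typically returned by the architecture's get_arch_dma_ops() hook;
 * a bus layer could equally attach it per device with the generic helper
 * from <linux/dma-mapping.h>:
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 */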

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);