// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
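
/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */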
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return -EINVAL;
	return ops->alloc(dmab, size);
}
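
/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: zero if the buffer with the given size is allocated successfully,
 * or a negative value on error
 */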
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	int err;

	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = NULL;
	err = __snd_dma_alloc_pages(dmab, size);
	if (err < 0)
		return err;
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);
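
/*
 * Example usage (a minimal sketch; "chip->dev" is a placeholder for a
 * driver's struct device pointer):
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, chip->dev, 4096, &buf);
 *	if (!err) {
 *		... use buf.area (CPU address) and buf.addr (DMA address) ...
 *		snd_dma_free_pages(&buf);
 *	}
 */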
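
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Like snd_dma_alloc_pages(), but when no space is left, this function
 * halves the requested size (rounded up to a page order) and retries until
 * the allocation succeeds or the size falls below one page.  The size
 * actually allocated is stored in dmab->bytes.
 *
 * Return: zero if a buffer is allocated successfully, or a negative
 * value on error
 */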
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
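
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the allocated buffer via the corresponding allocator ops.
 */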
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);
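
/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */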
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
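
/**
 * snd_sgbuf_get_addr - return the DMA address at the given offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the DMA address
 */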
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);
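
/**
 * snd_sgbuf_get_page - return the page at the given offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */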
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);
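
/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous
 *	pages on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */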
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
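
/*
 * Example: walking a buffer in DMA-contiguous chunks (a sketch; "dmab" is
 * assumed to be an already-allocated buffer):
 *
 *	unsigned int ofs = 0, chunk;
 *
 *	while (ofs < dmab->bytes) {
 *		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);
 *
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, dmab->bytes - ofs);
 *		... program one DMA descriptor of "chunk" bytes at "addr" ...
 *		ofs += chunk;
 *	}
 */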
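
/*
 * Continuous pages allocator
 */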
static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);

	dmab->area = alloc_pages_exact(size, gfp);
	return 0;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       page_to_pfn(virt_to_page(dmab->area)),
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};
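
/*
 * VMALLOC allocator
 */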
static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	dmab->area = __vmalloc(size, gfp);
	return 0;
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
		offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	ofs %= PAGE_SIZE;
	size += ofs;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;
	return size - ofs;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
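/*
 * IRAM allocator
 */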
#ifdef CONFIG_GENERIC_ALLOCATOR
static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
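		/* assign the pool into private_data field */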
		dmab->private_data = pool;

		dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
						      PAGE_SIZE);
		if (dmab->area)
			return 0;
	}
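
	/* Internal memory might have limited size and no space left,
	 * so if the IRAM allocation fails, fall back to the normal
	 * device pages allocator below.
	 */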
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif
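
/*
 * Coherent device pages allocator
 */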
static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp_flags;
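
	/*
	 * GFP_KERNEL plus __GFP_COMP (a compound page lets parts be
	 * mapped), __GFP_NORETRY (don't trigger the OOM killer) and
	 * __GFP_NOWARN (skip the failure stack trace; callers handle
	 * allocation errors, e.g. via the fallback allocator).
	 */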
	gfp_flags = GFP_KERNEL
		| __GFP_COMP
		| __GFP_NORETRY
		| __GFP_NOWARN;
	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
					gfp_flags);
#ifdef CONFIG_X86
	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wc((unsigned long)dmab->area,
			      PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return 0;
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};
#endif
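
/*
 * Entry points
 */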
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
#endif
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}