// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Generic memory allocators for ALSA DMA buffers
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					   gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

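/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */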
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);

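/**
 * snd_dma_alloc_pages_fallback - allocate the buffer, falling back to smaller sizes
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * the bytes field of @dmab.
 *
 * Return: zero if a buffer is allocated successfully, otherwise a negative
 * value on error.
 */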
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);

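/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */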
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

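/**
 * snd_devm_alloc_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device
 * pointer, hence the allocation types SNDRV_DMA_TYPE_CONTINUOUS and
 * SNDRV_DMA_TYPE_VMALLOC can't work with this.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */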
struct snd_dma_buffer *
snd_devm_alloc_pages(struct device *dev, int type, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_pages(type, dev, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);

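/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */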
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

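/**
 * snd_sgbuf_get_addr - return the DMA address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the DMA address
 */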
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

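/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */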
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

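/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */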
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);

/*
 * Continuous pages allocator
 */
static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
	void *p = alloc_pages_exact(size, gfp);

	if (p)
		dmab->addr = page_to_phys(virt_to_page(p));
	return p;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1;

	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}

	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* keep the pool in private_data for the free callback */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

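	/* The IRAM pool may be missing or too small for the request;
	 * fall back to the normal device pages below
	 */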
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_NORETRY | /* don't trigger reclaim when allocation fails */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
#ifdef CONFIG_X86
	if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return p;
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
#ifdef CONFIG_X86
/* On x86, share the same ops as the standard dev ops */
#define snd_dma_wc_ops	snd_dma_dev_ops
#else	/* CONFIG_X86 */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};
#endif	/* CONFIG_X86 */
#endif	/* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif	/* CONFIG_GENERIC_ALLOCATOR */
#endif	/* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}