// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer with a size fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
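		/* halve the request, then round it back up to a
		 * power-of-two number of pages before retrying
		 */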
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the allocated buffer via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object at success, or NULL if failed
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);

/*
 * Continuous pages allocator
 */
static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
	void *p = alloc_pages_exact(size, gfp);

	if (p)
		dmab->addr = page_to_phys(virt_to_page(p));
	return p;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

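/* the buffer is physically contiguous, so a single remap_pfn_range() of the
 * base address covers the whole user-space mapping
 */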
static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and no enough space,
	 * so if we fail to malloc, try to fetch memory traditionally.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages allocator
 */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt) {
#ifdef CONFIG_SND_DMA_SGBUF
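		/* when the core allocator fails, retry with the page-by-page
		 * fallback, switching the buffer type so that the matching
		 * fallback ops are used for the rest of its lifetime
		 */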
		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
			dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
		else
			dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
		return snd_dma_sg_fallback_alloc(dmab, size);
#else
		return NULL;
#endif
	}

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p)
		dmab->private_data = sgt;
	else
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
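	/* sync both the DMA pages and the kernel vmap alias; transfers that
	 * cannot occur for the buffer's DMA direction are skipped
	 */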
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

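/* set up an SG page iterator starting at the page that contains the given
 * byte offset; the caller advances it to fetch the page or DMA address
 */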
static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1;
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer handling */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	if (!p)
		return NULL;
	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
		return p;
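	/* switch every page to write-combined; this is undone page by page
	 * in snd_dma_sg_wc_free() before the buffer is released
	 */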
	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wc(sg_wc_address(&iter), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wb(sg_wc_address(&iter), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	size_t count;
	struct page **pages;
	dma_addr_t *addrs;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i;

	if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
		dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
				  page_address(sgbuf->pages[i]),
				  sgbuf->addrs[i]);
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pages;
	size_t i, count;
	void *p;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto error;
	sgbuf->pages = pages;
	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->addrs)
		goto error;

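	/* allocate the buffer page by page via dma_alloc_coherent() so that
	 * a partial failure can be unwound using sgbuf->count
	 */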
	for (i = 0; i < count; sgbuf->count++, i++) {
		p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
				       &sgbuf->addrs[i], DEFAULT_GFP);
		if (!p)
			goto error;
		sgbuf->pages[i] = virt_to_page(p);
	}

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(pages, count);
	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;
	dmab->private_data = sgbuf;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
	.alloc = snd_dma_sg_fallback_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	/* reuse the vmalloc helpers, since the buffer is vmap'ed */
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
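	/* skip syncs that cannot matter for the buffer's DMA direction */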
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

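/* look up the ops table entry for the buffer type, rejecting out-of-range types */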
static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}