/*
 * DMA coherent (uncached) memory allocator for the TI C6x architecture.
 */

#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/setup.h>
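
/*
 * Bookkeeping for the dedicated DMA coherent region: its physical base
 * address, size, number of pages, and a bitmap of allocated pages,
 * all serialized by dma_lock.
 */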
static phys_addr_t dma_base;
static u32 dma_size;
static u32 dma_pages;

static unsigned long *dma_bitmap;

static DEFINE_SPINLOCK(dma_lock);
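
/*
 * Grab a naturally aligned run of 2^order pages from the coherent region
 * and return its physical address, or 0 on failure.
 */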
static inline u32 __alloc_dma_pages(int order)
{
	unsigned long flags;
	int pos;

	spin_lock_irqsave(&dma_lock, flags);
	pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
	spin_unlock_irqrestore(&dma_lock, flags);

	/*
	 * bitmap_find_free_region() returns a negative errno on failure;
	 * report that as 0, which the caller already treats as "no memory".
	 */
	if (pos < 0)
		return 0;

	return dma_base + ((u32)pos << PAGE_SHIFT);
}
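
/*
 * Return a region previously handed out by __alloc_dma_pages() to the
 * bitmap, after sanity-checking that it lies inside the coherent region.
 */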
static void __free_dma_pages(u32 addr, int order)
{
	unsigned long flags;
	u32 pos = (addr - dma_base) >> PAGE_SHIFT;

	/*
	 * A region ending exactly at dma_pages is still in range, so only
	 * reject frees that extend strictly past the end of the bitmap.
	 */
	if (addr < dma_base || (pos + (1 << order)) > dma_pages) {
		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
		BUG();
	}

	spin_lock_irqsave(&dma_lock, flags);
	bitmap_release_region(dma_bitmap, pos, order);
	spin_unlock_irqrestore(&dma_lock, flags);
}
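
/*
 * Allocate a zeroed, contiguous chunk of the coherent region and return
 * both the kernel virtual address and the DMA handle for it.
 */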
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		     gfp_t gfp, unsigned long attrs)
{
	void *ret;
	u32 paddr;
	int order;

	if (!dma_size || !size)
		return NULL;

	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);

	paddr = __alloc_dma_pages(order);

	if (handle)
		*handle = paddr;

	if (!paddr)
		return NULL;

	ret = phys_to_virt(paddr);
	/* order counts pages, so the allocation is PAGE_SIZE << order bytes */
	memset(ret, 0, PAGE_SIZE << order);
	return ret;
}
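
/*
 * Drivers never call arch_dma_alloc()/arch_dma_free() directly; they go
 * through the generic DMA API, which dispatches here on this platform.
 * Illustrative example only:
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (buf)
 *		dma_free_coherent(dev, SZ_4K, buf, dma);
 */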
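
/*
 * Free a chunk of coherent memory previously allocated by arch_dma_alloc().
 */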
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	int order;

	if (!dma_size || !size)
		return;

	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);

	__free_dma_pages(virt_to_phys(vaddr), order);
}
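
/*
 * Set up the coherent DMA allocator over the given uncached region.
 */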
void __init coherent_mem_init(phys_addr_t start, u32 size)
{
	phys_addr_t bitmap_phys;

	if (!size)
		return;

	printk(KERN_INFO "Coherent memory (DMA) region start=%pa size=0x%x\n",
	       &start, size);

	dma_base = start;
	dma_size = size;

	/* allocate the page bitmap, rounding the region size up to pages */
	dma_pages = dma_size >> PAGE_SHIFT;
	if (dma_size & (PAGE_SIZE - 1))
		++dma_pages;

	bitmap_phys = memblock_phys_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
					  sizeof(long));
	if (!bitmap_phys)
		panic("%s: failed to allocate DMA bitmap\n", __func__);

	dma_bitmap = phys_to_virt(bitmap_phys);
	/* zero only the bitmap itself, not dma_pages * PAGE_SIZE bytes */
	memset(dma_bitmap, 0, BITS_TO_LONGS(dma_pages) * sizeof(long));
}
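
/*
 * Illustrative only: early platform setup would reserve an uncached
 * region and register it with the allocator, along the lines of
 *
 *	coherent_mem_init(region_start, region_size);
 *
 * where region_start/region_size (hypothetical names) describe memory
 * already carved out of the normal page pool.
 */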
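
/*
 * Apply the cache maintenance a streaming DMA transfer needs in the given
 * direction: invalidate before reading from the device, write back before
 * the device reads, and both for bidirectional buffers.
 */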
static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
			 enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	switch (dir) {
	case DMA_FROM_DEVICE:
		L2_cache_block_invalidate(paddr, paddr + size);
		break;
	case DMA_TO_DEVICE:
		L2_cache_block_writeback(paddr, paddr + size);
		break;
	case DMA_BIDIRECTIONAL:
		L2_cache_block_writeback_invalidate(paddr, paddr + size);
		break;
	default:
		break;
	}
}
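
/*
 * The generic streaming DMA code calls these hooks around each transfer;
 * both need the same L2 maintenance here, so each defers to c6x_dma_sync().
 */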
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(dev, paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(dev, paddr, size, dir);
}