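/*
 * Consistent (DMA-coherent) memory allocation for non-cache-coherent
 * PowerPC platforms.
 *
 * Pages are allocated from the normal pool, flushed from the data
 * cache, and remapped uncached into a dedicated virtual region so the
 * CPU and the device see the same data without explicit cache
 * maintenance.  __dma_alloc_coherent() returns the remapped virtual
 * address and fills in the bus address via 'handle';
 * __dma_sync()/__dma_sync_page() provide the cache maintenance for
 * streaming (non-coherent) DMA.
 */
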
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>		/* kmalloc()/kfree() for region bookkeeping */

#include <asm/tlbflush.h>

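/*
 * The consistent (uncached) mapping lives in a dedicated chunk of
 * kernel virtual address space; its location and size come from the
 * platform's CONFIG_CONSISTENT_START/CONFIG_CONSISTENT_SIZE options.
 */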
#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

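/*
 * The page table covering the consistent region, set up at boot by
 * dma_alloc_init(), and the lock serialising all region bookkeeping.
 */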
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);

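/*
 * VM region handling support.
 *
 * Allocations are tracked as an address-sorted list of vm_region
 * entries within [CONSISTENT_BASE, CONSISTENT_END).  consistent_head
 * is a dummy entry anchoring the list; vm_region_alloc() takes the
 * first gap large enough for a request, and vm_region_find() looks a
 * region up by its start address.
 */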
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

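/*
 * Allocate DMA-coherent memory space and return both the kernel
 * remapped virtual address and the bus address for that space.
 */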
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = 0x00ffffff, limit;

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#zx mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle".
		 */
		*handle = page_to_bus(page);

		do {
			BUG_ON(!pte_none(*pte));

			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
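
/*
 * Illustrative sketch only (not part of this file): how a driver might
 * pair these calls.  "mydev_regs", "MYDEV_RING_BASE" and the ring
 * structure are hypothetical names.
 *
 *	struct mydev_ring { u32 desc[256]; };
 *	dma_addr_t ring_bus;
 *	struct mydev_ring *ring;
 *
 *	ring = __dma_alloc_coherent(sizeof(*ring), &ring_bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	out_be32(mydev_regs + MYDEV_RING_BASE, ring_bus);
 *	...
 *	__dma_free_coherent(sizeof(*ring), ring);
 */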
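
/*
 * Free page(s) as defined by the above mapping: unmap them from the
 * consistent region, release the pages and the region bookkeeping.
 */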
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
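
/*
 * Initialise the consistent memory allocation: populate the kernel
 * page tables covering CONSISTENT_BASE so allocations can be mapped.
 */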
static int __init dma_alloc_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
		if (!pud) {
			printk(KERN_ERR "%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	return ret;
}

core_initcall(dma_alloc_init);
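
/*
 * Make an area consistent for the given DMA transfer direction.
 */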
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
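
/*
 * Illustrative sketch only: streaming DMA pairs the sync direction
 * with whoever touches the buffer next ("buf", "len" and the device
 * kick are hypothetical):
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	// CPU wrote, device will read
 *	mydev_start_tx();			// hypothetical
 *	...
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	// device wrote, CPU will read
 */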

#ifdef CONFIG_HIGHMEM
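/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync().  This must
 * not sleep, so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct for a buffer to span
 * multiple pages.
 */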
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif

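/*
 * __dma_sync_page() makes memory consistent; identical to __dma_sync(),
 * but takes a struct page plus offset instead of a virtual address.
 */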
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);