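/*
 * ARM coherent ("consistent") DMA memory support: pages are allocated
 * and remapped uncached (or writecombined) into a dedicated region
 * ending at CONSISTENT_END.
 */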
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
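
/* Sanity check size */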
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END		(0xffe00000)
#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES	(CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
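
/*
 * These are the page tables (2MB each) covering uncached, DMA
 * consistent allocations.
 */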
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);
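
/*
 * VM region handling support.
 *
 * Regions in the consistent mapping area are tracked on a list rooted
 * at consistent_head and kept sorted by address; vm_region_alloc()
 * performs a first-fit search for a free gap under consistent_lock.
 */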
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
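	/*
	 * Insert this entry _before_ the one we found.
	 */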
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;
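
		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */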
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}
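
	/*
	 * Sanity check the allocation size.
	 */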
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#x mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;
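
	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */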
	{
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
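
	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */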
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		struct page *end = page + (1 << order);
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		split_page(page, order);
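
		/*
		 * Set the "dma handle" for the caller.
		 */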
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));
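
			/*
			 * Mark the page reserved while it is mapped
			 * into the consistent region.
			 */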
			SetPageReserved(page);
			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);
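
		/*
		 * Free the otherwise unused pages: the allocation was
		 * rounded up to a power-of-two block, but only "size"
		 * bytes were mapped.
		 */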
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}
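
/*
 * Allocate DMA-coherent memory space and return both the kernel
 * remapped virtual address and the bus address for that space.
 *
 * A minimal usage sketch (hypothetical driver code):
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, bus);
 */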
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	if (arch_is_coherent()) {
		void *virt;

		virt = kmalloc(size, gfp);
		if (!virt)
			return NULL;
		*handle = virt_to_dma(dev, virt);

		return virt;
	}

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
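
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent() above.
 */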
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
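
/*
 * Free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */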
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;
	int idx;
	u32 off;

	WARN_ON(irqs_disabled());

	if (arch_is_coherent()) {
		kfree(cpu_addr);
		return;
	}

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
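
				/*
				 * Drop the reservation taken at
				 * allocation time before freeing.
				 */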
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);
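
/*
 * Initialise the consistent memory allocation: pre-allocate the pte
 * tables covering the consistent region.
 */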
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0, i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
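
/*
 * Make an area consistent for devices: clean, invalidate or flush the
 * range in both the CPU and outer caches according to the DMA
 * direction.
 */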
void dma_cache_maint(const void *start, size_t size, int direction)
{
	const void *end = start + size;

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:
		dmac_inv_range(start, end);
		outer_inv_range(__pa(start), __pa(end));
		break;
	case DMA_TO_DEVICE:
		dmac_clean_range(start, end);
		outer_clean_range(__pa(start), __pa(end));
		break;
	case DMA_BIDIRECTIONAL:
		dmac_flush_range(start, end);
		outer_flush_range(__pa(start), __pa(end));
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_maint);