/*
 * Consistent memory allocators for PowerPC platforms with non-coherent
 * caches, derived from arch/arm/mm/consistent.c.
 *
 * Used for DMA devices that want either no backing memory or
 * non-cacheable memory: allocations are backed by ordinary pages that
 * are remapped non-cacheable into a dedicated virtual region so that
 * the device and the CPU see consistent data.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

#include "mmu_decl.h"

/*
 * The virtual region used for non-cacheable "consistent" mappings
 * starts where the ioremap area ends and spans CONFIG_CONSISTENT_SIZE
 * bytes.  CONSISTENT_OFFSET() converts an address within this region
 * into a page index relative to its base.
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

/*
 * Serializes all changes to the list of allocated sub-regions.
 */
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * Allocated regions within [CONSISTENT_BASE, CONSISTENT_END) are kept
 * on a list sorted by address, headed by consistent_head.  New regions
 * are carved out first-fit from the gaps between existing entries.
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
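
/*
 * Worked example of the first-fit walk above (addresses hypothetical):
 * with existing regions [base, base+0x2000) and [base+0x3000, base+0x5000),
 * a 0x1000-byte request starts at 'base', collides with the first region,
 * advances to base+0x2000, fits in the gap before the second region, and
 * is inserted there as [base+0x2000, base+0x3000).
 */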

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual address and the bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/* Sanity check the allocation size against the mask and region. */
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#zx mask %#llx)\n",
		       size, (unsigned long long)mask);
		return NULL;
	}

	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle".
		 */
		*handle = page_to_phys(page);

		do {
			SetPageReserved(page);
			map_kernel_page(vaddr, page_to_phys(page),
					pgprot_val(pgprot_noncached(PAGE_KERNEL)));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
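
/*
 * Usage sketch (hypothetical caller, not part of this file): on a
 * non-coherent platform a driver's coherent-allocation path ends up in
 * the pair of helpers above, roughly as follows.
 *
 *	dma_addr_t handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = __dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	// ... program the device with 'handle', access 'cpu_addr' from the
 *	// CPU; no explicit cache maintenance is needed for this buffer ...
 *	__dma_free_coherent(PAGE_SIZE, cpu_addr);
 */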

/*
 * Free an area allocated by __dma_alloc_coherent() above: unmap the
 * non-cacheable pages, release them, and drop the region entry.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * Make an area of memory consistent as seen by the device, according
 * to the DMA direction.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there
		 * is the potential for discarding uncommitted data from
		 * the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
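
/*
 * Usage sketch (hypothetical caller): for streaming mappings on a
 * non-coherent platform the caches must be maintained around each
 * transfer, e.g.:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	// writeback before the
 *						// device reads the buffer
 *	// ... start DMA and wait for completion ...
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	// drop stale lines so the
 *						// CPU sees what the device wrote
 */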

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct for a buffer to span
 * multiple pages.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page() makes memory consistent. Identical to __dma_sync(),
 * but takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);

/*
 * Return the PFN for a given CPU virtual address returned by
 * __dma_alloc_coherent(), or 0 if no page is mapped there.
 */
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
{
	/*
	 * This should always be populated, so we don't test every
	 * level. If that fails, we'll have a nice crash which
	 * will be as good as a BUG() anyway.
	 */
	pgd_t *pgd = pgd_offset_k(cpu_addr);
	pud_t *pud = pud_offset(pgd, cpu_addr);
	pmd_t *pmd = pmd_offset(pud, cpu_addr);
	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;
	return pte_pfn(*ptep);
}
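
/*
 * Usage sketch (hypothetical caller): code mapping a coherent buffer
 * into user space can use the PFN above, assuming 'vma' comes from the
 * caller's ->mmap() handler and 'cpu_addr' from __dma_alloc_coherent():
 *
 *	unsigned long pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
 *
 *	if (!pfn)
 *		return -EINVAL;
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       pgprot_noncached(vma->vm_page_prot));
 */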