#include <linux/types.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/cache.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
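/*
 * Walk a physically contiguous range page by page and apply the given
 * cache maintenance callback to each piece.  Highmem pages have no
 * permanent kernel mapping, so they are mapped temporarily with
 * kmap_atomic(); lowmem pages are reached through the linear mapping.
 */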
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			/*
			 * Highmem is mapped one page at a time, so clamp
			 * len to the current page after normalizing an
			 * offset that points past it.
			 */
			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem is covered by the linear mapping, so the
			 * whole remaining range can be handled in one call
			 * (len == left here; it is only clamped for highmem).
			 */
			start = (unsigned long)phys_to_virt(paddr);
			fn(start, start + len);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
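/*
 * Called before the buffer is handed to the device: data the CPU has
 * written must be written back from the cache so the device sees it.
 * DMA_FROM_DEVICE needs nothing here; the invalidate is done after the
 * transfer, in arch_sync_dma_for_cpu().
 */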
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		break;
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_wb_range);
		break;
	default:
		BUG();
	}
}
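/*
 * Called before the CPU reads a buffer the device may have written:
 * stale cache lines covering the buffer are invalidated.  DMA_TO_DEVICE
 * needs nothing, since the writeback already happened in
 * arch_sync_dma_for_device().
 */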
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_inval_range);
		break;
	default:
		BUG();
	}
}
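/*
 * Prepare pages backing a coherent (uncached) allocation: write back and
 * invalidate any cached copies so later uncached accesses cannot race
 * with dirty or stale cache lines.
 */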
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_op(page_to_phys(page), size, cpu_dma_wbinval_range);
}