#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* Derive the shadow mask from the dcache extent, since the RAM size is not known here. */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif

/*
 * Consistent memory allocators, used by DMA devices that want to share
 * uncached memory with the processor core.
 *
 * Without an MMU, the allocation is returned through the uncached
 * "shadow" mirror of DDR that sits above the cacheable region, so the
 * handle handed back is simply the allocation shifted into that window.
 * With an MMU, the pages are instead remapped into a fresh virtual area
 * with the cache-inhibit bit set.
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate whole pages. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * Make sure there are no stale or dirty cache lines covering the
	 * area before handing it out as uncached memory.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
			virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * If the uncached shadow is implemented, shift the returned pointer
	 * up into the uncached mirror of DDR.  If it is not, the caller must
	 * handle cache maintenance itself, e.g. by flushing manually.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
			(unsigned int)ret < cpuinfo.dcache_high)
		printk(KERN_WARNING
			"ERROR: Your cache coherent area is CACHED!!!\n");

	/* The dma_handle is the same as the (shadowed) physical address. */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages into. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = virt_to_bus((void *)vaddr);
#endif

	/*
	 * Split the high-order allocation so each page can be handled on its
	 * own: the pages that back the buffer are mapped (cache-inhibited
	 * when an MMU is present) and marked reserved, while any surplus
	 * pages beyond the requested size are freed again below.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* Map the page into the new area with caching inhibited. */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
EXPORT_SYMBOL(consistent_alloc);
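
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * pair consistent_alloc() with consistent_free() for a small DMA buffer.
 * "my_dev", MY_RING_BYTES and MY_RING_BASE are hypothetical placeholders.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = consistent_alloc(GFP_KERNEL, MY_RING_BYTES, &ring_dma);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	// the device is given the bus address returned in ring_dma
 *	writel(ring_dma, my_dev->regs + MY_RING_BASE);
 *	...
 *	consistent_free(MY_RING_BYTES, ring);
 */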

/*
 * Free page(s) previously allocated with consistent_alloc(); the pages
 * must form one contiguous area.
 */
void consistent_free(size_t size, void *vaddr)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear the shadow bits in the address, then free as usual. */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		ClearPageReserved(page);
		__free_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
						(unsigned int)vaddr),
					(unsigned int)vaddr),
					(unsigned int)vaddr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);

				ClearPageReserved(page);
				__free_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* The mappings are gone; drop any stale TLB entries. */
	flush_tlb_all();
#endif
}
EXPORT_SYMBOL(consistent_free);

/*
 * Make an area of memory consistent for a DMA transfer in the given
 * direction.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert the start address back down to the unshadowed region. */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(consistent_sync);
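
/*
 * Illustrative sketch (not part of the original file): consistent_sync() is
 * used around transfers where the CPU touches the buffer between DMA
 * operations; "buf" and "len" here are hypothetical.
 *
 *	// CPU filled buf: write back the cache before the device reads it
 *	consistent_sync(buf, len, PCI_DMA_TODEVICE);
 *	...
 *	// device wrote buf: invalidate the cache before the CPU reads it
 *	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
 */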

/*
 * consistent_sync_page() makes memory consistent; identical to
 * consistent_sync(), but it takes a struct page and an offset instead of a
 * virtual address.
 */
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
	unsigned long start = (unsigned long)page_address(page) + offset;

	consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);