/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

static unsigned long *iommu_use;

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr) (iommu_use[dvma_index(baddr)])

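/*
 * Free DVMA space is tracked as a list of holes.  hole_list holds the
 * currently free address ranges, hole_cache holds spare struct hole
 * descriptors, and initholes is the static pool those descriptors
 * come from.
 */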
struct hole {
        unsigned long start;
        unsigned long end;
        unsigned long size;
        struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

static void print_use(void)
{
        int i;
        int j = 0;

        pr_info("dvma entry usage:\n");

        for (i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
                if (!iommu_use[i])
                        continue;

                j++;

                pr_info("dvma entry: %08x len %08lx\n",
                        (i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
        }

        pr_info("%d entries in use total\n", j);

        pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
        pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
                dvma_free_bytes);
}

static void print_holes(struct list_head *holes)
{
        struct list_head *cur;
        struct hole *hole;

        pr_info("listing dvma holes\n");
        list_for_each(cur, holes) {
                hole = list_entry(cur, struct hole, list);

                if ((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
                        continue;

                pr_info("hole: start %08lx end %08lx size %08lx\n",
                        hole->start, hole->end, hole->size);
        }

        pr_info("end of hole listing...\n");
}
#endif

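/*
 * Coalesce neighbouring holes on hole_list so that their descriptors
 * can be returned to hole_cache.  Returns the number of descriptors
 * recovered.
 */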
static inline int refill(void)
{
        struct hole *hole;
        struct hole *prev = NULL;
        struct list_head *cur;
        int ret = 0;

        list_for_each(cur, &hole_list) {
                hole = list_entry(cur, struct hole, list);

                if (!prev) {
                        prev = hole;
                        continue;
                }

                if (hole->end == prev->start) {
                        hole->size += prev->size;
                        hole->end = prev->end;
                        list_move(&(prev->list), &hole_cache);
                        ret++;
                }
        }

        return ret;
}

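/*
 * Take a spare hole descriptor from hole_cache, refilling the cache by
 * coalescing hole_list if it has run dry.  BUGs if no descriptor can
 * be recovered.
 */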
static inline struct hole *rmcache(void)
{
        struct hole *ret;

        if (list_empty(&hole_cache)) {
                if (!refill()) {
                        pr_crit("out of dvma hole cache!\n");
                        BUG();
                }
        }

        ret = list_entry(hole_cache.next, struct hole, list);
        list_del(&(ret->list));

        return ret;
}

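/*
 * Carve a len-byte range out of the first hole that is large enough,
 * shrinking the hole from its top end (or retiring it entirely when it
 * is consumed).  The allocation length is recorded in iommu_use[] so
 * that free_baddr() can recover it later.  Returns the DVMA base
 * address of the range.
 */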
static inline unsigned long get_baddr(int len, unsigned long align)
{
        struct list_head *cur;
        struct hole *hole;

        if (list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
                pr_crit("out of dvma holes! (printing hole cache)\n");
                print_holes(&hole_cache);
                print_use();
#endif
                BUG();
        }

        list_for_each(cur, &hole_list) {
                unsigned long newlen;

                hole = list_entry(cur, struct hole, list);

                if (align > DVMA_PAGE_SIZE)
                        newlen = len + ((hole->end - len) & (align - 1));
                else
                        newlen = len;

                if (hole->size > newlen) {
                        hole->end -= newlen;
                        hole->size -= newlen;
                        dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
                        dvma_allocs++;
                        dvma_alloc_bytes += newlen;
#endif
                        return hole->end;
                } else if (hole->size == newlen) {
                        list_move(&(hole->list), &hole_cache);
                        dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
                        dvma_allocs++;
                        dvma_alloc_bytes += newlen;
#endif
                        return hole->start;
                }
        }

        pr_crit("unable to find dvma hole!\n");
        BUG();
        return 0;
}

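/*
 * Return a previously allocated DVMA range to the free list.  The
 * allocation length is looked up in iommu_use[], the IOMMU mapping is
 * torn down, and the range is merged into an adjacent hole if one
 * exists; otherwise a fresh hole descriptor is taken from the cache.
 */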
static inline int free_baddr(unsigned long baddr)
{
        unsigned long len;
        struct hole *hole;
        struct list_head *cur;
        unsigned long orig_baddr;

        orig_baddr = baddr;
        len = dvma_entry_use(baddr);
        dvma_entry_use(baddr) = 0;
        baddr &= DVMA_PAGE_MASK;
        dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
        dvma_frees++;
        dvma_free_bytes += len;
#endif

        list_for_each(cur, &hole_list) {
                hole = list_entry(cur, struct hole, list);

                if (hole->end == baddr) {
                        hole->end += len;
                        hole->size += len;
                        return 0;
                } else if (hole->start == (baddr + len)) {
                        hole->start = baddr;
                        hole->size += len;
                        return 0;
                }
        }

        hole = rmcache();

        hole->start = baddr;
        hole->end = baddr + len;
        hole->size = len;

        list_add(&(hole->list), cur);

        return 0;
}

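/*
 * Initialise the DVMA allocator: set up the hole lists, seed hole_list
 * with a single hole spanning the whole DVMA region, allocate the
 * iommu_use[] bookkeeping array from bootmem and clear out any stale
 * IOMMU mappings.
 */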
void __init dvma_init(void)
{
        struct hole *hole;
        int i;

        INIT_LIST_HEAD(&hole_list);
        INIT_LIST_HEAD(&hole_cache);

        for (i = 0; i < 64; i++)
                list_add(&(initholes[i].list), &hole_cache);

        hole = rmcache();
        hole->start = DVMA_START;
        hole->end = DVMA_END;
        hole->size = DVMA_SIZE;

        list_add(&(hole->list), &hole_list);

        iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));

        dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
        sun3_dvma_init();
#endif
}

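/*
 * Map an existing kernel buffer into DVMA space.  The length is
 * rounded up to DVMA page granularity, a suitably aligned range is
 * carved out with get_baddr(), and the IOMMU is pointed at the
 * underlying pages.  Returns the bus address of the mapping, or 0 on a
 * bad request.
 */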
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{
        unsigned long baddr;
        unsigned long off;

        if (!len)
                len = 0x800;

        if (!kaddr || !len)
                return 0;

        pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
        off = kaddr & ~DVMA_PAGE_MASK;
        kaddr &= PAGE_MASK;
        len += off;
        len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

        if (align == 0)
                align = DVMA_PAGE_SIZE;
        else
                align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

        baddr = get_baddr(len, align);

        if (!dvma_map_iommu(kaddr, baddr, len))
                return (baddr + off);

        pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
                len);
        BUG();
        return 0;
}
EXPORT_SYMBOL(dvma_map_align);

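/*
 * Release a DVMA mapping made by dvma_map_align().  If the caller
 * passed an address with the 0xf00000 bits clear, they are folded back
 * in (DVMA addresses are expected to live in that range) before the
 * range is handed to free_baddr().
 */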
void dvma_unmap(void *baddr)
{
        unsigned long addr;

        addr = (unsigned long)baddr;

        if (!(addr & 0x00f00000))
                addr |= 0xf00000;

        free_baddr(addr);
}
EXPORT_SYMBOL(dvma_unmap);

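/*
 * Allocate a fresh, DVMA-page-aligned buffer and map it into DVMA
 * space as well as into the CPU's view of that space.  Returns the
 * CPU-visible virtual address of the DVMA mapping (from dvma_btov()),
 * or NULL on failure.
 */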
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
        unsigned long kaddr;
        unsigned long baddr;
        unsigned long vaddr;

        if (!len)
                return NULL;

        pr_debug("dvma_malloc request %lx bytes\n", len);
        len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

        if ((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
                return NULL;

        if ((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
                free_pages(kaddr, get_order(len));
                return NULL;
        }

        vaddr = dvma_btov(baddr);

        if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
                dvma_unmap((void *)baddr);
                free_pages(kaddr, get_order(len));
                return NULL;
        }

        pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
                 baddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(dvma_malloc_align);

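/*
 * Freeing DVMA buffers is not implemented: memory obtained from
 * dvma_malloc_align() stays allocated and mapped for the lifetime of
 * the system.
 */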
void dvma_free(void *vaddr)
{
}
EXPORT_SYMBOL(dvma_free);