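/*
 * ioport.c: Simple I/O mapping allocator for sparc32.
 *
 * Implements ioremap()/iounmap() on top of a fixed I/O virtual window
 * and the sparc32 dma_map_ops for SBus and PCI devices.
 */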
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#include <asm/leon.h>
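
/*
 * mmu_inval_dma_area(): on LEON the whole D-cache is flushed; on all
 * other sparc32 platforms this is a no-op.
 */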
#ifdef CONFIG_SPARC_LEON
#define mmu_inval_dma_area(p, l)	leon_flush_dcache_all()
#else
#define mmu_inval_dma_area(p, l)
#endif

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
				     unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);
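
/*
 * _sparc_dvma covers the DVMA window (DVMA_VADDR..DVMA_END) used for
 * DMA-consistent mappings; sparc_iomap covers the I/O virtual window
 * (IOBASE_VADDR..IOBASE_END) used by ioremap().
 */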
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};

struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};
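
/*
 * A small static pool of resources with embedded name storage;
 * _sparc_alloc_io() uses these first and falls back to kmalloc()
 * once the pool is exhausted.
 */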
#define XNMLN	15
#define XNRES	10

struct xresource {
	struct resource xres;	/* Must be first: iounmap() casts back */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}
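
/*
 * Map a physical I/O range into the kernel's I/O virtual window.
 * The bus number is always zero here; drivers that know the bus go
 * through of_ioremap() below.
 */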
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);
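
/*
 * Complementary to ioremap(): release the mapping and return the
 * bookkeeping entry to the static pool, or kfree() it.
 */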
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char *)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);
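
/*
 * Meat of mapping: pick a name buffer and struct resource (static pool
 * or kmalloc), then hand off to _sparc_ioremap().
 */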
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
				     unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;

	if (name == NULL)
		name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL)
			return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof(struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);

	return va;
}
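
/*
 * Grab a page-aligned chunk of the sparc_iomap window and establish
 * the MMU I/O mapping for it.  Failure here is fatal.
 */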
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL) ? res->name : "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}
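
/*
 * Complementary to _sparc_ioremap(): unmap the range and release the
 * resource from sparc_iomap.
 */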
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);
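
/*
 * Allocate a chunk of memory suitable for coherent DMA: grab pages,
 * reserve a range in the DVMA window and map it through the
 * IOMMU/IO-unit via mmu_map_dma_area().
 */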
static void *sbus_alloc_coherent(struct device *dev, size_t len,
				 dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct platform_device *op = to_platform_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len <= 0)
		return NULL;
	if (len > 256*1024)
		return NULL;

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);

	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->dev.of_node->name;

	return (void *)(unsigned long)res->start;

	/* Unwind in reverse allocation order so that a kzalloc() failure
	 * does not leak the pages allocated just before it. */
err_noiommu:
	release_resource(res);
err_nova:
	kfree(res);
err_nomem:
	free_pages(va, order);
err_nopages:
	return NULL;
}

static void sbus_free_coherent(struct device *dev, size_t n, void *p,
			       dma_addr_t ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
					(unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
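
/*
 * Map a single page for streaming DMA through the IOMMU/IO-unit and
 * return the resulting DVMA address.
 */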
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	void *va = page_address(page) + offset;

	if (len <= 0)
		return 0;
	if (len > 256*1024)
		return 0;

	return mmu_get_scsi_one(dev, va, len);
}

static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_one(dev, ba, n);
}

static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_get_scsi_sgl(dev, sg, n);
	return n;
}

static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				 int n, enum dma_data_direction dir)
{
	BUG();
}

static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				    int n, enum dma_data_direction dir)
{
	BUG();
}

struct dma_map_ops sbus_dma_ops = {
	.alloc_coherent		= sbus_alloc_coherent,
	.free_coherent		= sbus_free_coherent,
	.map_page		= sbus_map_page,
	.unmap_page		= sbus_unmap_page,
	.map_sg			= sbus_map_sg,
	.unmap_sg		= sbus_unmap_sg,
	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
	.sync_sg_for_device	= sbus_sync_sg_for_device,
};

struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI
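
/*
 * Allocate and map a kernel buffer using consistent-mode DMA: back it
 * with free pages, give it a second, uncached mapping inside the DVMA
 * window, and return the physical address through *pba.
 */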
static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0)
		return NULL;
	if (len > 256*1024)
		return NULL;

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
	printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
	    (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va);
	return (void *) res->start;
}
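
/*
 * Free and unmap a consistent DMA buffer: the size and DMA handle must
 * match what pci32_alloc_coherent() returned.
 */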
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
					(unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
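
/*
 * Streaming mapping of a single page: no address translation is done,
 * the DMA address is simply the physical address of the page.
 */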
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return page_to_phys(page) + offset;
}
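
/*
 * Map a scatterlist for streaming DMA: each entry's DMA address is the
 * physical address of its buffer and dma_length mirrors length.
 */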
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}
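
/*
 * Unmap a streaming scatterlist: nothing to tear down, but buffers the
 * device may have written are invalidated in the CPU cache
 * (a no-op except on LEON).
 */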
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
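
/*
 * Make a single streaming mapping consistent for the CPU or the device:
 * invalidate the CPU cache over the buffer (mmu_inval_dma_area() is a
 * no-op everywhere except LEON).
 */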
static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
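
/*
 * Same, but for a scatterlist: walk the entries and invalidate the
 * cache over each buffer.
 */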
static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

struct dma_map_ops pci32_dma_ops = {
	.alloc_coherent		= pci32_alloc_coherent,
	.free_coherent		= pci32_free_coherent,
	.map_page		= pci32_map_page,
	.map_sg			= pci32_map_sg,
	.unmap_sg		= pci32_unmap_sg,
	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
	.sync_single_for_device	= pci32_sync_single_for_device,
	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
	.sync_sg_for_device	= pci32_sync_sg_for_device,
};
EXPORT_SYMBOL(pci32_dma_ops);

#endif /* CONFIG_PCI */
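
/*
 * Report whether a device can use the given DMA mask: only PCI devices
 * are accepted here, everything else is rejected.
 */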
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_PROC_FS

static int sparc_io_proc_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private, *r;
	const char *nm;

	for (r = root->child; r != NULL; r = r->sibling) {
		if ((nm = r->name) == NULL)
			nm = "???";
		seq_printf(m, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return 0;
}

static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sparc_io_proc_show, PDE(inode)->data);
}

static const struct file_operations sparc_io_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sparc_io_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */
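
/*
 * Find the child resource of @root that contains the address @hit.
 */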
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != NULL; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
	proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
#endif
}