/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
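/* Return MAX, or, if main memory is smaller than MAX, the first
   power of two not smaller than main memory.  */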
unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Default to no extra alignment for allocations within the
	   arena; per-chipset code can raise align_entry if needed.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
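/* Must be called with the arena lock held.  */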
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes.  */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success.  It's the responsibility of the caller to mark them
	   in use before releasing the lock.  */
	return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
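/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */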
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
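/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until pci_unmap_single is performed.  */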
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support scatter-gather mapping, and, since
	   we tried the direct map above, it must be an error.  */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges.  */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);

dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
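/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */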
void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
		BUG();
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);

void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
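/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */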
void *
__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
		       dma_addr_t *dma_addrp, gfp_t gfp)
{
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit the required mask and we
		   do not have an iommu.  Try again with GFP_DMA.  */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(__pci_alloc_consistent);
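/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */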
void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
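/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */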
#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* We will not merge sg entries without a device.  */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
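/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */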
static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */
	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path for single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
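/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */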
void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
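/* Return whether the given PCI device DMA address mask can be
   supported properly.  */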
int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As a last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
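/*
 * AGP GART extensions to the IOMMU
 */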
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind().  */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first.  */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
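/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */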
static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine.  */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
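/* Helper for generic DMA-mapping functions: map a non-PCI device to
   the PCI device used on its behalf (the ISA bridge), if any.  */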
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise.  */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then).  */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA the bridge is bus master as well.  */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes an ISA bus master with dma_mask 0xffffff.  */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);

int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);