/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)	printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)	printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK	0x00ffffff

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
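
/*
 * For illustration: each arena PTE stores the page frame number shifted
 * up by one bit, with bit 0 serving as the valid bit.  Assuming the usual
 * 8 KB Alpha page size (PAGE_SHIFT == 13), a physical address of 0x2000
 * becomes (0x2000 >> 12) | 1 == 0x3.
 */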

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
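
/*
 * A rough usage sketch (the window base and size below are illustrative,
 * not taken from any particular chipset file): a core_*.c setup routine
 * typically does something like
 *
 *	hose->sg_pci = iommu_arena_new(hose, 0x40000000,
 *				       size_for_memory(0x40000000), 0);
 *
 * and then programs the chipset scatter-gather window registers from
 * arena->dma_base, arena->size and the physical address of arena->ptes.
 */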

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success.  It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
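
/*
 * Note: the MASK argument above is expected to be a power of two minus
 * one (it is derived from align/align_entry values that are powers of
 * two), which is what makes the ALIGN() arithmetic valid.
 */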

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
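
/*
 * Note that iommu_arena_free() only clears the PTEs: it neither takes
 * the arena lock nor flushes the translation buffer.  Callers that need
 * locking or a TLB flush (e.g. the unmap paths below) handle that
 * themselves.
 */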

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine.  */
	DBGA("pci_dac_dma_supported %s from %pf\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error.  */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges.  */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
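
/*
 * A dma address of 0 is used as the error return throughout this file;
 * alpha_pci_mapping_error() below checks for exactly that value.
 */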

/* Helper for generic DMA-mapping functions.  */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise.  */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then).  */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations).  */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff.  */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
		BUG();
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %pf\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't allocate contiguous.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit the required mask and we
		   do not have an iommu.  Try again with GFP_DMA.  */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device.  */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
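
/*
 * A worked example (hypothetical buffers): for a three-entry list where
 * entry 0 and entry 1 are physically adjacent and entry 2 is not, entry 0
 * becomes a leader with dma_address 0 and dma_length covering both,
 * entry 1 is marked -1 (merged follower), and entry 2 starts a new leader.
 */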

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == PCI_DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As a last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}

/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind().  */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first...  */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
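
/*
 * Expected calling sequence for the GART helpers above (a sketch based
 * on how the routines fit together): iommu_reserve() sets aside a block
 * of PTEs, iommu_bind() points them at real pages, iommu_unbind() returns
 * them to the reserved state, and iommu_release() finally frees them.
 */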

static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

const struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
};
EXPORT_SYMBOL(alpha_pci_ops);
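
/*
 * Drivers do not call alpha_pci_ops directly; they reach these callbacks
 * through the generic DMA API.  A minimal, illustrative sequence:
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, bus))
 *		return -ENOMEM;
 *	... device performs DMA ...
 *	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */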