/*
 * Alpha PCI-IOMMU support: code shared by the Alpha chipset
 * scatter-gather window implementations.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)	printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)	printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
/* Build an IOMMU pte: the page frame number shifted up by one,
   with the low bit set to mark the entry valid.  */
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
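
/*
 * Worked example (illustrative only, assuming the usual Alpha 8 KB
 * pages, PAGE_SHIFT == 13): a page at physical address 0x12340000
 * yields the pte (0x12340000 >> 12) | 1 == 0x12341, i.e. pfn 0x91a0
 * shifted left by one with the valid bit set.
 */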

/* If main memory is smaller than MAX, return the first power of two
   large enough to cover it; otherwise return MAX unchanged.  */
unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else	/* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif	/* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held.  */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes.  */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success.  It's the responsibility of the caller to mark the
	   entries in use before releasing the lock.  */
	return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark the ptes as in use (non-zero) but still invalid,
	   so that the IOMMU TLB cannot load a stale translation while the
	   caller fills in the real mappings after we drop the lock.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/*
 * True if the machine supports DAC addressing, and DEV can make
 * use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error.  */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges.  */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

/* Helper for generic DMA-mapping functions.  */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise.  */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then).  */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations).  */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff.  */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (dir == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
		BUG();
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't allocate contiguous.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit the required mask and we do not
		   have an iommu.  Try again with GFP_DMA.  */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Leader, followers all physically adjacent.
	1   : Leader, at least one follower merged as virtually adjacent.
	-1  : Not a leader, physically adjacent to the previous entry.
	-2  : Not a leader, virtually adjacent to the previous entry.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* Without a device we cannot know its maximum segment size, so
	   never merge (max_seg_size == 0 forces a new segment each time).  */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */
	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocations failed.  Unmap whatever we managed to map
	   so far before giving up.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (dir == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As a last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}

/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind().  */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first...  */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	/* Refuse to bind over anything that wasn't reserved first.  */
	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	/* Install valid ptes for the supplied pages.  */
	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
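
/*
 * Illustrative sketch (not taken from any in-tree caller) of how an
 * AGP backend might use the GART extensions above: reserve a block of
 * arena pages once, bind real pages into it while the aperture is in
 * use, then unbind and release when done.
 *
 *	long ofs = iommu_reserve(arena, pg_count, align_mask);
 *	if (ofs >= 0) {
 *		iommu_bind(arena, ofs, pg_count, pages);
 *		...
 *		iommu_unbind(arena, ofs, pg_count);
 *		iommu_release(arena, ofs, pg_count);
 *	}
 */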

static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

static int alpha_pci_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
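
/*
 * The routines above are published as the Alpha dma_map_ops, so generic
 * dma_map_single()/dma_map_sg() calls on this architecture dispatch into
 * the alpha_pci_* functions.  A hypothetical driver-side sketch (device
 * and buffer names are illustrative only):
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, bus))
 *		goto fail;
 *	...
 *	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */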

struct dma_map_ops alpha_pci_ops = {
	.alloc_coherent		= alpha_pci_alloc_coherent,
	.free_coherent		= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
	.set_dma_mask		= alpha_pci_set_mask,
};

struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);