/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)	printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)	printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

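/* Build an IOMMU page-table entry from a physical address: the page
   frame number is shifted into the translation field and the low bit
   is set, which (judging from IOMMU_INVALID_PTE usage below) marks the
   entry as valid.  */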
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
        return (paddr >> (PAGE_SHIFT-1)) | 1;
}

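/* Return MAX unchanged if it fits within low memory; otherwise replace
   it with the first power of two that covers all of low memory.  */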
unsigned long
size_for_memory(unsigned long max)
{
        unsigned long mem = max_low_pfn << PAGE_SHIFT;
        if (mem < max)
                max = roundup_pow_of_two(mem);
        return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
                     unsigned long window_size, unsigned long align)
{
        unsigned long mem_size;
        struct pci_iommu_arena *arena;

        mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

        /* The arena's PTE table must be aligned to at least its own
           size; honour any larger alignment the caller requested.  */
        if (align < mem_size)
                align = mem_size;

#ifdef CONFIG_DISCONTIGMEM
        /* Try to place the arena and its PTE table on the requested
           node, falling back to a system-wide allocation on failure.  */
        arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
        if (!NODE_DATA(nid) || !arena) {
                printk("%s: couldn't allocate arena from node %d\n"
                       " falling back to system-wide allocation\n",
                       __func__, nid);
                arena = alloc_bootmem(sizeof(*arena));
        }

        arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
        if (!NODE_DATA(nid) || !arena->ptes) {
                printk("%s: couldn't allocate arena ptes from node %d\n"
                       " falling back to system-wide allocation\n",
                       __func__, nid);
                arena->ptes = __alloc_bootmem(mem_size, align, 0);
        }

#else /* CONFIG_DISCONTIGMEM */

        arena = alloc_bootmem(sizeof(*arena));
        arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

        spin_lock_init(&arena->lock);
        arena->hose = hose;
        arena->dma_base = base;
        arena->size = window_size;
        arena->next_entry = 0;

        /* Align allocations to a multiple of a page size.  Not needed
           unless there are chip bugs.  */
        arena->align_entry = 1;

        return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
                unsigned long window_size, unsigned long align)
{
        return iommu_arena_new_node(0, hose, base, window_size, align);
}

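/* Search the arena for a run of free PTEs.  Must be called with the
   arena lock held.  */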
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
                       long n, long mask)
{
        unsigned long *ptes;
        long i, p, nent;
        int pass = 0;
        unsigned long base;
        unsigned long boundary_size;

        base = arena->dma_base >> PAGE_SHIFT;
        if (dev) {
                boundary_size = dma_get_seg_boundary(dev) + 1;
                boundary_size >>= PAGE_SHIFT;
        } else {
                boundary_size = 1UL << (32 - PAGE_SHIFT);
        }

        /* Search forward for the first mask-aligned sequence of N free ptes.  */
        ptes = arena->ptes;
        nent = arena->size >> PAGE_SHIFT;
        p = ALIGN(arena->next_entry, mask + 1);
        i = 0;

again:
        while (i < n && p+i < nent) {
                if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
                        p = ALIGN(p + 1, mask + 1);
                        goto again;
                }

                if (ptes[p+i])
                        p = ALIGN(p + i + 1, mask + 1), i = 0;
                else
                        i = i + 1;
        }

        if (i < n) {
                if (pass < 1) {
                        /* Reached the end of the arena.  Flush the TLB
                           and restart the search from the beginning.  */
                        alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

                        pass++;
                        p = 0;
                        i = 0;
                        goto again;
                } else
                        return -1;
        }

        /* Success.  It's the caller's responsibility to mark the entries
           in use before releasing the lock.  */
        return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
                  unsigned int align)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p, mask;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes.  */
        ptes = arena->ptes;
        mask = max(align, arena->align_entry) - 1;
        p = iommu_arena_find_pages(dev, arena, n, mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all in use (non-zero but invalid) so the
           IOMMU TLB cannot load them from under us; the caller fills in
           the real translations after we return.  */
        for (i = 0; i < n; ++i)
                ptes[p+i] = IOMMU_INVALID_PTE;

        arena->next_entry = p + n;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
        unsigned long *p;
        long i;

        p = arena->ptes + ofs;
        for (i = 0; i < n; ++i)
                p[i] = 0;
}

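/* True if the machine supports DAC (64-bit PCI dual address cycle)
   addressing, and the device can make use of it given MASK.  */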
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
        dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
        int ok = 1;

        /* If this is not set, the machine doesn't support DAC at all.  */
        if (dac_offset == 0)
                ok = 0;

        /* The device has to be able to address our DAC bit.  */
        if ((dac_offset & dev->dma_mask) != dac_offset)
                ok = 0;

        DBGA("pci_dac_dma_supported %s from %p\n",
             ok ? "yes" : "no", __builtin_return_address(0));

        return ok;
}

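/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The PCI bus-mastering address to use is returned; 0 means the
   mapping failed.  Once the device is given the dma address, the device
   owns this memory until pci_unmap_single is performed.  */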
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
                 int dac_allowed)
{
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        struct pci_iommu_arena *arena;
        long npages, dma_ofs, i;
        unsigned long paddr;
        dma_addr_t ret;
        unsigned int align = 0;
        struct device *dev = pdev ? &pdev->dev : NULL;

        paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
        /* First check to see if we can use the direct map window.  */
        if (paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                ret = paddr + __direct_map_base;

                DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }
#endif

        /* Next, use DAC if allowed.  */
        if (dac_allowed) {
                ret = paddr + alpha_mv.pci_dac_offset;

                DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }

        /* If the machine doesn't define a pci_tbi routine, we have to
           assume it doesn't support scatter-gather mapping; since the
           direct map was already tried above, this is an error.  */
        if (! alpha_mv.mv_pci_tbi) {
                printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
                return 0;
        }

        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        npages = iommu_num_pages(paddr, size, PAGE_SIZE);

        /* Force allocations for the ISA bridge to an 8-page boundary.  */
        if (pdev && pdev == isa_bridge)
                align = 8;
        dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
        if (dma_ofs < 0) {
                printk(KERN_WARNING "pci_map_single failed: "
                       "could not allocate dma page tables\n");
                return 0;
        }

        paddr &= PAGE_MASK;
        for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

        ret = arena->dma_base + dma_ofs * PAGE_SIZE;
        ret += (unsigned long)cpu_addr & ~PAGE_MASK;

        DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
              cpu_addr, size, npages, ret, __builtin_return_address(0));

        return ret;
}

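/* Helper for the generic DMA-mapping functions: resolve a struct device
   to the PCI device whose mappings should be used.  Non-PCI (ISA/EISA)
   devices map through the ISA bridge; a NULL return means true ISA DMA,
   limited by ISA_DMA_MASK.  */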
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
        if (dev && dev->bus == &pci_bus_type)
                return to_pci_dev(dev);

        /* Assume that non-PCI devices asking for DMA are either ISA or
           EISA; BUG() otherwise.  */
        BUG_ON(!isa_bridge);

        /* Assume non-busmaster ISA DMA when dma_mask is not set (the
           ISA bridge is bus master then).  */
        if (!dev || !dev->dma_mask || !*dev->dma_mask)
                return isa_bridge;

        /* A device with a DMA mask at least as wide as the ISA bridge's
           can simply use the bridge's mappings.  */
        if (*dev->dma_mask >= isa_bridge->dma_mask)
                return isa_bridge;

        /* Otherwise fall back to true ISA DMA (callers use ISA_DMA_MASK).  */
        return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, (char *)page_address(page) + offset,
                                size, dac_allowed);
}

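/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */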
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        unsigned long flags;
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        struct pci_iommu_arena *arena;
        long dma_ofs, npages;

        if (dir == PCI_DMA_NONE)
                BUG();

        if (dma_addr >= __direct_map_base
            && dma_addr < __direct_map_base + __direct_map_size) {
                /* Direct-mapped address -- nothing to do.  */
                DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));

                return;
        }

        if (dma_addr > 0xffffffff) {
                /* DAC address -- nothing to do.  */
                DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));
                return;
        }

        arena = hose->sg_pci;
        if (!arena || dma_addr < arena->dma_base)
                arena = hose->sg_isa;

        dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
        if (dma_ofs * PAGE_SIZE >= arena->size) {
                printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
                       " base %llx size %x\n",
                       dma_addr, arena->dma_base, arena->size);
                return;
                BUG();
        }

        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

        spin_lock_irqsave(&arena->lock, flags);

        iommu_arena_free(arena, dma_ofs, npages);

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if (dma_ofs >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
              dma_addr, size, npages, __builtin_return_address(0));
}

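/* Allocate and map a kernel buffer using consistent mode DMA for a PCI
   device.  Returns a non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the PCI side dma address as well,
   else DMA_ADDRP is undefined.  */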
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_addrp, gfp_t gfp,
                                      struct dma_attrs *attrs)
{
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        void *cpu_addr;
        long order = get_order(size);

        gfp &= ~GFP_DMA;

try_again:
        cpu_addr = (void *)__get_free_pages(gfp, order);
        if (! cpu_addr) {
                printk(KERN_INFO "pci_alloc_consistent: "
                       "get_free_pages failed from %p\n",
                       __builtin_return_address(0));

                return NULL;
        }
        memset(cpu_addr, 0, size);

        *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
        if (*dma_addrp == 0) {
                free_pages((unsigned long)cpu_addr, order);
                if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
                        return NULL;

                /* The address doesn't fit the required mask and we do
                   not have an iommu.  Try again with GFP_DMA.  */
                gfp |= GFP_DMA;
                goto try_again;
        }

        DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
              size, cpu_addr, *dma_addrp, __builtin_return_address(0));

        return cpu_addr;
}

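/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */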
static void alpha_pci_free_coherent(struct device *dev, size_t size,
                                    void *cpu_addr, dma_addr_t dma_addr,
                                    struct dma_attrs *attrs)
{
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
        free_pages((unsigned long)cpu_addr, get_order(size));

        DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
              dma_addr, size, __builtin_return_address(0));
}

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

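/* Classify the elements of the scatterlist.  Write dma_address of each
   element with:
        0   : Followers all physically adjacent.
        1   : Followers all virtually adjacent.
        -1  : Not leader, physically adjacent to previous.
        -2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of the
   mergable followers.  */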
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
            int virt_ok)
{
        unsigned long next_paddr;
        struct scatterlist *leader;
        long leader_flag, leader_length;
        unsigned int max_seg_size;

        leader = sg;
        leader_flag = 0;
        leader_length = leader->length;
        next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

        /* Without a device to bound the maximum segment size, never
           merge entries.  */
        max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
        for (++sg; sg < end; ++sg) {
                unsigned long addr, len;
                addr = SG_ENT_PHYS_ADDRESS(sg);
                len = sg->length;

                if (leader_length + len > max_seg_size)
                        goto new_segment;

                if (next_paddr == addr) {
                        sg->dma_address = -1;
                        leader_length += len;
                } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
                        sg->dma_address = -2;
                        leader_flag = 1;
                        leader_length += len;
                } else {
new_segment:
                        leader->dma_address = leader_flag;
                        leader->dma_length = leader_length;
                        leader = sg;
                        leader_flag = 0;
                        leader_length = len;
                }

                next_paddr = addr + len;
        }

        leader->dma_address = leader_flag;
        leader->dma_length = leader_length;
}

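/* Given a scatterlist leader, choose an allocation method and fill in
   the blanks.  */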
static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
        struct scatterlist *out, struct pci_iommu_arena *arena,
        dma_addr_t max_dma, int dac_allowed)
{
        unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
        long size = leader->dma_length;
        struct scatterlist *sg;
        unsigned long *ptes;
        long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
        /* If everything is physically contiguous, and the addresses
           fall into the direct-map window, use it.  */
        if (leader->dma_address == 0
            && paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                out->dma_address = paddr + __direct_map_base;
                out->dma_length = size;

                DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
                     __va(paddr), size, out->dma_address);

                return 0;
        }
#endif

        /* If physically contiguous and DAC is available, use it.  */
        if (leader->dma_address == 0 && dac_allowed) {
                out->dma_address = paddr + alpha_mv.pci_dac_offset;
                out->dma_length = size;

                DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
                     __va(paddr), size, out->dma_address);

                return 0;
        }

        /* Otherwise, use the iommu to make the pages virtually
           contiguous.  */
        paddr &= ~PAGE_MASK;
        npages = iommu_num_pages(paddr, size, PAGE_SIZE);
        dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
        if (dma_ofs < 0) {
                /* If a direct map was attempted above and failed, die.  */
                if (leader->dma_address == 0)
                        return -1;

                /* Otherwise, break up the remaining virtually contiguous
                   hunks into individual direct maps and retry.  */
                sg_classify(dev, leader, end, 0);
                return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
        }

        out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
        out->dma_length = size;

        DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
             __va(paddr), size, out->dma_address, npages);

        /* All virtually contiguous.  We need to find the length of each
           physically contiguous subsegment to fill in the ptes.  */
        ptes = &arena->ptes[dma_ofs];
        sg = leader;
        do {
#if DEBUG_ALLOC > 0
                struct scatterlist *last_sg = sg;
#endif

                size = sg->length;
                paddr = SG_ENT_PHYS_ADDRESS(sg);

                while (sg+1 < end && (int) sg[1].dma_address == -1) {
                        size += sg[1].length;
                        sg++;
                }

                npages = iommu_num_pages(paddr, size, PAGE_SIZE);

                paddr &= PAGE_MASK;
                for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                        *ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
                DBGA(" (%ld) [%p,%x] np %ld\n",
                     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                     last_sg->length, npages);
                while (++last_sg <= sg) {
                        DBGA(" (%ld) [%p,%x] cont\n",
                             last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                             last_sg->length);
                }
#endif
        } while (++sg < end && (int) sg->dma_address < 0);

        return 1;
}

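/* Map a scatterlist for streaming mode DMA.  Returns the number of
   entries actually mapped, or 0 on failure.  */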
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
                            int nents, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        struct scatterlist *start, *end, *out;
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;
        dma_addr_t max_dma;
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

        /* Fast path single entry scatterlists.  */
        if (nents == 1) {
                sg->dma_length = sg->length;
                sg->dma_address
                  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
                                     sg->length, dac_allowed);
                return sg->dma_address != 0;
        }

        start = sg;
        end = sg + nents;

        /* First, prepare information about the entries.  */
        sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

        /* Second, figure out where we're going to map things.  */
        if (alpha_mv.mv_pci_tbi) {
                hose = pdev ? pdev->sysdata : pci_isa_hose;
                max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
                arena = hose->sg_pci;
                if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                        arena = hose->sg_isa;
        } else {
                max_dma = -1;
                arena = NULL;
                hose = NULL;
        }

        /* Third, iterate over the scatterlist leaders and allocate
           dma space as needed.  */
        for (out = sg; sg < end; ++sg) {
                if ((int) sg->dma_address < 0)
                        continue;
                if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
                        goto error;
                out++;
        }

        /* Mark the end of the list for pci_unmap_sg.  */
        if (out < end)
                out->dma_length = 0;

        if (out - start == 0)
                printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
        DBGA("pci_map_sg: %ld entries\n", out - start);

        return out - start;

 error:
        printk(KERN_WARNING "pci_map_sg failed: "
               "could not allocate dma page tables\n");

        /* Some allocation failed while mapping the scatterlist
           entries.  Unmap them now.  */
        if (out > start)
                pci_unmap_sg(pdev, start, out - start, dir);
        return 0;
}

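/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */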
static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
                               int nents, enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        unsigned long flags;
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;
        struct scatterlist *end;
        dma_addr_t max_dma;
        dma_addr_t fbeg, fend;

        if (dir == PCI_DMA_NONE)
                BUG();

        if (! alpha_mv.mv_pci_tbi)
                return;

        hose = pdev ? pdev->sysdata : pci_isa_hose;
        max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        fbeg = -1, fend = 0;

        spin_lock_irqsave(&arena->lock, flags);

        for (end = sg + nents; sg < end; ++sg) {
                dma_addr_t addr;
                size_t size;
                long npages, ofs;
                dma_addr_t tend;

                addr = sg->dma_address;
                size = sg->dma_length;
                if (!size)
                        break;

                if (addr > 0xffffffff) {
                        /* It's a DAC address -- nothing to do.  */
                        DBGA(" (%ld) DAC [%llx,%zx]\n",
                             sg - end + nents, addr, size);
                        continue;
                }

                if (addr >= __direct_map_base
                    && addr < __direct_map_base + __direct_map_size) {
                        /* Direct-mapped -- nothing to do.  */
                        DBGA(" (%ld) direct [%llx,%zx]\n",
                             sg - end + nents, addr, size);
                        continue;
                }

                DBGA(" (%ld) sg [%llx,%zx]\n",
                     sg - end + nents, addr, size);

                npages = iommu_num_pages(addr, size, PAGE_SIZE);
                ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
                iommu_arena_free(arena, ofs, npages);

                tend = addr + size - 1;
                if (fbeg > addr) fbeg = addr;
                if (fend < tend) fend = tend;
        }

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, fbeg, fend);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

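/* Return whether the given PCI device DMA address mask can be
   supported properly.  */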
static int alpha_pci_supported(struct device *dev, u64 mask)
{
        struct pci_dev *pdev = alpha_gendev_to_pci(dev);
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;

        /* If there exists a direct map, and the mask fits either the
           entire direct mapped space or the total system memory as
           shifted by the map base, we're fine.  */
        if (__direct_map_size != 0
            && (__direct_map_base + __direct_map_size - 1 <= mask ||
                __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
                return 1;

        /* Check that we have a scatter-gather arena that fits.  */
        hose = pdev ? pdev->sysdata : pci_isa_hose;
        arena = hose->sg_isa;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;
        arena = hose->sg_pci;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;

        /* As last resort try ZONE_DMA.  */
        if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
                return 1;

        return 0;
}

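/*
 * AGP GART extensions to the IOMMU
 */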
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p;

        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes.  */
        ptes = arena->ptes;
        p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all reserved (ie not zero and invalid)
           for the iommu tlb that could load them from under us.
           They will be filled in with valid bits by iommu_bind().  */
        for (i = 0; i < pg_count; ++i)
                ptes[p+i] = IOMMU_RESERVED_PTE;

        arena->next_entry = p + pg_count;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
        unsigned long *ptes;
        long i;

        if (!arena) return -EINVAL;

        ptes = arena->ptes;

        /* Make sure they're all reserved first...  */
        for(i = pg_start; i < pg_start + pg_count; i++)
                if (ptes[i] != IOMMU_RESERVED_PTE)
                        return -EBUSY;

        iommu_arena_free(arena, pg_start, pg_count);
        return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
           struct page **pages)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, j;

        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        ptes = arena->ptes;

        for(j = pg_start; j < pg_start + pg_count; j++) {
                if (ptes[j] != IOMMU_RESERVED_PTE) {
                        spin_unlock_irqrestore(&arena->lock, flags);
                        return -EBUSY;
                }
        }

        for(i = 0, j = pg_start; i < pg_count; i++, j++)
                ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

        spin_unlock_irqrestore(&arena->lock, flags);

        return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
        unsigned long *p;
        long i;

        if (!arena) return -EINVAL;

        p = arena->ptes + pg_start;
        for(i = 0; i < pg_count; i++)
                p[i] = IOMMU_RESERVED_PTE;

        return 0;
}

static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == 0;
}

static int alpha_pci_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask ||
            !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}

struct dma_map_ops alpha_pci_ops = {
        .alloc                  = alpha_pci_alloc_coherent,
        .free                   = alpha_pci_free_coherent,
        .map_page               = alpha_pci_map_page,
        .unmap_page             = alpha_pci_unmap_page,
        .map_sg                 = alpha_pci_map_sg,
        .unmap_sg               = alpha_pci_unmap_sg,
        .mapping_error          = alpha_pci_mapping_error,
        .dma_supported          = alpha_pci_supported,
        .set_dma_mask           = alpha_pci_set_mask,
};

struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);