/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

/* Encode a physical address as an iommu pte: the pfn shifted left
   one bit, with bit 0 serving as the valid bit.  */
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
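
/* Worked example (illustrative, not from the original source): with
   alpha's 8KB pages (PAGE_SHIFT == 13), a page-aligned paddr of
   0x2000 (pfn 1) yields (0x2000 >> 12) | 1 == 0x3, i.e. the pfn in
   the upper bits plus the valid bit in bit 0.  */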

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
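
/* Sizing example (illustrative, not from the original source): with
   1.5GB of main memory and max == 4GB, mem < max, so the result is
   roundup_pow_of_two(1.5GB) == 2GB; with 8GB of memory, max == 4GB
   is returned unchanged.  */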

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	/* One pte per iommu page in the window.  */
	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
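
/* Sizing example (illustrative, not from the original source): a
   128MB window with 8KB pages needs 16384 ptes of 8 bytes each, so
   mem_size == 128MB / (8192 / 8) == 128KB, which also becomes the
   minimum arena alignment.  */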

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
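
/* Boundary example (illustrative, not from the original source): a
   device with a 64KB dma segment boundary and 8KB pages gives a
   boundary_size of 8 entries, so any candidate run of N ptes that
   would straddle an 8-entry boundary relative to base is rejected
   and p is re-aligned to the next mask-aligned slot.  */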

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

/* Clear the ptes backing a previously allocated range; the iommu
   tlb is flushed by the callers as needed.  */
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %pf\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
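
/* Illustrative example (the offset value is assumed, not taken from
   this file): were pci_dac_offset 0x8000000000UL, a device
   advertising a full 64-bit dma_mask keeps all offset bits, so DAC
   is allowed; a 32-bit dma_mask clears them, making
   (dac_offset & dev->dma_mask) != dac_offset, and DAC is refused.  */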

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
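
/* Address composition example (illustrative, not from the original
   source): with arena->dma_base == 0x800000, dma_ofs == 3, 8KB pages
   and a buffer starting 0x123 bytes into its page, the returned bus
   address is 0x800000 + 3*8192 + 0x123.  */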

/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to not see the data.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
		BUG();
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %pf\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't allocate pages. */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
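
/* Coalescing example (illustrative, not from the original source):
   an entry covering physical 0x10000..0x10fff followed by one at
   0x11000 merges physically (follower marked -1); two page-aligned
   entries that are not physically adjacent can still merge virtually
   through the iommu when virt_ok (follower marked -2, leader flagged
   1), as long as the combined length stays within the device's max
   segment size.  */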

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is DMA'able, see if we can use the direct
	   map window.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */
	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocations failed.  Unmap them all.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}
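
/* Driver-side usage sketch (illustrative only; not part of this
   file).  Requests made through the generic DMA API reach
   alpha_pci_map_sg() via the dma_map_ops table below.  */
#if 0
static int example_map(struct pci_dev *pdev, struct scatterlist *sgl,
		       int nents)
{
	int mapped = pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);

	if (mapped == 0)
		return -ENOMEM;
	/* ... program the device with sg_dma_address()/sg_dma_len()
	   for each of the `mapped' entries, then call
	   pci_unmap_sg(pdev, sgl, nents, PCI_DMA_TODEVICE) when the
	   transfer completes ...  */
	return mapped;
}
#endif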

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == PCI_DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}

/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	/* The whole range must have been reserved beforehand.  */
	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	/* Return the range to the reserved (but unbound) state.  */
	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
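
/* Lifecycle sketch for the GART hooks above (illustrative only;
   `agp_pages' and `agp_count' are hypothetical):  */
#if 0
	long ofs = iommu_reserve(arena, agp_count, align_mask);
	if (ofs >= 0) {
		if (iommu_bind(arena, ofs, agp_count, agp_pages) == 0) {
			/* ... device DMA through the bound range ... */
			iommu_unbind(arena, ofs, agp_count);
		}
		iommu_release(arena, ofs, agp_count);
	}
#endif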

static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

static int alpha_pci_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
	.set_dma_mask		= alpha_pci_set_mask,
};
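
/* Dispatch note (illustrative, not from the original source): the
   generic DMA API looks these ops up through get_dma_ops(dev), so a
   driver call such as dma_map_single(&pdev->dev, buf, len,
   DMA_TO_DEVICE) lands in alpha_pci_map_page() above.  */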

struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);