/*
 * Alpha PCI IOMMU support: scatter-gather "arena" management for the
 * per-hose DMA windows, plus the streaming DMA mapping interfaces
 * (pci_map_single, pci_map_sg, ...) and consistent allocations built
 * on top of them.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)	printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)	printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

#define ISA_DMA_MASK 0x00ffffff

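/* Format of a scatter-gather window pte, as built below: the page
   frame number shifted left by one bit with the low bit set (the
   valid bit, by all appearances -- compare IOMMU_INVALID_PTE).  Note
   that (paddr >> (PAGE_SHIFT-1)) is simply (paddr >> PAGE_SHIFT) << 1,
   i.e. pfn << 1.  */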
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

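/* Return MAX, or, if main memory is smaller than MAX, the amount of
   low memory rounded up to the next power of two.  */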
unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

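/* Allocate and initialise a scatter-gather arena: a DMA window of
   WINDOW_SIZE bytes at bus address BASE on controller HOSE, backed by
   a page table with one pte per window page.  On CONFIG_DISCONTIGMEM
   the backing storage is allocated on node NID when possible, falling
   back to a system-wide bootmem allocation.  */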
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* The arena page table must be aligned to at least its own
	   size: the window hardware forms the pte address by
	   concatenation rather than addition.  Honour a larger ALIGN
	   if the caller requests one.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       " falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       " falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* By default allocations within the arena need no extra
	   alignment; chip-specific setup may raise align_entry to
	   work around hardware quirks.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

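/* Search the arena for N consecutive free ptes whose starting index
   satisfies the alignment MASK.  Returns the index of the first pte,
   or -1 if no such run exists even after flushing the window TLB and
   retrying from the start of the arena.  Must be called with the
   arena lock held; the caller is responsible for marking the ptes in
   use before dropping the lock.  */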
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;

	/* Search forward for the first mask-aligned sequence of N
	   free ptes.  */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = (arena->next_entry + mask) & ~mask;
	i = 0;
	while (i < n && p+i < nent) {
		if (ptes[p+i])
			p = (p + i + 1 + mask) & ~mask, i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		/* Reached the end of the arena.  Flush the window TLB
		   and restart the search from the beginning.  */
		alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

		p = 0, i = 0;
		while (i < n && p+i < nent) {
			if (ptes[p+i])
				p = (p + i + 1 + mask) & ~mask, i = 0;
			else
				i = i + 1;
		}

		if (i < n)
			return -1;
	}

	/* Success.  */
	return p;
}

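/* Allocate N consecutive ptes from the arena, aligned as requested,
   and mark them in use.  Returns the pte index, or -1 on failure.
   iommu_arena_free() below simply clears the ptes again; the caller
   takes care of any TLB invalidation that may be needed.  */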
static long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid for
	   the iommu tlb that could load them from under us.  The
	   caller fills in the real translations when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/* True if the machine supports DAC addressing, and the given device
   can make use of it.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);

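/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned, or
   0 on failure.  Once the device is given the dma address, the device
   owns this memory until pci_unmap_single is performed.  */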
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support scatter-gather mapping, and since
	   the direct map was already tried above, this is an error.  */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0;
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to a 64KB boundary for ISA bridges.  */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);

dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);

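/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided in a previous pci_map_single
   call; all other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */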
void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Direct-mapped address -- nothing to undo.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		/* DAC address -- nothing to undo either.  */
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
		BUG();
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);

void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);

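/* Allocate and map a kernel buffer using consistent mode DMA for a
   PCI device.  Returns a non-NULL cpu-view pointer to the buffer if
   successful, and sets *DMA_ADDRP to the PCI-side dma address; else
   returns NULL and *DMA_ADDRP is undefined.  */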
void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	gfp_t gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
			__builtin_return_address(0));
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;

		/* The address doesn't fit the device's dma mask and
		   there is no iommu to remap it.  Retry from
		   ZONE_DMA.  */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);

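/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent, and SIZE
   must match what was passed in.  References to the memory and
   mappings associated with CPU_ADDR or DMA_ADDR past this call are
   illegal.  */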
void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);

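/* Classify the elements of the scatterlist.  Write the dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write the dma_length of each leader with the combined lengths of
   the mergable followers.  */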
#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

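/* Given a scatterlist leader as produced by sg_classify, choose an
   allocation method (direct map, DAC, or scatter-gather window) and
   fill in the output entry.  Returns 0 if the leader was mapped
   without the iommu, 1 if sg ptes were used, and -1 on failure.  */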
static int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA(" sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, use the iommu to make the pages virtually
	   contiguous.  */
	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually
		   contiguous hunks into individual direct maps and
		   retry.  */
		sg_classify(leader, end, 0);
		return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  Find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		/* Merge followers that are physically adjacent.  */
		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA(" (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA(" (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

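/* Map a scatter-gather list for streaming mode DMA.  Adjacent entries
   are merged where possible (physically, or virtually via the iommu),
   and each resulting segment's dma_address and dma_length are filled
   in.  Returns the number of dma segments used, or 0 on failure.  */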
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path for single-entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);

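/* Unmap a set of streaming mode DMA translations.  The cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */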
void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA(" (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Direct-mapped -- nothing to do.  */
			DBGA(" (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA(" (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);

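/* Return whether the given PCI device DMA address mask can be
   supported properly.  */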
int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either the
	   entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As a last resort, try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);

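/* Extensions to the IOMMU arena interface, apparently intended for
   GART-style users: reserve a range of arena ptes, bind physical
   pages into a reserved range, and release them again.
   iommu_reserve() marks the ptes IOMMU_RESERVED_PTE so that streaming
   allocations stay out of the range until iommu_bind() installs real
   translations.  */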
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid translations by
	   iommu_bind().  */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all still reserved first.  */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

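/* True if the machine supports DAC (64-bit direct) addressing, and
   the device can make use of it given its dma_mask.  */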
static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine.  */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

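/* Helper for the generic DMA-mapping functions: map a struct device
   back to the struct pci_dev whose DMA parameters should be used.
   Non-PCI devices are treated as ISA/EISA and routed through the ISA
   bridge; see the comments in the body for the exact rules.  */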
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Non-PCI devices asking for DMA are expected to sit behind
	   the ISA/EISA bridge; anything else is a bug.  */
	BUG_ON(!isa_bridge);

	/* If no dma_mask is set, treat it as non-busmaster ISA DMA
	   and hand back the ISA bridge (the bridge is then the bus
	   master).  */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* A mask at least as wide as the ISA bridge's (presumably an
	   EISA bus master) also goes through the bridge.  */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* Otherwise assume an ISA bus master; returning NULL makes
	   the callers fall back to pci_isa_hose and ISA_DMA_MASK.  */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);

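/* Generic dma_set_mask(): accept the new mask only if the underlying
   PCI (or ISA) path can actually address it.  */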
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);