// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)	printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)	printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

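/* Build an IOMMU page-table entry for PADDR: the page frame number sits
   above the valid bit (bit 0), which is set.  */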
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

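/* Return the minimum of MAX or the first power of two larger
   than main memory.  */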
unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* The pte array must be aligned to at least its own size; allow
	   callers to request an even larger alignment.  */
	if (align < mem_size)
		align = mem_size;

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

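/* Must be called with the arena lock held.  */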
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	/* Search forward for the first mask-aligned sequence of N free ptes.  */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i]) {
			p = ALIGN(p + i + 1, mask + 1);
			i = 0;
		} else {
			i = i + 1;
		}
	}

	if (i < n) {
		if (pass < 1) {
			/* Reached the end.  Flush the TLB and restart
			   the search from the beginning.  */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success.  It's the caller's responsibility to mark the ptes
	   in use before releasing the lock.  */
	return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark the ptes in use -- non-zero, but invalid for the
	   IOMMU TLB that could load them from under us.  The caller fills
	   them in with real translations once we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

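/* Release a run of ptes by clearing them.  Any IOMMU TLB flush that may
   be required is the caller's responsibility.  */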
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

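/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */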
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine.  */
	DBGA("pci_dac_dma_supported %s from %ps\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

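/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until the corresponding unmap call is performed.  */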
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support scatter-gather mapping, and, since
	   the direct map was tried above, this is an error.  */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return DMA_MAPPING_ERROR;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB alignment for ISA bridges.  */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return DMA_MAPPING_ERROR;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

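/* Helper for the generic DMA-mapping functions: return the PCI device
   for DEV, or the ISA bridge for ISA/EISA devices routed through it.  */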
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or
	   EISA, BUG() otherwise.  */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then).  */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have a smaller
	   dma_mask due to wiring limitations).  */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes an ISA bus master with dma_mask 0xffffff.  */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

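/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided in a previous map_page call.
   All other usages are undefined.  After this call, reads by the cpu
   to the buffer are guaranteed to see whatever the device wrote
   there.  */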
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		/* A DAC address -- nothing to do.  */
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
		BUG();
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

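/* Allocate and map a kernel buffer using consistent mode DMA for a
   PCI device.  Returns a non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else returns NULL.  */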
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %ps\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't allocate the memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit the required mask and we do not
		   have an iommu.  Try again with GFP_DMA.  */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

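/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from alpha_pci_alloc_coherent, and SIZE
   must be the same as what was passed in.  References to the memory
   and mappings associated with CPU_ADDR or DMA_ADDR past this call
   are illegal.  */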
static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
	      dma_addr, size, __builtin_return_address(0));
}

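/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */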
#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* Without a device there is no segment size limit to honour,
	   so do not merge entries at all.  */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

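/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */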
static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, use the iommu to make the pages virtually
	   contiguous.  */
	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg = sg_next(sg);
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA(" (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA(" (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

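/* Map a scatterlist for streaming mode DMA: classify the entries, then
   map each leader via the direct window, DAC, or the scatter-gather
   iommu.  Returns the number of dma segments or a negative errno.  */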
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return -EIO;
		return 1;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0) {
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
		return -ENOMEM;
	}
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return -ENOMEM;
}

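/* Unmap a set of streaming mode DMA translations.  Again, the cpu
   read rules concerning calls here are the same as for the single
   mapping unmap above.  */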
static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == PCI_DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA(" (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA(" (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA(" (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

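/* Return whether the given PCI device DMA address mask can be
   supported properly.  */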
static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and if the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As a last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}

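/*
 * AGP GART extensions to the IOMMU
 */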
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind().  */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first...  */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

const struct dma_map_ops alpha_pci_ops = {
	.alloc		= alpha_pci_alloc_coherent,
	.free		= alpha_pci_free_coherent,
	.map_page	= alpha_pci_map_page,
	.unmap_page	= alpha_pci_unmap_page,
	.map_sg		= alpha_pci_map_sg,
	.unmap_sg	= alpha_pci_unmap_sg,
	.dma_supported	= alpha_pci_supported,
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);