#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

/* Debug printout stub; DBG() calls compile away unless a printing body
 * is provided here. */
#define DBG(...)

/* Set via "iommu=novmerge" to disable virtual merging of scatterlist
 * entries that happen to be contiguous in DMA space. */
static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

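/*
 * The per-CPU hash is precomputed so it does not have to be calculated on
 * every allocation. Hashing the CPU number spreads the CPUs across the
 * allocation pools of a table, which reduces lock contention when many
 * CPUs map and unmap DMA concurrently.
 */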
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

#ifdef CONFIG_FAIL_IOMMU

/*
 * Optional fault injection: when configured via the "fail_iommu=" boot
 * parameter or the debugfs knobs under "fail_iommu", IOMMU allocations for
 * devices that have their "fail_iommu" sysfs attribute set are made to
 * fail on purpose, exercising driver error paths.
 */
static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
		   fail_iommu_store);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}

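/*
 * Must run after the PCI and VIO bus types have been registered but before
 * devices are probed, so every device picks up the fail_iommu attribute.
 */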
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = 0xffffffffffffffffUL >> (64 - align_order);

	/* Sanity check: a zero-length allocation is a caller bug */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * Pick a pool based on the per-CPU hash. We don't need to disable
	 * preemption here because any CPU can safely use any IOMMU pool.
	 */
	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/*
	 * The hint can point past the end of the pool, e.g. when the
	 * previous allocation finished at the very end of the available
	 * space. If so, go back to the start of the pool.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;

		/*
		 * If we're constrained on the address range, first try at
		 * the masked hint to avoid an O(n) search; on the second
		 * pass, start from the beginning of pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure: rescan this pool from its start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Then try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	if (largealloc) {
		/* Don't bump to a new block, to limit fragmentation */
		pool->hint = end;
	} else {
		/* Bump the hint to a new block for small allocations */
		pool->hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update the handle for scatter-gather allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

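/*
 * Allocate a range of table entries for npages and program the hardware
 * TCEs to point at the buffer. Returns the resulting DMA address, or
 * DMA_ERROR_CODE on failure.
 */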
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into the real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/*
	 * tce_build() only returns non-zero for transient errors. Clean up
	 * the table bitmap in this case and return DMA_ERROR_CODE.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool, at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/*
	 * Make sure the TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

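/*
 * Map a scatter/gather list. Each element gets its own range of TCEs;
 * elements that turn out to be contiguous in DMA space are merged into a
 * single segment unless virtual merging was disabled with "iommu=novmerge".
 * Returns the number of DMA segments produced, or 0 on failure.
 */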
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Skip empty entries */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}

		/* Allocate iommu entries for this segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/*
			 * We cannot merge if the new DMA address is not
			 * contiguous with the previous one, or if the merged
			 * segment would exceed the device's segment limit.
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %x\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate the next DMA address for the contiguity check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/*
	 * For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely.
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}

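/*
 * Unmap a scatter/gather list previously mapped with iommu_map_sg() and
 * release the TCEs backing each segment.
 */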
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction,
		    struct dma_attrs *attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/*
	 * Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * deliberately do not issue an mb() here, the cost would be too
	 * high for the fast path.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * With a firmware-assisted dump, the system goes through a clean
	 * reboot at crash time, so it is safe to clear (rather than
	 * preserve) the TCE entries even in a dump kernel.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entries can read back as
			 * 0x7fffffffffffffff on some platforms; treat those
			 * as free as well.
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

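/*
 * Build an iommu_table. This sets up the allocation bitmap that manages
 * the TCE space and splits the table into per-CPU pools plus a large pool
 * reserved for big allocations.
 */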
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will never be used for any mapping.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * and would crash the machine or even lose data because of it.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
		       node_name);
		return;
	}

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the "unexpected TCEs" warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

#ifdef CONFIG_IOMMU_API
	if (tbl->it_group) {
		iommu_group_put(tbl->it_group);
		BUG_ON(tbl->it_group);
	}
#endif

	/* verify that the table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

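/*
 * Create TCEs for a kernel buffer described by a page pointer, an offset
 * into that page and a size. The returned dma_addr_t points at the same
 * byte within the IOMMU page as the byte that was passed in.
 */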
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));

	if (tbl) {
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}

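/*
 * Allocate a contiguous, zeroed buffer and create DMA mappings over it.
 * Returns the kernel virtual address of the buffer and stores the DMA
 * address of its first page in *dma_handle.
 */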
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * The client asked for far too much space. This is checked later
	 * anyway, but it is easier to debug here for the drivers than in
	 * the TCE tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up TCEs to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}

#ifdef CONFIG_IOMMU_API

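/*
 * The remainder of this file exposes the iommu_table through the generic
 * IOMMU group API so that external users (such as VFIO) can take over a
 * table, map and unmap individual TCEs, and hand the table back.
 */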
static void group_release(void *iommu_data)
{
	struct iommu_table *tbl = iommu_data;
	tbl->it_group = NULL;
}

void iommu_register_group(struct iommu_table *tbl,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
			PTR_ERR(grp));
		return;
	}
	tbl->it_group = grp;
	iommu_group_set_iommudata(grp, tbl, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			 pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages)
{
	/* Clearing only supports a TCE value of 0 */
	if (tce_value)
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);

int iommu_tce_put_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce)
{
	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
		return -EINVAL;

	if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ))
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);

unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long oldtce;
	struct iommu_pool *pool = get_pool(tbl, entry);

	spin_lock(&(pool->lock));

	oldtce = ppc_md.tce_get(tbl, entry);
	if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
		ppc_md.tce_free(tbl, entry, 1);
	else
		oldtce = 0;

	spin_unlock(&(pool->lock));

	return oldtce;
}
EXPORT_SYMBOL_GPL(iommu_clear_tce);

int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldtce;
	struct page *page;

	for ( ; pages; --pages, ++entry) {
		oldtce = iommu_clear_tce(tbl, entry);
		if (!oldtce)
			continue;

		page = pfn_to_page(oldtce >> PAGE_SHIFT);
		WARN_ON(!page);
		if (page) {
			if (oldtce & TCE_PCI_WRITE)
				SetPageDirty(page);
			put_page(page);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_clear_tces_and_put_pages);

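/*
 * For iommu_tce_build() below, hwaddr is a kernel linear-mapping virtual
 * address; ppc_md.tce_build() converts it to the real address that is
 * programmed into the TCE.
 */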
int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
		unsigned long hwaddr, enum dma_data_direction direction)
{
	int ret = -EBUSY;
	unsigned long oldtce;
	struct iommu_pool *pool = get_pool(tbl, entry);

	spin_lock(&(pool->lock));

	oldtce = ppc_md.tce_get(tbl, entry);
	/* Add the new entry only if the slot is not already in use */
	if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
		ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);

	spin_unlock(&(pool->lock));

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_build);

int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
		unsigned long tce)
{
	int ret;
	struct page *page = NULL;
	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page);
	if (unlikely(ret != 1))
		return -EFAULT;

	hwaddr = (unsigned long) page_address(page) + offset;

	ret = iommu_tce_build(tbl, entry, hwaddr, direction);
	if (ret)
		put_page(page);

	if (ret < 0)
		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
		       __func__, entry << tbl->it_page_shift, tce, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode);

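/*
 * Take exclusive ownership of the table on behalf of an external user:
 * every entry is marked used in the bitmap so the kernel DMA API can no
 * longer allocate from it, existing mappings are torn down, and DMA
 * bypass is disabled if the platform supports it.
 */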
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long sz = (tbl->it_size + 7) >> 3;

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		return -EBUSY;
	}

	memset(tbl->it_map, 0xff, sz);
	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);

	/*
	 * Disable iommu bypass, otherwise the user can DMA to all of our
	 * physical memory via the bypass window instead of just the pages
	 * that have been explicitly mapped into the iommu.
	 */
	if (tbl->set_bypass)
		tbl->set_bypass(tbl, false);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long sz = (tbl->it_size + 7) >> 3;

	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* The kernel owns the device again, so bypass can be re-enabled */
	if (tbl->set_bypass)
		tbl->set_bypass(tbl, true);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct device *dev)
{
	struct iommu_table *tbl;

	/*
	 * The sysfs entries should be populated before binding the device
	 * to its IOMMU group. If they aren't ready yet, simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (dev->iommu_group) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl || !tbl->it_group) {
		pr_debug("%s: Skipping device %s with no tbl\n",
			 __func__, dev_name(dev));
		return 0;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev),
		 iommu_group_id(tbl->it_group));

	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
		       __func__, IOMMU_PAGE_SIZE(tbl),
		       PAGE_SIZE, dev_name(dev));
		return -EINVAL;
	}

	return iommu_group_add_device(tbl->it_group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices have no IOMMU table and group, so there is nothing
	 * to detach them from.
	 */
	if (!dev->iommu_group) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);

#endif /* CONFIG_IOMMU_API */