/*
 * omap iommu: simple virtual address space management
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <mach/iommu.h>
#include <mach/iovmm.h>

#include "iopgtable.h"
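
/*
 * A device driver needs to create address mappings between:
 *
 *  - da: device (iommu) virtual address
 *  - pa: physical address
 *  - va: mpu virtual address
 *
 * Four mapping patterns are provided here:
 *
 *  1. iommu_kmap()/iommu_kunmap():     map a given contiguous 'pa' (1-1-1)
 *  2. iommu_kmalloc()/iommu_kfree():   allocate and map contiguous memory
 *  3. iommu_vmap()/iommu_vunmap():     map a given scatterlist (1-n-1)
 *  4. iommu_vmalloc()/iommu_vfree():   allocate and map discontiguous memory
 *
 * A minimal usage sketch (hypothetical driver code, error handling
 * abbreviated):
 *
 *	u32 da;
 *
 *	da = iommu_kmalloc(obj, 0, SZ_4K, 0);	(da == 0: let iovmm pick one)
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_kfree(obj, da);
 */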
static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

/*
 * Calculate the optimal number of sg entries for @bytes, using the
 * largest iommu superpages (16MB, 1MB, 64KB, 4KB) that fit.
 */
static unsigned int sgtable_nents(size_t bytes)
{
	int i;
	unsigned int nr_entries;
	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	nr_entries = 0;
	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (bytes >= pagesize[i]) {
			nr_entries += (bytes / pagesize[i]);
			bytes %= pagesize[i];
		}
	}
	BUG_ON(bytes);

	return nr_entries;
}

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	/*
	 * The 'superpage' entry layout is only used for the linear,
	 * anonymous case; everything else gets 4KB entries.
	 */
	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
		nr_entries = sgtable_nents(bytes);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);	/* don't leak the header on failure */
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of 'superblock') */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1);
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}
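
/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */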
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);
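
/*
 * Find a hole in the device address space that fits the requested
 * @da/@bytes, allocate a new iovm_struct for it and insert it into
 * the 'mmap' list, keeping the list sorted by ascending 'da'.
 */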
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if ((prev_end <= start) && (start + bytes < tmp->da_start))
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of area
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}
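
/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */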
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
	flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;
	const size_t total = len;

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = iopgsz_max(len);

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		len -= bytes;
	}
	BUG_ON(len);

	/* 'len' is zero by now; clean the whole buffer just described */
	clean_dcache_area(va, total);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !new || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;	/* don't return 'err' uninitialized */
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	/* undo the entries mapped so far */
	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
	    const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
	    const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}
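
/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory, or 0 for anonymous allocation
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */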
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
	       u32 flags)
{
	size_t bytes;
	void *va;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	va = vmap_sg(sgt);
	if (IS_ERR(va))
		return PTR_ERR(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
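
/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */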
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * The sgtable was provided by the caller of iommu_vmap();
	 * just hand it back so that the caller can free it.
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);
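
/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory, or 0 for anonymous allocation
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */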
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
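
/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */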
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}
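
/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory, or 0 for anonymous allocation
 * @pa:		contiguous physical memory
 * @bytes:	mapping size
 * @flags:	iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */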
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
	       u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
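
/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */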
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/* __iounmap() takes 'volatile void __iomem *'; adapt the type */
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);
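
/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory, or 0 for anonymous allocation
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */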
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
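
/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */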
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");