/*
 * omap iommu: simple virtual address space management
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

static struct kmem_cache *iovm_area_cachep;

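/* return total bytes of sg buffers */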
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg_dma_len(sg);

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

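/* return the largest supported iommu page size to which @addr is aligned, or 0 */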
static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;
        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

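/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */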
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }

        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
                nr_entries++;
                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }

        return nr_entries;
}

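/* allocate and initialize the sg_table header (a kind of 'superblock') */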
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                      u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

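/* free the sg_table header (a kind of 'superblock') */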
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

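/* map 'sglist' to a contiguous mpu virtual area and return 'va' */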
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1);
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

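/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */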
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

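/*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */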
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (~flags & IOVMF_DA_FIXED) {
                /* Don't map address 0 */
                start = obj->da_start ? obj->da_start : alignment;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                   obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}

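/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */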
void *da_to_va(struct iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

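/* fill @sgt with the pages backing the vmalloc'ed buffer @_va, one page per entry */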
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }

        va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all, just exists for
         * consistency of the code readability.
         */
        BUG_ON(!sgt);
}

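/* fill @sgt with physically contiguous memory at @pa, using the largest iommu page sizes the @da/@pa alignment allows */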
static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
                                 size_t len)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va;

        va = phys_to_virt(pa);

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                unsigned bytes;

                bytes = max_alignment(da | pa);
                bytes = min_t(unsigned, bytes, iopgsz_max(len));

                BUG_ON(!iopgsz_ok(bytes));

                sg_set_buf(sg, phys_to_virt(pa), bytes);
                /*
                 * 'pa' is continuous (linear).
                 */
                pa += bytes;
                da += bytes;
                len -= bytes;
        }
        BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all, just exists for
         * consistency of the code readability.
         */
        BUG_ON(!sgt);
}

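/* create iommu page table entries mapping @new's da range to the physical segments in @sgt */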
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err = -EINVAL;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;

        if (!obj || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                int pgsz;
                size_t bytes;
                struct iotlb_entry e;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                flags &= ~IOVMF_PGSZ_MASK;
                pgsz = bytes_to_iopgsz(bytes);
                if (pgsz < 0)
                        goto err_out;
                flags |= pgsz;

                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);

                iotlb_init_entry(&e, da, pa, flags);
                err = iopgtable_store_entry(obj, &e);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, da);

                BUG_ON(!iopgsz_ok(bytes));

                da += bytes;
        }
        return err;
}

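/* release the iommu page table entries backing @area */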
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;

        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        while (total > 0) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, start);
                if (bytes == 0)
                        bytes = PAGE_SIZE;
                else
                        dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                                __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

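/* template function for all unmapping */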
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

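/* allocate an iovma for @da/@bytes, map @sgt into it and remember @va */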
static u32 map_iommu_region(struct iommu *obj, u32 da,
                const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(obj, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
                const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

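/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da again, which can
 * be adjusted if 'IOVMF_DA_FIXED' is not set.
 */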
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
               u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);

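/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_vmap()'.
 */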
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' was provided by the caller of 'iommu_vmap()';
         * just hand it back so the caller can free it.
         */
        sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

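/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */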
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

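/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */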
void iommu_vfree(struct iommu *obj, const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
                        size_t bytes, u32 flags)
{
        struct sg_table *sgt;

        sgt = sgtable_alloc(bytes, flags, da, pa);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        sgtable_fill_kmalloc(sgt, pa, da, bytes);

        da = map_iommu_region(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
                sgtable_free(sgt);
        }

        return da;
}

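/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @pa:	contiguous physical memory
 * @bytes:	mapping size
 * @flags:	iovma and page property
 *
 * Creates 1-1-1 mapping with given @pa and returns @da again, which can
 * be adjusted if 'IOVMF_DA_FIXED' is not set.
 */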
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
               u32 flags)
{
        void *va;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = ioremap(pa, bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_LINEAR;
        flags |= IOVMF_MMIO;

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                iounmap(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);

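/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */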
void iommu_kunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        typedef void (*func_t)(const void *);

        sgt = unmap_vm_area(obj, da, (func_t)iounmap,
                            IOVMF_LINEAR | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

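/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */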
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        u32 pa;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
        if (!va)
                return -ENOMEM;
        pa = virt_to_phys(va);

        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                kfree(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

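/**
 * iommu_kfree  -  release memory allocated by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */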
void iommu_kfree(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");