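/*
 * Exynos IOMMU driver: controls the System MMUs that translate DMA addresses
 * for the master IP blocks (multimedia devices and the like) on Samsung
 * Exynos SoCs.
 */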
#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/of.h>	/* of_find_matching_node(), struct of_device_id */

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

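/*
 * The System MMU uses a two-level page table. A level-1 entry covers a 1MiB
 * "section"; alternatively it links to a level-2 table of 4KiB small pages,
 * sixteen of which can be fused into a 64KiB large page.
 */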
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	(((n) & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* selects bits 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24)
#define CFG_SYSSEL	(1 << 22)
#define CFG_FLPDCACHE	(1 << 20)

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1))

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058
#define has_sysmmu(dev)	((dev)->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
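
/*
 * Unmapped level-1 slots are not left as plain fault entries: each links to
 * a shared, all-zero level-2 table so that a prefetched first-level entry is
 * always a valid pointer. The lv1ent_fault()/lv1ent_page() macros above
 * treat such a ZERO_LV2LINK as "no mapping".
 */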

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION,
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};
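
/*
 * The interrupt handler derives the fault type from __ffs(REG_INT_STATUS),
 * so the enumerators above must follow the hardware's interrupt bit order.
 * fault_reg_offset[] below gives, per fault type, the SFR that latches the
 * faulting address (SYSMMU_FAULT_UNKNOWN has none).
 */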
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/* attached to dev->archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct device *sysmmu;	/* System MMU controller serving this master */
	struct iommu_domain *domain; /* domain this device is attached to */
	void *vmm_data;		/* IO virtual memory manager's private data */
	spinlock_t lock;	/* serializes System MMU enable/disable */
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of exynos_iommu_owner.client */
	sysmmu_pte_t *pgtable;	/* level-1 page table, 16KiB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* protects this structure and @clients */
	spinlock_t pgtablelock;	/* protects @pgtable and @lv2entcnt */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata {
	struct device *sysmmu;	/* the System MMU's own device */
	struct device *master;	/* master device owning this System MMU */
	void __iomem *sfrbase;	/* mapped special function registers */
	struct clk *clk;	/* gate clock of the System MMU */
	struct clk *clk_master;	/* optional gate clock of the master IP */
	int activations;	/* enable reference count */
	spinlock_t lock;	/* protects this structure */
	struct iommu_domain *domain; /* domain given to __sysmmu_enable() */
	phys_addr_t pgtable;	/* physical address of the assigned pgtable */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	 * and it needs to be initialized
	 */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
{
	return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	/* poll the BLOCKED status bit; on timeout, re-enable and give up */
	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s (page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* the System MMU is in blocked state when the interrupt occurred */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not raised by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if the IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by the fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

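/*
 * Version-dependent configuration: System MMU v3.2 gains the first-level
 * page table descriptor (FLPD) cache and the SYSSEL bit; v3.3 keeps the
 * FLPD cache but enables automatic clock gating (ACGEN) and clears the
 * LRU replacement bit instead.
 */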
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = __raw_sysmmu_version(data);
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data);

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

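/*
 * __exynos_sysmmu_enable() - enable the System MMU serving @dev
 *
 * Returns 0 if the System MMU has just been enabled, 1 if it was already
 * enabled with the same page table, and -EBUSY if it is enabled with a
 * different page table. @domain may be NULL when a master driver enables
 * its System MMU directly instead of through the IOMMU API.
 */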
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
					    sysmmu_iova_t iova)
{
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

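		/*
		 * On System MMU v2 a TLB entry must be invalidated per 4KiB
		 * page: issue one FLUSH_ENTRY write per small page of the
		 * unmapped range, capped at 64 invalidations (enough to
		 * cover a full 1MiB section).
		 */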
		if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq ? irq : -ENODEV;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(dev, "Failed to prepare clk\n");
		return ret;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};

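/*
 * The System MMU walks page tables in DRAM with no coherency with the CPU
 * caches, so every page table update must be cleaned from the L1 and outer
 * caches before the hardware can observe it.
 */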
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *exynos_domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
	if (!exynos_domain)
		return NULL;

	/* level-1 table: 4096 entries * 4 bytes = 16KiB (order 2) */
	exynos_domain->pgtable =
		(sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!exynos_domain->pgtable)
		goto err_pgtable;

	/* one free-entry counter per section: 4096 shorts = 8KiB (order 1) */
	exynos_domain->lv2entcnt =
		(short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!exynos_domain->lv2entcnt)
		goto err_counter;

	/* point every unmapped section at the shared zero level-2 table */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(exynos_domain->pgtable,
		      exynos_domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&exynos_domain->lock);
	spin_lock_init(&exynos_domain->pgtablelock);
	INIT_LIST_HEAD(&exynos_domain->clients);

	exynos_domain->domain.geometry.aperture_start = 0;
	exynos_domain->domain.geometry.aperture_end = ~0UL;
	exynos_domain->domain.geometry.force_aperture = true;

	return &exynos_domain->domain;

err_counter:
	free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
	kfree(exynos_domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until the System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(priv);
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

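		/*
		 * If this level-1 slot previously held the zero_lv2_table
		 * link, the FLPD cache of System MMU v3.3 may still hold
		 * that stale link and keep faulting on addresses now backed
		 * by the new level-2 table, so the entry is invalidated in
		 * every client of this domain. Blocking is not needed here:
		 * the invalidated IOVA is not mapped yet.
		 */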
		if (need_flush_flpd_cache) {
			struct exynos_iommu_owner *owner;

			spin_lock(&priv->lock);
			list_for_each_entry(owner, &priv->clients, client)
				sysmmu_tlb_invalidate_flpdcache(
							owner->dev, iova);
			spin_unlock(&priv->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}
		/* the linked level-2 table is entirely unused: release it */
		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&priv->lock);
	if (lv1ent_page_zero(sent)) {
		struct exynos_iommu_owner *owner;

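		/*
		 * Flush the FLPD cache on System MMU v3.3, which may have
		 * cached the old zero_lv2_table link for this section via a
		 * speculative prefetch before the 1MiB mapping was written.
		 */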
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
	}
	spin_unlock(&priv->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

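/*
 * *CAUTION* to callers of exynos_iommu_map(): @l_iova and @paddr must both
 * be aligned to @size, and @size must be one of the granularities announced
 * in pgsize_bitmap (4KiB, 64KiB or 1MiB); the IOMMU core splits larger
 * requests accordingly. A 64KiB mapping is written as sixteen identical
 * level-2 entries.
 */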
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(priv, entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
					      sysmmu_iova_t iova, size_t size)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* restore the zero link rather than a plain fault entry */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(priv, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
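
/*
 * Only the three hardware granularities (1MiB section, 64KiB large page,
 * 4KiB small page) are advertised via pgsize_bitmap; the IOMMU core splits
 * every map/unmap request into chunks of these sizes before calling back.
 */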
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);