// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

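/*
 * Per-SoC description of the IPMMU hardware: register layout, number of
 * contexts and micro-TLBs, and the quirks the driver must apply.
 */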
struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT	100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)
#define IMTTBCR_SL0_LVL_1		(1 << 4)

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014

#define IMSTR				0x0020
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028

#define IMELAR				0x0030
#define IMEUAR				0x0034

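/*
 * micro-TLB registers are split in two banks: uTLBs 0..31 live at one base
 * offset, uTLBs 32..47 at another, hence the indirection below.
 */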
#define IMUCTR(n)		((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)		(0x0300 + ((n) * 16))
#define IMUCTR32(n)		(0x0600 + (((n) - 32) * 16))
#define IMUCTR_TTSEL_MMU(n)	((n) << 4)
#define IMUCTR_FLUSH		(1 << 1)
#define IMUCTR_MMUEN		(1 << 0)

#define IMUASID(n)		((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)		(0x0308 + ((n) * 16))
#define IMUASID32(n)		(0x0608 + (((n) - 32) * 16))

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg,
			    u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

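/* Wait for any pending TLB invalidations to complete */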
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);

	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
			   IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
	.tlb_flush_leaf = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */
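
/*
 * Find an unused context slot on @mmu and bind @domain to it. Returns the
 * context index on success or -EBUSY when all contexts are already in use.
 */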
static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

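/* Program the context registers (TTBR, TTBCR, MAIR0) and enable the MMU. */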
static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure
	 * memory access, Long-descriptor format" that the NStable bit being
	 * set in a table descriptor will result in the NStable and NS bits of
	 * all child entries being ignored and considered as being set. The
	 * IPMMU seems not to comply with this, as it generates a secure access
	 * page fault if any of the NStable and NS bits isn't set when running
	 * in non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */
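
/* Handle a fault reported by a single translation context. */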
static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			/*
			 * Free the containing ipmmu_vmsa_domain, not the
			 * embedded iommu_domain, which is not the start of
			 * the allocation.
			 */
			kfree(to_vmsa_domain(io_domain));
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

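/*
 * Attach a device to a domain: initialize the context on first use, then
 * point each of the device's micro-TLBs at that context.
 */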
static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};

static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3 use a white list to opt-in slave devices.
	 * For other SoCs, this returns true anyway.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 can use the IPMMU correctly or not */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will call multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}
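
/*
 * Illustrative device-tree usage (not part of this file; the node names and
 * micro-TLB index below are made up): a bus master opts in to translation
 * with a single-cell "iommus" specifier, which ipmmu_of_xlate() receives as
 * spec->args[0]:
 *
 *	&some_master {
 *		iommus = <&ipmmu_vi0 8>;
 *	};
 */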

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TODO: Share the mapping amongst
	 *   devices in the same context?).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/*
	 * Only let through devices that have been verified in xlate().
	 */
	if (!mmu)
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = ipmmu_init_arm_mapping(dev);
		if (ret)
			return ret;
	} else {
		group = iommu_group_get_for_dev(dev);
		if (IS_ERR(group))
			return PTR_ERR(group);

		iommu_group_put(group);
	}

	iommu_device_link(&mmu->iommu, dev);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	iommu_device_unlink(&mmu->iommu, dev);
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

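/* Feature set of the original IPMMU-VMSA: a single context and 32 micro-TLBs. */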
static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

/* Feature set of the R-Car Gen3 IPMMU: 8 contexts, 48 micro-TLBs, main/cache split. */
static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is currently not
	 * implemented in the driver; when the feature flag is set, point the
	 * base address at the non-secure alias of the register bank.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM devices)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */
	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

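/*
 * Hardware state is lost across suspend/resume: on resume, reset the root
 * IPMMU, reprogram every active context and re-enable the micro-TLBs recorded
 * in utlb_ctx[].
 */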
#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove	= ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);