// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS 0x2000

/* indices into the clk_bulk_data array in struct qcom_iommu_dev */
enum qcom_iommu_clk {
        CLK_IFACE,
        CLK_BUS,
        CLK_TBU,
        CLK_NUM,
};

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
        /* IOMMU core code handle */
        struct iommu_device iommu;
        struct device *dev;
        struct clk_bulk_data clks[CLK_NUM];
        void __iomem *local_base;
        u32 sec_id;
        u8 num_ctxs;
        struct qcom_iommu_ctx *ctxs[];  /* indexed by asid-1 */
};

struct qcom_iommu_ctx {
        struct device *dev;
        void __iomem *base;
        bool secure_init;
        u8 asid;        /* asid and ctx bank # are 1:1 */
        struct iommu_domain *domain;
};

struct qcom_iommu_domain {
        struct io_pgtable_ops *pgtbl_ops;
        spinlock_t pgtbl_lock;
        struct mutex init_mutex;        /* Protects iommu pointer */
        struct iommu_domain domain;
        struct qcom_iommu_dev *iommu;
        struct iommu_fwspec *fwspec;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev * to_iommu(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        if (!fwspec || fwspec->ops != &qcom_iommu_ops)
                return NULL;

        return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
        struct qcom_iommu_dev *qcom_iommu = d->iommu;
        if (!qcom_iommu)
                return NULL;
        return qcom_iommu->ctxs[asid - 1];
}

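/*
 * Note: plain MMIO wrappers around the context-bank registers.  These use
 * the _relaxed accessors; ordering against the hardware page-table walker
 * is provided by the explicit TLB maintenance (see qcom_iommu_tlb_sync()),
 * not by the register writes themselves.
 */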
static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
        writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
        writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
        return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
        return readq_relaxed(ctx->base + reg);
}

static void qcom_iommu_tlb_sync(void *cookie)
{
        struct qcom_iommu_domain *qcom_domain = cookie;
        struct iommu_fwspec *fwspec = qcom_domain->fwspec;
        unsigned i;

        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
                unsigned int val, ret;

                iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

                ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
                                         (val & 0x1) == 0, 0, 5000000);
                if (ret)
                        dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
        }
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
        struct qcom_iommu_domain *qcom_domain = cookie;
        struct iommu_fwspec *fwspec = qcom_domain->fwspec;
        unsigned i;

        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
                iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
        }

        qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                                            size_t granule, bool leaf, void *cookie)
{
        struct qcom_iommu_domain *qcom_domain = cookie;
        struct iommu_fwspec *fwspec = qcom_domain->fwspec;
        unsigned i, reg;

        reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
                size_t s = size;

                iova = (iova >> 12) << 12;
                iova |= ctx->asid;
                do {
                        iommu_writel(ctx, reg, iova);
                        iova += granule;
                } while (s -= granule);
        }
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
                                      size_t granule, void *cookie)
{
        qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
        qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
                                      size_t granule, void *cookie)
{
        qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
        qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
                                    unsigned long iova, size_t granule,
                                    void *cookie)
{
        qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

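/*
 * Flush callbacks handed to io-pgtable via pgtbl_cfg.tlb in
 * qcom_iommu_init_domain(); the cookie that io-pgtable passes back into
 * each callback is the qcom_iommu_domain itself.
 */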
static const struct iommu_flush_ops qcom_flush_ops = {
        .tlb_flush_all  = qcom_iommu_tlb_inv_context,
        .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
        .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
        .tlb_add_page   = qcom_iommu_tlb_add_page,
};

static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
        struct qcom_iommu_ctx *ctx = dev;
        u32 fsr, fsynr;
        u64 iova;

        fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

        if (!(fsr & ARM_SMMU_FSR_FAULT))
                return IRQ_NONE;

        fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
        iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

        if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
                dev_err_ratelimited(ctx->dev,
                                    "Unhandled context fault: fsr=0x%x, iova=0x%016llx, fsynr=0x%x, cb=%d\n",
                                    fsr, iova, fsynr, ctx->asid);
        }

        iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
        iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

        return IRQ_HANDLED;
}

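/*
 * Program the context banks of a domain for stage-1 LPAE translation.
 * Called on first attach, with the IOMMU clocked (the caller holds a
 * pm_runtime reference).  TTBR0/TCR/MAIR come from the io-pgtable
 * configuration; setting ARM_SMMU_SCTLR_M is what finally enables
 * translation for each bank.
 */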
static int qcom_iommu_init_domain(struct iommu_domain *domain,
                                  struct qcom_iommu_dev *qcom_iommu,
                                  struct device *dev)
{
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct io_pgtable_ops *pgtbl_ops;
        struct io_pgtable_cfg pgtbl_cfg;
        int i, ret = 0;
        u32 reg;

        mutex_lock(&qcom_domain->init_mutex);
        if (qcom_domain->iommu)
                goto out_unlock;

        pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap  = qcom_iommu_ops.pgsize_bitmap,
                .ias            = 32,
                .oas            = 40,
                .tlb            = &qcom_flush_ops,
                .iommu_dev      = qcom_iommu->dev,
        };

        qcom_domain->iommu = qcom_iommu;
        qcom_domain->fwspec = fwspec;

        pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
        if (!pgtbl_ops) {
                dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
                ret = -ENOMEM;
                goto out_clear_iommu;
        }

        /* Update the domain's page sizes to reflect the page table format */
        domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
        domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
        domain->geometry.force_aperture = true;

        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

                if (!ctx->secure_init) {
                        ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
                        if (ret) {
                                dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
                                goto out_clear_iommu;
                        }
                        ctx->secure_init = true;
                }

                /* TTBRs */
                iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
                             pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
                             FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
                iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

                /* TCR */
                iommu_writel(ctx, ARM_SMMU_CB_TCR2,
                             arm_smmu_lpae_tcr2(&pgtbl_cfg));
                iommu_writel(ctx, ARM_SMMU_CB_TCR,
                             arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

                /* MAIRs (stage-1 only) */
                iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
                             pgtbl_cfg.arm_lpae_s1_cfg.mair);
                iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
                             pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

                /* SCTLR */
                reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
                      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
                      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
                      ARM_SMMU_SCTLR_CFCFG;

                if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                        reg |= ARM_SMMU_SCTLR_E;

                iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

                ctx->domain = domain;
        }

        mutex_unlock(&qcom_domain->init_mutex);

        /* Publish page table ops for map/unmap */
        qcom_domain->pgtbl_ops = pgtbl_ops;

        return 0;

out_clear_iommu:
        qcom_domain->iommu = NULL;
out_unlock:
        mutex_unlock(&qcom_domain->init_mutex);
        return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
        struct qcom_iommu_domain *qcom_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
                return NULL;
        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
         * master.
         */
        qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
        if (!qcom_domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&qcom_domain->domain)) {
                kfree(qcom_domain);
                return NULL;
        }

        mutex_init(&qcom_domain->init_mutex);
        spin_lock_init(&qcom_domain->pgtbl_lock);

        return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

        iommu_put_dma_cookie(domain);

        if (qcom_domain->iommu) {
                /*
                 * NOTE: unmap can be called after client device is powered
                 * off, for example, with GPUs or anything involving dma-buf.
                 * So we cannot rely on the device_link.  Make sure the IOMMU
                 * is on to avoid unclocked accesses in the TLB inv path:
                 */
                pm_runtime_get_sync(qcom_domain->iommu->dev);
                free_io_pgtable_ops(qcom_domain->pgtbl_ops);
                pm_runtime_put_sync(qcom_domain->iommu->dev);
        }

        kfree(qcom_domain);
}

static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        int ret;

        if (!qcom_iommu) {
                dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
                return -ENXIO;
        }

        /* Ensure that the domain is finalized */
        pm_runtime_get_sync(qcom_iommu->dev);
        ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
        pm_runtime_put_sync(qcom_iommu->dev);
        if (ret < 0)
                return ret;

        /*
         * Sanity check the domain. We don't support domains across
         * different IOMMUs.
         */
        if (qcom_domain->iommu != qcom_iommu) {
                dev_err(dev, "cannot attach to IOMMU %s while already attached to domain on IOMMU %s\n",
                        dev_name(qcom_domain->iommu->dev),
                        dev_name(qcom_iommu->dev));
                return -EINVAL;
        }

        return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
        unsigned i;

        if (WARN_ON(!qcom_domain->iommu))
                return;

        pm_runtime_get_sync(qcom_iommu->dev);
        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

                /* Disable the context bank: */
                iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

                ctx->domain = NULL;
        }
        pm_runtime_put_sync(qcom_iommu->dev);
}

static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        int ret;
        unsigned long flags;
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

        if (!ops)
                return -ENODEV;

        spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
        ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
        spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
        return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size, struct iommu_iotlb_gather *gather)
{
        size_t ret;
        unsigned long flags;
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

        if (!ops)
                return 0;

        /* NOTE: unmap can be called after client device is powered off,
         * for example, with GPUs or anything involving dma-buf.  So we
         * cannot rely on the device_link.  Make sure the IOMMU is on to
         * avoid unclocked accesses in the TLB inv path:
         */
        pm_runtime_get_sync(qcom_domain->iommu->dev);
        spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
        ret = ops->unmap(ops, iova, size, gather);
        spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
        pm_runtime_put_sync(qcom_domain->iommu->dev);

        return ret;
}

static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
                                                  struct io_pgtable, ops);
        if (!qcom_domain->pgtbl_ops)
                return;

        pm_runtime_get_sync(qcom_domain->iommu->dev);
        qcom_iommu_tlb_sync(pgtable->cookie);
        pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *gather)
{
        qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        phys_addr_t ret;
        unsigned long flags;
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

        if (!ops)
                return 0;

        spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
        ret = ops->iova_to_phys(ops, iova);
        spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

        return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                /*
                 * Return true here as the SMMU can always send out coherent
                 * requests.
                 */
                return true;
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
                return false;
        }
}

static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
        struct device_link *link;

        if (!qcom_iommu)
                return ERR_PTR(-ENODEV);

        /*
         * Establish the link between iommu and master, so that the
         * iommu gets runtime enabled/disabled as the master needs.
         */
        link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
        if (!link) {
                dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
                        dev_name(qcom_iommu->dev), dev_name(dev));
                return ERR_PTR(-ENODEV);
        }

        return &qcom_iommu->iommu;
}

static void qcom_iommu_release_device(struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);

        if (!qcom_iommu)
                return;

        iommu_fwspec_free(dev);
}

static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct qcom_iommu_dev *qcom_iommu;
        struct platform_device *iommu_pdev;
        unsigned asid = args->args[0];

        if (args->args_count != 1) {
                dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 1)\n",
                        args->np->full_name, args->args_count);
                return -EINVAL;
        }

        iommu_pdev = of_find_device_by_node(args->np);
        if (WARN_ON(!iommu_pdev))
                return -EINVAL;

        qcom_iommu = platform_get_drvdata(iommu_pdev);

        /* make sure the asid specified in dt is valid, so we don't have
         * to sanity check this elsewhere, since 'asid - 1' is used to
         * index into qcom_iommu->ctxs:
         */
        if (WARN_ON(asid < 1) ||
            WARN_ON(asid > qcom_iommu->num_ctxs))
                return -EINVAL;

        if (!dev_iommu_priv_get(dev)) {
                dev_iommu_priv_set(dev, qcom_iommu);
        } else {
                /* make sure devices iommus dt node isn't referring to
                 * multiple different iommu devices.  Multiple context
                 * banks are ok, but multiple devices are not:
                 */
                if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
                        return -EINVAL;
        }

        return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
        .capable        = qcom_iommu_capable,
        .domain_alloc   = qcom_iommu_domain_alloc,
        .domain_free    = qcom_iommu_domain_free,
        .attach_dev     = qcom_iommu_attach_dev,
        .detach_dev     = qcom_iommu_detach_dev,
        .map            = qcom_iommu_map,
        .unmap          = qcom_iommu_unmap,
        .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
        .iotlb_sync     = qcom_iommu_iotlb_sync,
        .iova_to_phys   = qcom_iommu_iova_to_phys,
        .probe_device   = qcom_iommu_probe_device,
        .release_device = qcom_iommu_release_device,
        .device_group   = generic_device_group,
        .of_xlate       = qcom_iommu_of_xlate,
        .pgsize_bitmap  = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

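/*
 * One-time handoff of secure pagetable memory to the secure world.  The
 * first SCM call reports how much memory the secure IOMMU needs; that
 * buffer is allocated with DMA_ATTR_NO_KERNEL_MAPPING (the kernel never
 * touches it) and, once qcom_scm_iommu_secure_ptbl_init() succeeds, it is
 * never reclaimed, hence the static 'allocated' latch below.
 */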
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
        size_t psize = 0;
        unsigned int spare = 0;
        void *cpu_addr;
        dma_addr_t paddr;
        unsigned long attrs;
        static bool allocated = false;
        int ret;

        if (allocated)
                return 0;

        ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
        if (ret) {
                dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
                        ret);
                return ret;
        }

        dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

        attrs = DMA_ATTR_NO_KERNEL_MAPPING;

        cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
        if (!cpu_addr) {
                dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
                        psize);
                return -ENOMEM;
        }

        ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
        if (ret) {
                dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
                goto free_mem;
        }

        allocated = true;
        return 0;

free_mem:
        dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
        return ret;
}

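/*
 * Map a context bank's "reg" offset to its asid.  Illustrative example
 * (assuming the usual 0x1000-sized, 0x1000-aligned banks implied by the
 * division below): a child node with reg = <0x3000 0x1000> yields asid 3,
 * which qcom_iommu_ctx_probe() then stores at qcom_iommu->ctxs[2].
 */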
static int get_asid(const struct device_node *np)
{
        u32 reg;

        /* read the "reg" property directly to get the relative address
         * of the context bank, and calculate the asid from that:
         */
        if (of_property_read_u32_index(np, "reg", 0, &reg))
                return -ENODEV;

        return reg / 0x1000;
}

static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
        struct qcom_iommu_ctx *ctx;
        struct device *dev = &pdev->dev;
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
        struct resource *res;
        int ret, irq;

        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->dev = dev;
        platform_set_drvdata(pdev, ctx);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ctx->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(ctx->base))
                return PTR_ERR(ctx->base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return -ENODEV;

        /* clear IRQs before registering fault handler, just in case the
         * boot-loader left us a surprise:
         */
        iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

        ret = devm_request_irq(dev, irq,
                               qcom_iommu_fault,
                               IRQF_SHARED,
                               "qcom-iommu-fault",
                               ctx);
        if (ret) {
                dev_err(dev, "failed to request IRQ %u\n", irq);
                return ret;
        }

        ret = get_asid(dev->of_node);
        if (ret < 0) {
                dev_err(dev, "missing reg property\n");
                return ret;
        }

        ctx->asid = ret;

        dev_dbg(dev, "found asid %u\n", ctx->asid);

        qcom_iommu->ctxs[ctx->asid - 1] = ctx;

        return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
        struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

        platform_set_drvdata(pdev, NULL);

        qcom_iommu->ctxs[ctx->asid - 1] = NULL;

        return 0;
}

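/*
 * Rough shape of the devicetree these two drivers bind against
 * (illustrative sketch only; the unit addresses, secure-id and interrupt
 * below are made up, see the qcom,iommu binding for the authoritative
 * layout).  Each context bank is a child node of the iommu node:
 *
 *      apps_iommu: iommu@1e20000 {
 *              compatible = "qcom,msm-iommu-v1";
 *              qcom,iommu-secure-id = <17>;
 *              ...
 *              iommu-ctx@3000 {
 *                      compatible = "qcom,msm-iommu-v1-ns";
 *                      reg = <0x3000 0x1000>;
 *                      interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
 *              };
 *      };
 */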
static const struct of_device_id ctx_of_match[] = {
        { .compatible = "qcom,msm-iommu-v1-ns" },
        { .compatible = "qcom,msm-iommu-v1-sec" },
        { }
};

static struct platform_driver qcom_iommu_ctx_driver = {
        .driver = {
                .name           = "qcom-iommu-ctx",
                .of_match_table = of_match_ptr(ctx_of_match),
        },
        .probe  = qcom_iommu_ctx_probe,
        .remove = qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
        struct device_node *child;

        for_each_child_of_node(qcom_iommu->dev->of_node, child)
                if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
                        return true;

        return false;
}

static int qcom_iommu_device_probe(struct platform_device *pdev)
{
        struct device_node *child;
        struct qcom_iommu_dev *qcom_iommu;
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct clk *clk;
        int ret, max_asid = 0;

        /* find the max asid (which is 1:1 to ctx bank idx), so we know how
         * many child ctx devices we have:
         */
        for_each_child_of_node(dev->of_node, child)
                max_asid = max(max_asid, get_asid(child));

        qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
                                  GFP_KERNEL);
        if (!qcom_iommu)
                return -ENOMEM;
        qcom_iommu->num_ctxs = max_asid;
        qcom_iommu->dev = dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res) {
                qcom_iommu->local_base = devm_ioremap_resource(dev, res);
                if (IS_ERR(qcom_iommu->local_base))
                        return PTR_ERR(qcom_iommu->local_base);
        }

        clk = devm_clk_get(dev, "iface");
        if (IS_ERR(clk)) {
                dev_err(dev, "failed to get iface clock\n");
                return PTR_ERR(clk);
        }
        qcom_iommu->clks[CLK_IFACE].clk = clk;

        clk = devm_clk_get(dev, "bus");
        if (IS_ERR(clk)) {
                dev_err(dev, "failed to get bus clock\n");
                return PTR_ERR(clk);
        }
        qcom_iommu->clks[CLK_BUS].clk = clk;

        clk = devm_clk_get_optional(dev, "tbu");
        if (IS_ERR(clk)) {
                dev_err(dev, "failed to get tbu clock\n");
                return PTR_ERR(clk);
        }
        qcom_iommu->clks[CLK_TBU].clk = clk;

        if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
                                 &qcom_iommu->sec_id)) {
                dev_err(dev, "missing qcom,iommu-secure-id property\n");
                return -ENODEV;
        }

        if (qcom_iommu_has_secure_context(qcom_iommu)) {
                ret = qcom_iommu_sec_ptbl_init(dev);
                if (ret) {
                        dev_err(dev, "cannot init secure pg table(%d)\n", ret);
                        return ret;
                }
        }

        platform_set_drvdata(pdev, qcom_iommu);

        pm_runtime_enable(dev);

        /* register context bank devices, which are child nodes: */
        ret = devm_of_platform_populate(dev);
        if (ret) {
                dev_err(dev, "Failed to populate iommu contexts\n");
                return ret;
        }

        ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
                                     dev_name(dev));
        if (ret) {
                dev_err(dev, "Failed to register iommu in sysfs\n");
                return ret;
        }

        iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
        iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

        ret = iommu_device_register(&qcom_iommu->iommu);
        if (ret) {
                dev_err(dev, "Failed to register iommu\n");
                return ret;
        }

        bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

        if (qcom_iommu->local_base) {
                pm_runtime_get_sync(dev);
                writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
                pm_runtime_put_sync(dev);
        }

        return 0;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
        struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

        bus_set_iommu(&platform_bus_type, NULL);

        pm_runtime_force_suspend(&pdev->dev);
        platform_set_drvdata(pdev, NULL);
        iommu_device_sysfs_remove(&qcom_iommu->iommu);
        iommu_device_unregister(&qcom_iommu->iommu);

        return 0;
}

static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

        return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

        clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);

        return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
        SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
        { .compatible = "qcom,msm-iommu-v1" },
        { }
};

static struct platform_driver qcom_iommu_driver = {
        .driver = {
                .name           = "qcom-iommu",
                .of_match_table = of_match_ptr(qcom_iommu_of_match),
                .pm             = &qcom_iommu_pm_ops,
        },
        .probe  = qcom_iommu_device_probe,
        .remove = qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&qcom_iommu_ctx_driver);
        if (ret)
                return ret;

        ret = platform_driver_register(&qcom_iommu_driver);
        if (ret)
                platform_driver_unregister(&qcom_iommu_ctx_driver);

        return ret;
}
device_initcall(qcom_iommu_init);