// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

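/*
 * Offset of the interrupt-select register in the implementation-defined
 * global space.  Writing all-ones here (see qcom_iommu_device_probe())
 * steers context-bank interrupts to the non-secure world so Linux sees
 * the faults; the per-bit meaning is an assumption based on that use.
 */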
#define SMMU_INTR_SEL_NS 0x2000

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device iommu;
	struct device *dev;
	struct clk *iface_clk;
	struct clk *bus_clk;
	void __iomem *local_base;
	u32 sec_id;
	u8 num_ctxs;
	struct qcom_iommu_ctx *ctxs[];	/* indexed by asid - 1 */
};
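
/*
 * Note: ctxs[] is a flexible array sized via struct_size() in
 * qcom_iommu_device_probe() from the largest asid found in the DT, so
 * the asid - 1 lookups elsewhere stay in bounds for every context bank.
 */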

struct qcom_iommu_ctx {
	struct device *dev;
	void __iomem *base;
	bool secure_init;
	u8 asid;	/* asid and ctx bank # are 1:1 */
	struct iommu_domain *domain;
};

struct qcom_iommu_domain {
	struct io_pgtable_ops *pgtbl_ops;
	spinlock_t pgtbl_lock;
	struct mutex init_mutex;	/* protects iommu pointer */
	struct iommu_domain domain;
	struct qcom_iommu_dev *iommu;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev *to_iommu(struct iommu_fwspec *fwspec)
{
	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;
	return fwspec->iommu_priv;
}

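/*
 * The asid values handed to to_ctx() are the per-master ids programmed
 * into the fwspec by qcom_iommu_of_xlate() below, which rejects anything
 * outside 1..num_ctxs; that is what makes the asid - 1 indexing safe.
 */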
static struct qcom_iommu_ctx *to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	if (!qcom_iommu)
		return NULL;
	return qcom_iommu->ctxs[asid - 1];
}

static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}

static void qcom_iommu_tlb_sync(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}

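/*
 * For stage-1 TLBIVA/TLBIVAL the register takes the page-aligned VA in
 * the upper bits with the ASID in the low byte, which is why the iova is
 * truncated to a 4K boundary and or'd with the context's asid below.
 */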
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

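/*
 * These callbacks are handed to the io-pgtable code through pgtbl_cfg.tlb
 * in qcom_iommu_init_domain(), with the master's iommu_fwspec as cookie,
 * so each TLB operation fans out to every context bank the master uses.
 */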
static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all = qcom_iommu_tlb_inv_context,
	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
	.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
	.tlb_add_page = qcom_iommu_tlb_add_page,
};

static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	/* clear the fault and terminate the stalled transaction: */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);

	return IRQ_HANDLED;
}

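/*
 * Lazily finalise the domain on first attach: allocate the 32-bit LPAE
 * stage-1 page table and program every context bank the master uses.
 * The caller (qcom_iommu_attach_dev) holds a runtime-PM reference.
 */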
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct iommu_fwspec *fwspec)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 40,
		.tlb = &qcom_flush_ops,
		.iommu_dev = qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
			     FIELD_PREP(TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
			     FIELD_PREP(TTBRn_ASID, ctx->asid));

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
			     (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
			     FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     pgtbl_cfg.arm_lpae_s1_cfg.tcr);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair[0]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair[1]);

		/* SCTLR */
		reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
		      SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
			reg |= SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
	struct qcom_iommu_domain *qcom_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&qcom_domain->domain)) {
		kfree(qcom_domain);
		return NULL;
	}

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	iommu_put_dma_cookie(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link.  Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}

static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	unsigned i;

	if (!qcom_domain->iommu)
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);

	qcom_domain->iommu = NULL;
}

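/*
 * map/unmap proxy into the io-pgtable ops under pgtbl_lock.  Only unmap
 * takes a runtime-PM reference: freeing table pages triggers TLB
 * maintenance, while map never touches the SMMU registers.
 */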
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}
441
442static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
443 size_t size, struct iommu_iotlb_gather *gather)
444{
445 size_t ret;
446 unsigned long flags;
447 struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
448 struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;
449
450 if (!ops)
451 return 0;
452
453
454
455
456
457
458 pm_runtime_get_sync(qcom_domain->iommu->dev);
459 spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
460 ret = ops->unmap(ops, iova, size, gather);
461 spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
462 pm_runtime_put_sync(qcom_domain->iommu->dev);
463
464 return ret;
465}

static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int qcom_iommu_add_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
	struct iommu_group *group;
	struct device_link *link;

	if (!qcom_iommu)
		return -ENODEV;

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return -ENODEV;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR_OR_NULL(group))
		return PTR_ERR_OR_ZERO(group);

	iommu_group_put(group);
	iommu_device_link(&qcom_iommu->iommu, dev);

	return 0;
}

static void qcom_iommu_remove_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));

	if (!qcom_iommu)
		return;

	iommu_device_unlink(&qcom_iommu->iommu, dev);
	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}

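/*
 * Hypothetical sketch of the DT wiring that feeds of_xlate below: each
 * "iommus" entry carries one cell, the context-bank asid, e.g.
 *
 *	gpu@1c00000 {
 *		...
 *		iommus = <&apps_iommu 1>;
 *	};
 *
 * (node name and asid are made up for illustration)
 */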
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s "
			"(found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/* make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere, since 'asid - 1' is used to
	 * index into qcom_iommu->ctxs:
	 */
	if (WARN_ON(asid < 1) ||
	    WARN_ON(asid > qcom_iommu->num_ctxs))
		return -EINVAL;

	if (!fwspec->iommu_priv) {
		fwspec->iommu_priv = qcom_iommu;
	} else {
		/* make sure devices iommus dt node isn't referring to
		 * multiple different iommu devices.  Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != fwspec->iommu_priv))
			return -EINVAL;
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
	.capable = qcom_iommu_capable,
	.domain_alloc = qcom_iommu_domain_alloc,
	.domain_free = qcom_iommu_domain_free,
	.attach_dev = qcom_iommu_attach_dev,
	.detach_dev = qcom_iommu_detach_dev,
	.map = qcom_iommu_map,
	.unmap = qcom_iommu_unmap,
	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
	.iotlb_sync = qcom_iommu_iotlb_sync,
	.iova_to_phys = qcom_iommu_iova_to_phys,
	.add_device = qcom_iommu_add_device,
	.remove_device = qcom_iommu_remove_device,
	.device_group = generic_device_group,
	.of_xlate = qcom_iommu_of_xlate,
	.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}

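/*
 * Secure page tables are owned by TrustZone: Linux only donates a chunk
 * of DMA memory (never mapped into the kernel, hence
 * DMA_ATTR_NO_KERNEL_MAPPING) and hands it over via SCM calls; the
 * static 'allocated' latch makes this a one-shot setup.
 */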
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}

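/*
 * Worked example, assuming the usual 0x1000 stride below: a context bank
 * at unit address 0x3000 inside the IOMMU node yields asid 3, which
 * qcom_iommu_ctx_probe() then stores in qcom_iommu->ctxs[2].
 */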
static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;	/* context banks are 0x1000 apart */
}

static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}

static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver = {
		.name = "qcom-iommu-ctx",
		.of_match_table = of_match_ptr(ctx_of_match),
	},
	.probe = qcom_iommu_ctx_probe,
	.remove = qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child)
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
			return true;

	return false;
}

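/*
 * The "qcom,msm-iommu-v1" node is the parent device; each context bank
 * is a child node bound to qcom_iommu_ctx_driver via
 * devm_of_platform_populate() below, after which the parent registers
 * itself with the IOMMU core.
 */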
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, max_asid = 0;

	/* find max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->num_ctxs = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);

	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(qcom_iommu->iface_clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(qcom_iommu->iface_clk);
	}

	qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(qcom_iommu->bus_clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(qcom_iommu->bus_clk);
	}

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		return ret;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
	iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

	ret = iommu_device_register(&qcom_iommu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}

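/*
 * Runtime PM here is just clock gating: resume re-enables the iface and
 * bus clocks, suspend drops them, which is why register paths bracket
 * their accesses with pm_runtime_get_sync()/pm_runtime_put_sync().
 */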
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ }
};

static struct platform_driver qcom_iommu_driver = {
	.driver = {
		.name = "qcom-iommu",
		.of_match_table = of_match_ptr(qcom_iommu_of_match),
		.pm = &qcom_iommu_pm_ops,
	},
	.probe = qcom_iommu_device_probe,
	.remove = qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);