// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Author: Will Deacon <will@kernel.org>
 */

30#define pr_fmt(fmt) "arm-smmu: " fmt
31
32#include <linux/acpi.h>
33#include <linux/acpi_iort.h>
34#include <linux/bitfield.h>
35#include <linux/delay.h>
36#include <linux/dma-iommu.h>
37#include <linux/dma-mapping.h>
38#include <linux/err.h>
39#include <linux/interrupt.h>
40#include <linux/io.h>
41#include <linux/iopoll.h>
42#include <linux/module.h>
43#include <linux/of.h>
44#include <linux/of_address.h>
45#include <linux/of_device.h>
46#include <linux/pci.h>
47#include <linux/platform_device.h>
48#include <linux/pm_runtime.h>
49#include <linux/ratelimit.h>
50#include <linux/slab.h>
51
52#include <linux/amba/bus.h>
53
54#include "arm-smmu.h"
55
56
57
58
59
60
61
62
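/*
 * Dummy value for registers whose write data is ignored (TLB invalidate/sync
 * triggers); -1 is used rather than 0, reportedly to avoid zero-register
 * stores that some Qualcomm hypervisors mishandle when trapping SMMU accesses.
 */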
63#define QCOM_DUMMY_VAL -1
64
65#define MSI_IOVA_BASE 0x8000000
66#define MSI_IOVA_LENGTH 0x100000
67
68static int force_stage;
69module_param(force_stage, int, S_IRUGO);
70MODULE_PARM_DESC(force_stage,
71 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
72static bool disable_bypass =
73 IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
74module_param(disable_bypass, bool, S_IRUGO);
75MODULE_PARM_DESC(disable_bypass,
76 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
77
78#define s2cr_init_val (struct arm_smmu_s2cr){ \
79 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
80}
81
82static bool using_legacy_binding, using_generic_binding;
83
84static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
85{
86 if (pm_runtime_enabled(smmu->dev))
87 return pm_runtime_resume_and_get(smmu->dev);
88
89 return 0;
90}
91
92static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
93{
94 if (pm_runtime_enabled(smmu->dev))
95 pm_runtime_put_autosuspend(smmu->dev);
96}
97
98static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
99{
100 return container_of(dom, struct arm_smmu_domain, domain);
101}
102
103static struct platform_driver arm_smmu_driver;
104static struct iommu_ops arm_smmu_ops;
105
106#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
107static int arm_smmu_bus_init(struct iommu_ops *ops);
108
109static struct device_node *dev_get_dev_node(struct device *dev)
110{
111 if (dev_is_pci(dev)) {
112 struct pci_bus *bus = to_pci_dev(dev)->bus;
113
114 while (!pci_is_root_bus(bus))
115 bus = bus->parent;
116 return of_node_get(bus->bridge->parent->of_node);
117 }
118
119 return of_node_get(dev->of_node);
120}
121
122static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
123{
124 *((__be32 *)data) = cpu_to_be32(alias);
125 return 0;
126}
127
128static int __find_legacy_master_phandle(struct device *dev, void *data)
129{
130 struct of_phandle_iterator *it = *(void **)data;
131 struct device_node *np = it->node;
132 int err;
133
134 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
135 "#stream-id-cells", -1)
136 if (it->node == np) {
137 *(void **)data = dev;
138 return 1;
139 }
140 it->node = np;
141 return err == -ENOENT ? 0 : err;
142}
143
144static int arm_smmu_register_legacy_master(struct device *dev,
145 struct arm_smmu_device **smmu)
146{
147 struct device *smmu_dev;
148 struct device_node *np;
149 struct of_phandle_iterator it;
	void *data = &it;
151 u32 *sids;
152 __be32 pci_sid;
153 int err;
154
155 np = dev_get_dev_node(dev);
156 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
157 of_node_put(np);
158 return -ENODEV;
159 }
160
161 it.node = np;
162 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
163 __find_legacy_master_phandle);
164 smmu_dev = data;
165 of_node_put(np);
166 if (err == 0)
167 return -ENODEV;
168 if (err < 0)
169 return err;
170
171 if (dev_is_pci(dev)) {
172
173 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
174 &pci_sid);
175 it.cur = &pci_sid;
176 it.cur_count = 1;
177 }
178
179 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
180 &arm_smmu_ops);
181 if (err)
182 return err;
183
184 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
185 if (!sids)
186 return -ENOMEM;
187
188 *smmu = dev_get_drvdata(smmu_dev);
189 of_phandle_iterator_args(&it, sids, it.cur_count);
190 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
191 kfree(sids);
192 return err;
193}
194
195
196
197
198
199
200
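/*
 * With the legacy DT binding there are no guarantees about probe order, so
 * defer installing the bus ops until every possible SMMU has had a chance
 * to probe.
 */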
201static int arm_smmu_legacy_bus_init(void)
202{
203 if (using_legacy_binding)
204 return arm_smmu_bus_init(&arm_smmu_ops);
205 return 0;
206}
207device_initcall_sync(arm_smmu_legacy_bus_init);
208#else
209static int arm_smmu_register_legacy_master(struct device *dev,
210 struct arm_smmu_device **smmu)
211{
212 return -ENODEV;
213}
214#endif
215
216static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
217{
218 clear_bit(idx, map);
219}
220
221
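/* Issue a TLB sync and spin until it completes, warning on timeout */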
222static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
223 int sync, int status)
224{
225 unsigned int spin_cnt, delay;
226 u32 reg;
227
228 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
229 return smmu->impl->tlb_sync(smmu, page, sync, status);
230
231 arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
232 for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
233 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
234 reg = arm_smmu_readl(smmu, page, status);
235 if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
236 return;
237 cpu_relax();
238 }
239 udelay(delay);
240 }
241 dev_err_ratelimited(smmu->dev,
242 "TLB sync timed out -- SMMU may be deadlocked\n");
243}
244
245static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
246{
247 unsigned long flags;
248
249 spin_lock_irqsave(&smmu->global_sync_lock, flags);
250 __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
251 ARM_SMMU_GR0_sTLBGSTATUS);
252 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
253}
254
255static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
256{
257 struct arm_smmu_device *smmu = smmu_domain->smmu;
258 unsigned long flags;
259
260 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
261 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
262 ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
263 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
264}
265
266static void arm_smmu_tlb_inv_context_s1(void *cookie)
267{
268 struct arm_smmu_domain *smmu_domain = cookie;
269
270
271
272
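	/*
	 * Ensure that page-table updates made by this CPU are visible
	 * before the (relaxed) TLBI register write.
	 */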
273 wmb();
274 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
275 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
276 arm_smmu_tlb_sync_context(smmu_domain);
277}
278
279static void arm_smmu_tlb_inv_context_s2(void *cookie)
280{
281 struct arm_smmu_domain *smmu_domain = cookie;
282 struct arm_smmu_device *smmu = smmu_domain->smmu;
283
284
285 wmb();
286 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
287 arm_smmu_tlb_sync_global(smmu);
288}
289
290static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
291 size_t granule, void *cookie, int reg)
292{
293 struct arm_smmu_domain *smmu_domain = cookie;
294 struct arm_smmu_device *smmu = smmu_domain->smmu;
295 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
296 int idx = cfg->cbndx;
297
298 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
299 wmb();
300
301 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
302 iova = (iova >> 12) << 12;
303 iova |= cfg->asid;
304 do {
305 arm_smmu_cb_write(smmu, idx, reg, iova);
306 iova += granule;
307 } while (size -= granule);
308 } else {
309 iova >>= 12;
310 iova |= (u64)cfg->asid << 48;
311 do {
312 arm_smmu_cb_writeq(smmu, idx, reg, iova);
313 iova += granule >> 12;
314 } while (size -= granule);
315 }
316}
317
318static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
319 size_t granule, void *cookie, int reg)
320{
321 struct arm_smmu_domain *smmu_domain = cookie;
322 struct arm_smmu_device *smmu = smmu_domain->smmu;
323 int idx = smmu_domain->cfg.cbndx;
324
325 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
326 wmb();
327
328 iova >>= 12;
329 do {
330 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
331 arm_smmu_cb_writeq(smmu, idx, reg, iova);
332 else
333 arm_smmu_cb_write(smmu, idx, reg, iova);
334 iova += granule >> 12;
335 } while (size -= granule);
336}
337
338static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
339 size_t granule, void *cookie)
340{
341 arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
342 ARM_SMMU_CB_S1_TLBIVA);
343 arm_smmu_tlb_sync_context(cookie);
344}
345
346static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
347 unsigned long iova, size_t granule,
348 void *cookie)
349{
350 arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
351 ARM_SMMU_CB_S1_TLBIVAL);
352}
353
354static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
355 size_t granule, void *cookie)
356{
357 arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
358 ARM_SMMU_CB_S2_TLBIIPAS2);
359 arm_smmu_tlb_sync_context(cookie);
360}
361
362static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
363 unsigned long iova, size_t granule,
364 void *cookie)
365{
366 arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
367 ARM_SMMU_CB_S2_TLBIIPAS2L);
368}
369
370static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
371 size_t granule, void *cookie)
372{
373 arm_smmu_tlb_inv_context_s2(cookie);
374}
375
376
377
378
379
380
381
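/*
 * These SMMUv1 stage-2 ops invalidate by VMID rather than by address;
 * the TLB sync is issued separately.
 */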
382static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
383 unsigned long iova, size_t granule,
384 void *cookie)
385{
386 struct arm_smmu_domain *smmu_domain = cookie;
387 struct arm_smmu_device *smmu = smmu_domain->smmu;
388
389 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
390 wmb();
391
392 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
393}
394
395static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
396 .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
397 .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
398 .tlb_add_page = arm_smmu_tlb_add_page_s1,
399};
400
401static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
402 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
403 .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
404 .tlb_add_page = arm_smmu_tlb_add_page_s2,
405};
406
407static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
408 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
409 .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2_v1,
410 .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
411};
412
413static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
414{
415 u32 fsr, fsynr, cbfrsynra;
416 unsigned long iova;
417 struct iommu_domain *domain = dev;
418 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
419 struct arm_smmu_device *smmu = smmu_domain->smmu;
420 int idx = smmu_domain->cfg.cbndx;
421 int ret;
422
423 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
424 if (!(fsr & ARM_SMMU_FSR_FAULT))
425 return IRQ_NONE;
426
427 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
428 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
429 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
430
431 ret = report_iommu_fault(domain, NULL, iova,
432 fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
433
434 if (ret == -ENOSYS)
435 dev_err_ratelimited(smmu->dev,
436 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
437 fsr, iova, fsynr, cbfrsynra, idx);
438
439 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
440 return IRQ_HANDLED;
441}
442
443static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
444{
445 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
446 struct arm_smmu_device *smmu = dev;
447 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
448 DEFAULT_RATELIMIT_BURST);
449
450 gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
451 gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
452 gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
453 gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
454
455 if (!gfsr)
456 return IRQ_NONE;
457
458 if (__ratelimit(&rs)) {
459 if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
460 (gfsr & ARM_SMMU_sGFSR_USF))
461 dev_err(smmu->dev,
462 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
463 (u16)gfsynr1);
464 else
465 dev_err(smmu->dev,
466 "Unexpected global fault, this could be serious\n");
467 dev_err(smmu->dev,
468 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
469 gfsr, gfsynr0, gfsynr1, gfsynr2);
470 }
471
472 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
473 return IRQ_HANDLED;
474}
475
476static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
477 struct io_pgtable_cfg *pgtbl_cfg)
478{
479 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
480 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
481 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
482
483 cb->cfg = cfg;
484
485
486 if (stage1) {
487 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
488 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
489 } else {
490 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
491 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
492 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
493 cb->tcr[1] |= ARM_SMMU_TCR2_AS;
494 else
495 cb->tcr[0] |= ARM_SMMU_TCR_EAE;
496 }
497 } else {
498 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
499 }
500
501
502 if (stage1) {
503 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
504 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
505 cb->ttbr[1] = 0;
506 } else {
507 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
508 cfg->asid);
509 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
510 cfg->asid);
511
512 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
513 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
514 else
515 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
516 }
517 } else {
518 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
519 }
520
521
522 if (stage1) {
523 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
524 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
525 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
526 } else {
527 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
528 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
529 }
530 }
531}
532
533void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
534{
535 u32 reg;
536 bool stage1;
537 struct arm_smmu_cb *cb = &smmu->cbs[idx];
538 struct arm_smmu_cfg *cfg = cb->cfg;
539
540
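	/* Unassigned context banks only need disabling */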
541 if (!cfg) {
542 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
543 return;
544 }
545
546 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
547
548
549 if (smmu->version > ARM_SMMU_V1) {
550 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
551 reg = ARM_SMMU_CBA2R_VA64;
552 else
553 reg = 0;
554
555 if (smmu->features & ARM_SMMU_FEAT_VMID16)
556 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
557
558 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
559 }
560
561
562 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
563 if (smmu->version < ARM_SMMU_V2)
564 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
565
566
567
568
569
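	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the TCR and PTE attributes.
	 */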
570 if (stage1) {
571 reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
572 ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
573 FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
574 ARM_SMMU_CBAR_S1_MEMATTR_WB);
575 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
576
577 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
578 }
579 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
580
581
582
583
584
585
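	/*
	 * TCR - must be written before the TTBRs, since it determines how
	 * some of their fields are interpreted.
	 */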
586 if (stage1 && smmu->version > ARM_SMMU_V1)
587 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
588 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
589
590
591 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
592 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
593 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
594 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
595 } else {
596 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
597 if (stage1)
598 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
599 cb->ttbr[1]);
600 }
601
602
603 if (stage1) {
604 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
605 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
606 }
607
608
609 reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
610 ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
611 if (stage1)
612 reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
613 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
614 reg |= ARM_SMMU_SCTLR_E;
615
616 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
617}
618
619static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
620 struct arm_smmu_device *smmu,
621 struct device *dev, unsigned int start)
622{
623 if (smmu->impl && smmu->impl->alloc_context_bank)
624 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
625
626 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
627}
628
629static int arm_smmu_init_domain_context(struct iommu_domain *domain,
630 struct arm_smmu_device *smmu,
631 struct device *dev)
632{
633 int irq, start, ret = 0;
634 unsigned long ias, oas;
635 struct io_pgtable_ops *pgtbl_ops;
636 struct io_pgtable_cfg pgtbl_cfg;
637 enum io_pgtable_fmt fmt;
638 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
639 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
640 irqreturn_t (*context_fault)(int irq, void *dev);
641
642 mutex_lock(&smmu_domain->init_mutex);
643 if (smmu_domain->smmu)
644 goto out_unlock;
645
646 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
647 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
648 smmu_domain->smmu = smmu;
649 goto out_unlock;
650 }
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
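	/*
	 * If the hardware implements only one translation stage, fall back
	 * to it regardless of which stage was requested.
	 */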
670 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
671 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
672 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
673 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
674
675
676
677
678
679
680
681
682
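	/*
	 * Pick the context format closest to the CPU's own: AArch32 LPAE if
	 * available, short-descriptor only on 32-bit non-LPAE kernels, and
	 * AArch64 when the kernel is 64-bit or nothing else fits.
	 */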
683 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
684 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
685 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
686 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
687 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
688 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
689 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
690 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
691 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
692 ARM_SMMU_FEAT_FMT_AARCH64_16K |
693 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
694 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
695
696 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
697 ret = -EINVAL;
698 goto out_unlock;
699 }
700
701 switch (smmu_domain->stage) {
702 case ARM_SMMU_DOMAIN_S1:
703 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
704 start = smmu->num_s2_context_banks;
705 ias = smmu->va_size;
706 oas = smmu->ipa_size;
707 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
708 fmt = ARM_64_LPAE_S1;
709 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
710 fmt = ARM_32_LPAE_S1;
711 ias = min(ias, 32UL);
712 oas = min(oas, 40UL);
713 } else {
714 fmt = ARM_V7S;
715 ias = min(ias, 32UL);
716 oas = min(oas, 32UL);
717 }
718 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
719 break;
720 case ARM_SMMU_DOMAIN_NESTED:
721
722
723
724
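		/*
		 * Nested translation is not handled specially here; treat it
		 * as stage 2 and fall through.
		 */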
725 case ARM_SMMU_DOMAIN_S2:
726 cfg->cbar = CBAR_TYPE_S2_TRANS;
727 start = 0;
728 ias = smmu->ipa_size;
729 oas = smmu->pa_size;
730 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
731 fmt = ARM_64_LPAE_S2;
732 } else {
733 fmt = ARM_32_LPAE_S2;
734 ias = min(ias, 40UL);
735 oas = min(oas, 40UL);
736 }
737 if (smmu->version == ARM_SMMU_V2)
738 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
739 else
740 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
741 break;
742 default:
743 ret = -EINVAL;
744 goto out_unlock;
745 }
746
747 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
748 if (ret < 0) {
749 goto out_unlock;
750 }
751
752 smmu_domain->smmu = smmu;
753
754 cfg->cbndx = ret;
755 if (smmu->version < ARM_SMMU_V2) {
756 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
757 cfg->irptndx %= smmu->num_context_irqs;
758 } else {
759 cfg->irptndx = cfg->cbndx;
760 }
761
762 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
763 cfg->vmid = cfg->cbndx + 1;
764 else
765 cfg->asid = cfg->cbndx;
766
767 pgtbl_cfg = (struct io_pgtable_cfg) {
768 .pgsize_bitmap = smmu->pgsize_bitmap,
769 .ias = ias,
770 .oas = oas,
771 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
772 .tlb = smmu_domain->flush_ops,
773 .iommu_dev = smmu->dev,
774 };
775
776 if (!iommu_get_dma_strict(domain))
777 pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
778
779 if (smmu->impl && smmu->impl->init_context) {
780 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
781 if (ret)
782 goto out_clear_smmu;
783 }
784
785 if (smmu_domain->pgtbl_quirks)
786 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
787
788 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
789 if (!pgtbl_ops) {
790 ret = -ENOMEM;
791 goto out_clear_smmu;
792 }
793
794
795 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
796
797 if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
798 domain->geometry.aperture_start = ~0UL << ias;
799 domain->geometry.aperture_end = ~0UL;
800 } else {
801 domain->geometry.aperture_end = (1UL << ias) - 1;
802 }
803
804 domain->geometry.force_aperture = true;
805
806
807 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
808 arm_smmu_write_context_bank(smmu, cfg->cbndx);
809
810
811
812
813
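	/*
	 * Request the context fault interrupt last, once the context bank
	 * is fully programmed and we are ready to handle faults.
	 */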
814 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
815
816 if (smmu->impl && smmu->impl->context_fault)
817 context_fault = smmu->impl->context_fault;
818 else
819 context_fault = arm_smmu_context_fault;
820
821 ret = devm_request_irq(smmu->dev, irq, context_fault,
822 IRQF_SHARED, "arm-smmu-context-fault", domain);
823 if (ret < 0) {
824 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
825 cfg->irptndx, irq);
826 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
827 }
828
829 mutex_unlock(&smmu_domain->init_mutex);
830
831
832 smmu_domain->pgtbl_ops = pgtbl_ops;
833 return 0;
834
835out_clear_smmu:
836 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
837 smmu_domain->smmu = NULL;
838out_unlock:
839 mutex_unlock(&smmu_domain->init_mutex);
840 return ret;
841}
842
843static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
844{
845 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
846 struct arm_smmu_device *smmu = smmu_domain->smmu;
847 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
848 int ret, irq;
849
850 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
851 return;
852
853 ret = arm_smmu_rpm_get(smmu);
854 if (ret < 0)
855 return;
856
857
858
859
860
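	/*
	 * Disable the context bank and free the interrupt before releasing
	 * the page tables and the context bank index.
	 */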
861 smmu->cbs[cfg->cbndx].cfg = NULL;
862 arm_smmu_write_context_bank(smmu, cfg->cbndx);
863
864 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
865 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
866 devm_free_irq(smmu->dev, irq, domain);
867 }
868
869 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
870 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
871
872 arm_smmu_rpm_put(smmu);
873}
874
875static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
876{
877 struct arm_smmu_domain *smmu_domain;
878
879 if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
880 if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
881 return NULL;
882 }
883
884
885
886
887
888 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
889 if (!smmu_domain)
890 return NULL;
891
892 mutex_init(&smmu_domain->init_mutex);
893 spin_lock_init(&smmu_domain->cb_lock);
894
895 return &smmu_domain->domain;
896}
897
898static void arm_smmu_domain_free(struct iommu_domain *domain)
899{
900 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
901
902
903
904
905
906 arm_smmu_destroy_domain_context(domain);
907 kfree(smmu_domain);
908}
909
910static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
911{
912 struct arm_smmu_smr *smr = smmu->smrs + idx;
913 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
914 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
915
916 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
917 reg |= ARM_SMMU_SMR_VALID;
918 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
919}
920
921static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
922{
923 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
924 u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
925 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
926 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
927
928 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
929 smmu->smrs[idx].valid)
930 reg |= ARM_SMMU_S2CR_EXIDVALID;
931 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
932}
933
934static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
935{
936 arm_smmu_write_s2cr(smmu, idx);
937 if (smmu->smrs)
938 arm_smmu_write_smr(smmu, idx);
939}
940
941
942
943
944
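/*
 * The width of the SMR mask field depends on sCR0.EXIDENABLE, so this must
 * be called after the global configuration has been written.
 */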
945static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
946{
947 u32 smr;
948 int i;
949
950 if (!smmu->smrs)
951 return;
952
953
954
955
956
957
958
959
960 for (i = 0; i < smmu->num_mapping_groups; i++)
961 if (!smmu->smrs[i].valid)
962 goto smr_ok;
963 return;
964smr_ok:
965
966
967
968
969
970 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
971 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
972 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
973 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
974
975 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
976 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
977 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
978 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
979}
980
981static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
982{
983 struct arm_smmu_smr *smrs = smmu->smrs;
984 int i, free_idx = -ENOSPC;
985
986
987 if (!smrs)
988 return id;
989
990
991 for (i = 0; i < smmu->num_mapping_groups; ++i) {
992 if (!smrs[i].valid) {
993
994
995
996
997 if (free_idx < 0)
998 free_idx = i;
999 continue;
1000 }
1001
1002
1003
1004
1005
1006
1007
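		/*
		 * If the new entry is entirely covered by an existing one,
		 * reuse that entry; identical entries are the common case.
		 */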
1008 if ((mask & smrs[i].mask) == mask &&
1009 !((id ^ smrs[i].id) & ~smrs[i].mask))
1010 return i;
1011
1012
1013
1014
1015
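		/*
		 * Any other overlap means some stream ID could match both
		 * entries, so the new mapping must be rejected as a conflict.
		 */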
1016 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1017 return -EINVAL;
1018 }
1019
1020 return free_idx;
1021}
1022
1023static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1024{
1025 if (--smmu->s2crs[idx].count)
1026 return false;
1027
1028 smmu->s2crs[idx] = s2cr_init_val;
1029 if (smmu->smrs)
1030 smmu->smrs[idx].valid = false;
1031
1032 return true;
1033}
1034
1035static int arm_smmu_master_alloc_smes(struct device *dev)
1036{
1037 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1038 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1039 struct arm_smmu_device *smmu = cfg->smmu;
1040 struct arm_smmu_smr *smrs = smmu->smrs;
1041 int i, idx, ret;
1042
1043 mutex_lock(&smmu->stream_map_mutex);
1044
1045 for_each_cfg_sme(cfg, fwspec, i, idx) {
1046 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1047 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1048
1049 if (idx != INVALID_SMENDX) {
1050 ret = -EEXIST;
1051 goto out_err;
1052 }
1053
1054 ret = arm_smmu_find_sme(smmu, sid, mask);
1055 if (ret < 0)
1056 goto out_err;
1057
1058 idx = ret;
1059 if (smrs && smmu->s2crs[idx].count == 0) {
1060 smrs[idx].id = sid;
1061 smrs[idx].mask = mask;
1062 smrs[idx].valid = true;
1063 }
1064 smmu->s2crs[idx].count++;
1065 cfg->smendx[i] = (s16)idx;
1066 }
1067
1068
1069 for_each_cfg_sme(cfg, fwspec, i, idx)
1070 arm_smmu_write_sme(smmu, idx);
1071
1072 mutex_unlock(&smmu->stream_map_mutex);
1073 return 0;
1074
1075out_err:
1076 while (i--) {
1077 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1078 cfg->smendx[i] = INVALID_SMENDX;
1079 }
1080 mutex_unlock(&smmu->stream_map_mutex);
1081 return ret;
1082}
1083
1084static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
1085 struct iommu_fwspec *fwspec)
1086{
1087 struct arm_smmu_device *smmu = cfg->smmu;
1088 int i, idx;
1089
1090 mutex_lock(&smmu->stream_map_mutex);
1091 for_each_cfg_sme(cfg, fwspec, i, idx) {
1092 if (arm_smmu_free_sme(smmu, idx))
1093 arm_smmu_write_sme(smmu, idx);
1094 cfg->smendx[i] = INVALID_SMENDX;
1095 }
1096 mutex_unlock(&smmu->stream_map_mutex);
1097}
1098
1099static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1100 struct arm_smmu_master_cfg *cfg,
1101 struct iommu_fwspec *fwspec)
1102{
1103 struct arm_smmu_device *smmu = smmu_domain->smmu;
1104 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1105 u8 cbndx = smmu_domain->cfg.cbndx;
1106 enum arm_smmu_s2cr_type type;
1107 int i, idx;
1108
1109 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1110 type = S2CR_TYPE_BYPASS;
1111 else
1112 type = S2CR_TYPE_TRANS;
1113
1114 for_each_cfg_sme(cfg, fwspec, i, idx) {
1115 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1116 continue;
1117
1118 s2cr[idx].type = type;
1119 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1120 s2cr[idx].cbndx = cbndx;
1121 arm_smmu_write_s2cr(smmu, idx);
1122 }
1123 return 0;
1124}
1125
1126static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1127{
1128 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1129 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1130 struct arm_smmu_master_cfg *cfg;
1131 struct arm_smmu_device *smmu;
1132 int ret;
1133
1134 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
1135 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1136 return -ENXIO;
1137 }
1138
1139
1140
1141
1142
1143
1144
1145
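	/*
	 * The per-master data is only set up once probe_device() has
	 * succeeded; bail out politely if we get here before that.
	 */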
1146 cfg = dev_iommu_priv_get(dev);
1147 if (!cfg)
1148 return -ENODEV;
1149
1150 smmu = cfg->smmu;
1151
1152 ret = arm_smmu_rpm_get(smmu);
1153 if (ret < 0)
1154 return ret;
1155
1156
1157 ret = arm_smmu_init_domain_context(domain, smmu, dev);
1158 if (ret < 0)
1159 goto rpm_put;
1160
1161
1162
1163
1164
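	/*
	 * A domain is tied to a single SMMU instance; reject devices that
	 * sit behind a different SMMU.
	 */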
1165 if (smmu_domain->smmu != smmu) {
1166 dev_err(dev,
1167 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1168 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1169 ret = -EINVAL;
1170 goto rpm_put;
1171 }
1172
1173
1174 ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
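	/*
	 * Use a short autosuspend delay so that tearing down many mappings
	 * in a row does not bounce the SMMU in and out of runtime suspend
	 * for every single unmap.
	 */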
1187 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
1188 pm_runtime_use_autosuspend(smmu->dev);
1189
1190rpm_put:
1191 arm_smmu_rpm_put(smmu);
1192 return ret;
1193}
1194
1195static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
1196 phys_addr_t paddr, size_t pgsize, size_t pgcount,
1197 int prot, gfp_t gfp, size_t *mapped)
1198{
1199 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1200 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1201 int ret;
1202
1203 if (!ops)
1204 return -ENODEV;
1205
1206 arm_smmu_rpm_get(smmu);
1207 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
1208 arm_smmu_rpm_put(smmu);
1209
1210 return ret;
1211}
1212
1213static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
1214 size_t pgsize, size_t pgcount,
1215 struct iommu_iotlb_gather *iotlb_gather)
1216{
1217 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1218 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1219 size_t ret;
1220
1221 if (!ops)
1222 return 0;
1223
1224 arm_smmu_rpm_get(smmu);
1225 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
1226 arm_smmu_rpm_put(smmu);
1227
1228 return ret;
1229}
1230
1231static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1232{
1233 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1234 struct arm_smmu_device *smmu = smmu_domain->smmu;
1235
1236 if (smmu_domain->flush_ops) {
1237 arm_smmu_rpm_get(smmu);
1238 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1239 arm_smmu_rpm_put(smmu);
1240 }
1241}
1242
1243static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1244 struct iommu_iotlb_gather *gather)
1245{
1246 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1247 struct arm_smmu_device *smmu = smmu_domain->smmu;
1248
1249 if (!smmu)
1250 return;
1251
1252 arm_smmu_rpm_get(smmu);
1253 if (smmu->version == ARM_SMMU_V2 ||
1254 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1255 arm_smmu_tlb_sync_context(smmu_domain);
1256 else
1257 arm_smmu_tlb_sync_global(smmu);
1258 arm_smmu_rpm_put(smmu);
1259}
1260
1261static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1262 dma_addr_t iova)
1263{
1264 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1265 struct arm_smmu_device *smmu = smmu_domain->smmu;
1266 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1268 struct device *dev = smmu->dev;
1269 void __iomem *reg;
1270 u32 tmp;
1271 u64 phys;
1272 unsigned long va, flags;
1273 int ret, idx = cfg->cbndx;
1274 phys_addr_t addr = 0;
1275
1276 ret = arm_smmu_rpm_get(smmu);
1277 if (ret < 0)
1278 return 0;
1279
1280 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1281 va = iova & ~0xfffUL;
1282 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1283 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1284 else
1285 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1286
1287 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1288 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
1289 5, 50)) {
1290 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1291 dev_err(dev,
1292 "iova to phys timed out on %pad. Falling back to software table walk.\n",
1293 &iova);
1294 arm_smmu_rpm_put(smmu);
1295 return ops->iova_to_phys(ops, iova);
1296 }
1297
1298 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
1299 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1300 if (phys & ARM_SMMU_CB_PAR_F) {
1301 dev_err(dev, "translation fault!\n");
1302 dev_err(dev, "PAR = 0x%llx\n", phys);
1303 goto out;
1304 }
1305
1306 addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1307out:
1308 arm_smmu_rpm_put(smmu);
1309
1310 return addr;
1311}
1312
1313static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1314 dma_addr_t iova)
1315{
1316 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1317 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1318
1319 if (!ops)
1320 return 0;
1321
1322 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1323 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1324 return arm_smmu_iova_to_phys_hard(domain, iova);
1325
1326 return ops->iova_to_phys(ops, iova);
1327}
1328
1329static bool arm_smmu_capable(enum iommu_cap cap)
1330{
1331 switch (cap) {
1332 case IOMMU_CAP_CACHE_COHERENCY:
1333
1334
1335
1336
1337 return true;
1338 case IOMMU_CAP_NOEXEC:
1339 return true;
1340 default:
1341 return false;
1342 }
1343}
1344
1345static
1346struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
1347{
1348 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1349 fwnode);
1350 put_device(dev);
1351 return dev ? dev_get_drvdata(dev) : NULL;
1352}
1353
1354static struct iommu_device *arm_smmu_probe_device(struct device *dev)
1355{
1356 struct arm_smmu_device *smmu = NULL;
1357 struct arm_smmu_master_cfg *cfg;
1358 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1359 int i, ret;
1360
1361 if (using_legacy_binding) {
1362 ret = arm_smmu_register_legacy_master(dev, &smmu);
1363
1364
1365
1366
1367
1368
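		/*
		 * Legacy registration may have allocated a new fwspec for the
		 * device, so re-read it before checking for errors.
		 */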
1369 fwspec = dev_iommu_fwspec_get(dev);
1370 if (ret)
1371 goto out_free;
1372 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
1373 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1374 } else {
1375 return ERR_PTR(-ENODEV);
1376 }
1377
1378 ret = -EINVAL;
1379 for (i = 0; i < fwspec->num_ids; i++) {
1380 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1381 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1382
1383 if (sid & ~smmu->streamid_mask) {
1384 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1385 sid, smmu->streamid_mask);
1386 goto out_free;
1387 }
1388 if (mask & ~smmu->smr_mask_mask) {
1389 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
1390 mask, smmu->smr_mask_mask);
1391 goto out_free;
1392 }
1393 }
1394
1395 ret = -ENOMEM;
1396 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1397 GFP_KERNEL);
1398 if (!cfg)
1399 goto out_free;
1400
1401 cfg->smmu = smmu;
1402 dev_iommu_priv_set(dev, cfg);
1403 while (i--)
1404 cfg->smendx[i] = INVALID_SMENDX;
1405
1406 ret = arm_smmu_rpm_get(smmu);
1407 if (ret < 0)
1408 goto out_cfg_free;
1409
1410 ret = arm_smmu_master_alloc_smes(dev);
1411 arm_smmu_rpm_put(smmu);
1412
1413 if (ret)
1414 goto out_cfg_free;
1415
1416 device_link_add(dev, smmu->dev,
1417 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1418
1419 return &smmu->iommu;
1420
1421out_cfg_free:
1422 kfree(cfg);
1423out_free:
1424 iommu_fwspec_free(dev);
1425 return ERR_PTR(ret);
1426}
1427
1428static void arm_smmu_release_device(struct device *dev)
1429{
1430 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1431 struct arm_smmu_master_cfg *cfg;
1432 struct arm_smmu_device *smmu;
1433 int ret;
1434
1435 if (!fwspec || fwspec->ops != &arm_smmu_ops)
1436 return;
1437
1438 cfg = dev_iommu_priv_get(dev);
1439 smmu = cfg->smmu;
1440
1441 ret = arm_smmu_rpm_get(smmu);
1442 if (ret < 0)
1443 return;
1444
1445 arm_smmu_master_free_smes(cfg, fwspec);
1446
1447 arm_smmu_rpm_put(smmu);
1448
1449 dev_iommu_priv_set(dev, NULL);
1450 kfree(cfg);
1451 iommu_fwspec_free(dev);
1452}
1453
1454static void arm_smmu_probe_finalize(struct device *dev)
1455{
1456 struct arm_smmu_master_cfg *cfg;
1457 struct arm_smmu_device *smmu;
1458
1459 cfg = dev_iommu_priv_get(dev);
1460 smmu = cfg->smmu;
1461
1462 if (smmu->impl && smmu->impl->probe_finalize)
1463 smmu->impl->probe_finalize(smmu, dev);
1464}
1465
1466static struct iommu_group *arm_smmu_device_group(struct device *dev)
1467{
1468 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1469 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1470 struct arm_smmu_device *smmu = cfg->smmu;
1471 struct iommu_group *group = NULL;
1472 int i, idx;
1473
1474 mutex_lock(&smmu->stream_map_mutex);
1475 for_each_cfg_sme(cfg, fwspec, i, idx) {
1476 if (group && smmu->s2crs[idx].group &&
1477 group != smmu->s2crs[idx].group) {
1478 mutex_unlock(&smmu->stream_map_mutex);
1479 return ERR_PTR(-EINVAL);
1480 }
1481
1482 group = smmu->s2crs[idx].group;
1483 }
1484
1485 if (group) {
1486 mutex_unlock(&smmu->stream_map_mutex);
1487 return iommu_group_ref_get(group);
1488 }
1489
1490 if (dev_is_pci(dev))
1491 group = pci_device_group(dev);
1492 else
1493 group = generic_device_group(dev);
1494
1495
1496 if (!IS_ERR(group))
1497 for_each_cfg_sme(cfg, fwspec, i, idx)
1498 smmu->s2crs[idx].group = group;
1499
1500 mutex_unlock(&smmu->stream_map_mutex);
1501 return group;
1502}
1503
1504static int arm_smmu_enable_nesting(struct iommu_domain *domain)
1505{
1506 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1507 int ret = 0;
1508
1509 mutex_lock(&smmu_domain->init_mutex);
1510 if (smmu_domain->smmu)
1511 ret = -EPERM;
1512 else
1513 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1514 mutex_unlock(&smmu_domain->init_mutex);
1515
1516 return ret;
1517}
1518
1519static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
1520 unsigned long quirks)
1521{
1522 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1523 int ret = 0;
1524
1525 mutex_lock(&smmu_domain->init_mutex);
1526 if (smmu_domain->smmu)
1527 ret = -EPERM;
1528 else
1529 smmu_domain->pgtbl_quirks = quirks;
1530 mutex_unlock(&smmu_domain->init_mutex);
1531
1532 return ret;
1533}
1534
1535static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1536{
1537 u32 mask, fwid = 0;
1538
1539 if (args->args_count > 0)
1540 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1541
1542 if (args->args_count > 1)
1543 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1544 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1545 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
1546
1547 return iommu_fwspec_add_ids(dev, &fwid, 1);
1548}
1549
1550static void arm_smmu_get_resv_regions(struct device *dev,
1551 struct list_head *head)
1552{
1553 struct iommu_resv_region *region;
1554 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1555
1556 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1557 prot, IOMMU_RESV_SW_MSI);
1558 if (!region)
1559 return;
1560
	list_add_tail(&region->list, head);
1562
1563 iommu_dma_get_resv_regions(dev, head);
1564}
1565
1566static int arm_smmu_def_domain_type(struct device *dev)
1567{
1568 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1569 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1570
1571 if (impl && impl->def_domain_type)
1572 return impl->def_domain_type(dev);
1573
1574 return 0;
1575}
1576
1577static struct iommu_ops arm_smmu_ops = {
1578 .capable = arm_smmu_capable,
1579 .domain_alloc = arm_smmu_domain_alloc,
1580 .domain_free = arm_smmu_domain_free,
1581 .attach_dev = arm_smmu_attach_dev,
1582 .map_pages = arm_smmu_map_pages,
1583 .unmap_pages = arm_smmu_unmap_pages,
1584 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
1585 .iotlb_sync = arm_smmu_iotlb_sync,
1586 .iova_to_phys = arm_smmu_iova_to_phys,
1587 .probe_device = arm_smmu_probe_device,
1588 .release_device = arm_smmu_release_device,
1589 .probe_finalize = arm_smmu_probe_finalize,
1590 .device_group = arm_smmu_device_group,
1591 .enable_nesting = arm_smmu_enable_nesting,
1592 .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
1593 .of_xlate = arm_smmu_of_xlate,
1594 .get_resv_regions = arm_smmu_get_resv_regions,
1595 .put_resv_regions = generic_iommu_put_resv_regions,
1596 .def_domain_type = arm_smmu_def_domain_type,
1597 .pgsize_bitmap = -1UL,
1598 .owner = THIS_MODULE,
1599};
1600
1601static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1602{
1603 int i;
1604 u32 reg;
1605
1606
1607 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1608 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
1609
1610
1611
1612
1613
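	/*
	 * Program all stream mapping registers from our shadow state:
	 * unused entries are invalid SMRs and default (bypass/fault) S2CRs.
	 */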
1614 for (i = 0; i < smmu->num_mapping_groups; ++i)
1615 arm_smmu_write_sme(smmu, i);
1616
1617
1618 for (i = 0; i < smmu->num_context_banks; ++i) {
1619 arm_smmu_write_context_bank(smmu, i);
1620 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
1621 }
1622
1623
1624 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1625 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
1626
1627 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
1628
1629
1630 reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
1631 ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
1632
1633
1634 reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
1635
1636
1637 reg &= ~ARM_SMMU_sCR0_CLIENTPD;
1638 if (disable_bypass)
1639 reg |= ARM_SMMU_sCR0_USFCFG;
1640 else
1641 reg &= ~ARM_SMMU_sCR0_USFCFG;
1642
1643
1644 reg &= ~ARM_SMMU_sCR0_FB;
1645
1646
1647 reg &= ~(ARM_SMMU_sCR0_BSU);
1648
1649 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1650 reg |= ARM_SMMU_sCR0_VMID16EN;
1651
1652 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1653 reg |= ARM_SMMU_sCR0_EXIDENABLE;
1654
1655 if (smmu->impl && smmu->impl->reset)
1656 smmu->impl->reset(smmu);
1657
1658
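	/* Complete any pending TLB maintenance, then enable the SMMU */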
1659 arm_smmu_tlb_sync_global(smmu);
1660 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
1661}
1662
1663static int arm_smmu_id_size_to_bits(int size)
1664{
1665 switch (size) {
1666 case 0:
1667 return 32;
1668 case 1:
1669 return 36;
1670 case 2:
1671 return 40;
1672 case 3:
1673 return 42;
1674 case 4:
1675 return 44;
1676 case 5:
1677 default:
1678 return 48;
1679 }
1680}
1681
1682static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1683{
1684 unsigned int size;
1685 u32 id;
1686 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1687 int i, ret;
1688
1689 dev_notice(smmu->dev, "probing hardware configuration...\n");
1690 dev_notice(smmu->dev, "SMMUv%d with:\n",
1691 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1692
1693
1694 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
1695
1696
1697 if (force_stage == 1)
1698 id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
1699 else if (force_stage == 2)
1700 id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
1701
1702 if (id & ARM_SMMU_ID0_S1TS) {
1703 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1704 dev_notice(smmu->dev, "\tstage 1 translation\n");
1705 }
1706
1707 if (id & ARM_SMMU_ID0_S2TS) {
1708 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1709 dev_notice(smmu->dev, "\tstage 2 translation\n");
1710 }
1711
1712 if (id & ARM_SMMU_ID0_NTS) {
1713 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1714 dev_notice(smmu->dev, "\tnested translation\n");
1715 }
1716
1717 if (!(smmu->features &
1718 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1719 dev_err(smmu->dev, "\tno translation support!\n");
1720 return -ENODEV;
1721 }
1722
1723 if ((id & ARM_SMMU_ID0_S1TS) &&
1724 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1725 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1726 dev_notice(smmu->dev, "\taddress translation ops\n");
1727 }
1728
1729
1730
1731
1732
1733
1734
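	/*
	 * For the DMA API to behave correctly, defer to what the firmware
	 * says about table walk coherency, regardless of what ID0.CTTW
	 * claims.
	 */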
1735 cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
1736 if (cttw_fw || cttw_reg)
1737 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1738 cttw_fw ? "" : "non-");
1739 if (cttw_fw != cttw_reg)
1740 dev_notice(smmu->dev,
1741 "\t(IDR0.CTTW overridden by FW configuration)\n");
1742
1743
1744 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1745 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1746 size = 1 << 16;
1747 } else {
1748 size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
1749 }
1750 smmu->streamid_mask = size - 1;
1751 if (id & ARM_SMMU_ID0_SMS) {
1752 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1753 size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
1754 if (size == 0) {
1755 dev_err(smmu->dev,
1756 "stream-matching supported, but no SMRs present!\n");
1757 return -ENODEV;
1758 }
1759
1760
1761 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1762 GFP_KERNEL);
1763 if (!smmu->smrs)
1764 return -ENOMEM;
1765
1766 dev_notice(smmu->dev,
1767 "\tstream matching with %u register groups", size);
1768 }
1769
1770 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1771 GFP_KERNEL);
1772 if (!smmu->s2crs)
1773 return -ENOMEM;
1774 for (i = 0; i < size; i++)
1775 smmu->s2crs[i] = s2cr_init_val;
1776
1777 smmu->num_mapping_groups = size;
1778 mutex_init(&smmu->stream_map_mutex);
1779 spin_lock_init(&smmu->global_sync_lock);
1780
1781 if (smmu->version < ARM_SMMU_V2 ||
1782 !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
1783 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1784 if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
1785 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1786 }
1787
1788
1789 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
1790 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1791
1792
1793 size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
1794 if (smmu->numpage != 2 * size << smmu->pgshift)
1795 dev_warn(smmu->dev,
1796 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1797 2 * size << smmu->pgshift, smmu->numpage);
1798
1799 smmu->numpage = size;
1800
1801 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1802 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1803 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1804 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1805 return -ENODEV;
1806 }
1807 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1808 smmu->num_context_banks, smmu->num_s2_context_banks);
1809 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1810 sizeof(*smmu->cbs), GFP_KERNEL);
1811 if (!smmu->cbs)
1812 return -ENOMEM;
1813
1814
1815 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
1816 size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
1817 smmu->ipa_size = size;
1818
1819
1820 size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
1821 smmu->pa_size = size;
1822
1823 if (id & ARM_SMMU_ID2_VMID16)
1824 smmu->features |= ARM_SMMU_FEAT_VMID16;
1825
1826
1827
1828
1829
1830
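	/*
	 * What the table walker can actually address depends on the
	 * descriptor format in use, which may vary per context bank; the
	 * output size is the best single value we have for the DMA mask.
	 */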
1831 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1832 dev_warn(smmu->dev,
1833 "failed to set DMA mask for table walker\n");
1834
1835 if (smmu->version < ARM_SMMU_V2) {
1836 smmu->va_size = smmu->ipa_size;
1837 if (smmu->version == ARM_SMMU_V1_64K)
1838 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1839 } else {
1840 size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
1841 smmu->va_size = arm_smmu_id_size_to_bits(size);
1842 if (id & ARM_SMMU_ID2_PTFS_4K)
1843 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1844 if (id & ARM_SMMU_ID2_PTFS_16K)
1845 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1846 if (id & ARM_SMMU_ID2_PTFS_64K)
1847 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1848 }
1849
1850 if (smmu->impl && smmu->impl->cfg_probe) {
1851 ret = smmu->impl->cfg_probe(smmu);
1852 if (ret)
1853 return ret;
1854 }
1855
1856
1857 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1858 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1859 if (smmu->features &
1860 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
1861 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1862 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1863 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1864 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1865 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1866
1867 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1868 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1869 else
1870 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1871 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1872 smmu->pgsize_bitmap);
1873
1874
1875 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1876 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1877 smmu->va_size, smmu->ipa_size);
1878
1879 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1880 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1881 smmu->ipa_size, smmu->pa_size);
1882
1883 return 0;
1884}
1885
1886struct arm_smmu_match_data {
1887 enum arm_smmu_arch_version version;
1888 enum arm_smmu_implementation model;
1889};
1890
1891#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1892static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
1893
1894ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1895ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
1896ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
1897ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
1898ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
1899ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
1900
1901static const struct of_device_id arm_smmu_of_match[] = {
1902 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1903 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1904 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1905 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1906 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1907 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1908 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1909 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1910 { },
1911};
1912MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1913
1914#ifdef CONFIG_ACPI
1915static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1916{
1917 int ret = 0;
1918
1919 switch (model) {
1920 case ACPI_IORT_SMMU_V1:
1921 case ACPI_IORT_SMMU_CORELINK_MMU400:
1922 smmu->version = ARM_SMMU_V1;
1923 smmu->model = GENERIC_SMMU;
1924 break;
1925 case ACPI_IORT_SMMU_CORELINK_MMU401:
1926 smmu->version = ARM_SMMU_V1_64K;
1927 smmu->model = GENERIC_SMMU;
1928 break;
1929 case ACPI_IORT_SMMU_V2:
1930 smmu->version = ARM_SMMU_V2;
1931 smmu->model = GENERIC_SMMU;
1932 break;
1933 case ACPI_IORT_SMMU_CORELINK_MMU500:
1934 smmu->version = ARM_SMMU_V2;
1935 smmu->model = ARM_MMU500;
1936 break;
1937 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1938 smmu->version = ARM_SMMU_V2;
1939 smmu->model = CAVIUM_SMMUV2;
1940 break;
1941 default:
1942 ret = -ENODEV;
1943 }
1944
1945 return ret;
1946}
1947
1948static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1949 struct arm_smmu_device *smmu)
1950{
1951 struct device *dev = smmu->dev;
1952 struct acpi_iort_node *node =
1953 *(struct acpi_iort_node **)dev_get_platdata(dev);
1954 struct acpi_iort_smmu *iort_smmu;
1955 int ret;
1956
1957
1958 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1959
1960 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1961 if (ret < 0)
1962 return ret;
1963
1964
1965 smmu->num_global_irqs = 1;
1966
1967 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1968 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1969
1970 return 0;
1971}
1972#else
1973static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1974 struct arm_smmu_device *smmu)
1975{
1976 return -ENODEV;
1977}
1978#endif
1979
1980static int arm_smmu_device_dt_probe(struct platform_device *pdev,
1981 struct arm_smmu_device *smmu)
1982{
1983 const struct arm_smmu_match_data *data;
1984 struct device *dev = &pdev->dev;
1985 bool legacy_binding;
1986
1987 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1988 &smmu->num_global_irqs)) {
1989 dev_err(dev, "missing #global-interrupts property\n");
1990 return -ENODEV;
1991 }
1992
1993 data = of_device_get_match_data(dev);
1994 smmu->version = data->version;
1995 smmu->model = data->model;
1996
1997 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1998 if (legacy_binding && !using_generic_binding) {
1999 if (!using_legacy_binding) {
2000 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2001 IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
2002 }
2003 using_legacy_binding = true;
2004 } else if (!legacy_binding && !using_legacy_binding) {
2005 using_generic_binding = true;
2006 } else {
2007 dev_err(dev, "not probing due to mismatched DT properties\n");
2008 return -ENODEV;
2009 }
2010
2011 if (of_dma_is_coherent(dev->of_node))
2012 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2013
2014 return 0;
2015}
2016
2017static int arm_smmu_bus_init(struct iommu_ops *ops)
2018{
2019 int err;
2020
2021
2022 if (!iommu_present(&platform_bus_type)) {
2023 err = bus_set_iommu(&platform_bus_type, ops);
2024 if (err)
2025 return err;
2026 }
2027#ifdef CONFIG_ARM_AMBA
2028 if (!iommu_present(&amba_bustype)) {
2029 err = bus_set_iommu(&amba_bustype, ops);
2030 if (err)
2031 goto err_reset_platform_ops;
2032 }
2033#endif
2034#ifdef CONFIG_PCI
2035 if (!iommu_present(&pci_bus_type)) {
2036 err = bus_set_iommu(&pci_bus_type, ops);
2037 if (err)
2038 goto err_reset_amba_ops;
2039 }
2040#endif
2041 return 0;
2042
2043err_reset_pci_ops: __maybe_unused;
2044#ifdef CONFIG_PCI
2045 bus_set_iommu(&pci_bus_type, NULL);
2046#endif
2047err_reset_amba_ops: __maybe_unused;
2048#ifdef CONFIG_ARM_AMBA
2049 bus_set_iommu(&amba_bustype, NULL);
2050#endif
2051err_reset_platform_ops: __maybe_unused;
2052 bus_set_iommu(&platform_bus_type, NULL);
2053 return err;
2054}
2055
2056static int arm_smmu_device_probe(struct platform_device *pdev)
2057{
2058 struct resource *res;
2059 resource_size_t ioaddr;
2060 struct arm_smmu_device *smmu;
2061 struct device *dev = &pdev->dev;
2062 int num_irqs, i, err;
2063 irqreturn_t (*global_fault)(int irq, void *dev);
2064
2065 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2066 if (!smmu) {
2067 dev_err(dev, "failed to allocate arm_smmu_device\n");
2068 return -ENOMEM;
2069 }
2070 smmu->dev = dev;
2071
2072 if (dev->of_node)
2073 err = arm_smmu_device_dt_probe(pdev, smmu);
2074 else
2075 err = arm_smmu_device_acpi_probe(pdev, smmu);
2076
2077 if (err)
2078 return err;
2079
2080 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2081 ioaddr = res->start;
2082 smmu->base = devm_ioremap_resource(dev, res);
2083 if (IS_ERR(smmu->base))
2084 return PTR_ERR(smmu->base);
2085
2086
2087
2088
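	/*
	 * Stash the full resource size in numpage for now; it is validated
	 * and re-derived once the SMMU's page size is known in
	 * arm_smmu_device_cfg_probe().
	 */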
2089 smmu->numpage = resource_size(res);
2090
2091 smmu = arm_smmu_impl_init(smmu);
2092 if (IS_ERR(smmu))
2093 return PTR_ERR(smmu);
2094
2095 num_irqs = 0;
2096 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2097 num_irqs++;
2098 if (num_irqs > smmu->num_global_irqs)
2099 smmu->num_context_irqs++;
2100 }
2101
2102 if (!smmu->num_context_irqs) {
2103 dev_err(dev, "found %d interrupts but expected at least %d\n",
2104 num_irqs, smmu->num_global_irqs + 1);
2105 return -ENODEV;
2106 }
2107
2108 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
2109 GFP_KERNEL);
2110 if (!smmu->irqs) {
2111 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2112 return -ENOMEM;
2113 }
2114
2115 for (i = 0; i < num_irqs; ++i) {
2116 int irq = platform_get_irq(pdev, i);
2117
2118 if (irq < 0)
2119 return -ENODEV;
2120 smmu->irqs[i] = irq;
2121 }
2122
2123 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2124 if (err < 0) {
2125 dev_err(dev, "failed to get clocks %d\n", err);
2126 return err;
2127 }
2128 smmu->num_clks = err;
2129
2130 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2131 if (err)
2132 return err;
2133
2134 err = arm_smmu_device_cfg_probe(smmu);
2135 if (err)
2136 return err;
2137
2138 if (smmu->version == ARM_SMMU_V2) {
2139 if (smmu->num_context_banks > smmu->num_context_irqs) {
2140 dev_err(dev,
2141 "found only %d context irq(s) but %d required\n",
2142 smmu->num_context_irqs, smmu->num_context_banks);
2143 return -ENODEV;
2144 }
2145
2146
2147 smmu->num_context_irqs = smmu->num_context_banks;
2148 }
2149
2150 if (smmu->impl && smmu->impl->global_fault)
2151 global_fault = smmu->impl->global_fault;
2152 else
2153 global_fault = arm_smmu_global_fault;
2154
2155 for (i = 0; i < smmu->num_global_irqs; ++i) {
2156 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2157 global_fault,
2158 IRQF_SHARED,
2159 "arm-smmu global fault",
2160 smmu);
2161 if (err) {
2162 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2163 i, smmu->irqs[i]);
2164 return err;
2165 }
2166 }
2167
2168 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2169 "smmu.%pa", &ioaddr);
2170 if (err) {
2171 dev_err(dev, "Failed to register iommu in sysfs\n");
2172 return err;
2173 }
2174
2175 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
2176 if (err) {
2177 dev_err(dev, "Failed to register iommu\n");
2178 goto err_sysfs_remove;
2179 }
2180
2181 platform_set_drvdata(pdev, smmu);
2182 arm_smmu_device_reset(smmu);
2183 arm_smmu_test_smr_masks(smmu);
2184
2185
2186
2187
2188
2189
2190
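	/*
	 * Only enable runtime PM when the SMMU sits in a power domain;
	 * otherwise it is assumed to be always powered.
	 */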
2191 if (dev->pm_domain) {
2192 pm_runtime_set_active(dev);
2193 pm_runtime_enable(dev);
2194 }
2195
2196
2197
2198
2199
2200
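	/*
	 * For the generic DT/ACPI bindings an SMMU probes before any of its
	 * masters, so install the bus ops now; the legacy binding defers
	 * this to arm_smmu_legacy_bus_init().
	 */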
2201 if (!using_legacy_binding) {
2202 err = arm_smmu_bus_init(&arm_smmu_ops);
2203 if (err)
2204 goto err_unregister_device;
2205 }
2206
2207 return 0;
2208
2209err_unregister_device:
2210 iommu_device_unregister(&smmu->iommu);
2211err_sysfs_remove:
2212 iommu_device_sysfs_remove(&smmu->iommu);
2213 return err;
2214}
2215
2216static int arm_smmu_device_remove(struct platform_device *pdev)
2217{
2218 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2219
2220 if (!smmu)
2221 return -ENODEV;
2222
2223 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2224 dev_notice(&pdev->dev, "disabling translation\n");
2225
2226 arm_smmu_bus_init(NULL);
2227 iommu_device_unregister(&smmu->iommu);
2228 iommu_device_sysfs_remove(&smmu->iommu);
2229
2230 arm_smmu_rpm_get(smmu);
2231
2232 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
2233 arm_smmu_rpm_put(smmu);
2234
2235 if (pm_runtime_enabled(smmu->dev))
2236 pm_runtime_force_suspend(smmu->dev);
2237 else
2238 clk_bulk_disable(smmu->num_clks, smmu->clks);
2239
2240 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2241 return 0;
2242}
2243
2244static void arm_smmu_device_shutdown(struct platform_device *pdev)
2245{
2246 arm_smmu_device_remove(pdev);
2247}
2248
2249static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
2250{
2251 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2252 int ret;
2253
2254 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2255 if (ret)
2256 return ret;
2257
2258 arm_smmu_device_reset(smmu);
2259
2260 return 0;
2261}
2262
2263static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
2264{
2265 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2266
2267 clk_bulk_disable(smmu->num_clks, smmu->clks);
2268
2269 return 0;
2270}
2271
2272static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2273{
2274 if (pm_runtime_suspended(dev))
2275 return 0;
2276
2277 return arm_smmu_runtime_resume(dev);
2278}
2279
2280static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2281{
2282 if (pm_runtime_suspended(dev))
2283 return 0;
2284
2285 return arm_smmu_runtime_suspend(dev);
2286}
2287
2288static const struct dev_pm_ops arm_smmu_pm_ops = {
2289 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2290 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2291 arm_smmu_runtime_resume, NULL)
2292};
2293
2294static struct platform_driver arm_smmu_driver = {
2295 .driver = {
2296 .name = "arm-smmu",
2297 .of_match_table = arm_smmu_of_match,
2298 .pm = &arm_smmu_pm_ops,
2299 .suppress_bind_attrs = true,
2300 },
2301 .probe = arm_smmu_device_probe,
2302 .remove = arm_smmu_device_remove,
2303 .shutdown = arm_smmu_device_shutdown,
2304};
2305module_platform_driver(arm_smmu_driver);
2306
2307MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2308MODULE_AUTHOR("Will Deacon <will@kernel.org>");
2309MODULE_ALIAS("platform:arm-smmu");
2310MODULE_LICENSE("GPL v2");
2311