1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#define pr_fmt(fmt) "arm-smmu: " fmt
19
20#include <linux/acpi.h>
21#include <linux/acpi_iort.h>
22#include <linux/bitfield.h>
23#include <linux/delay.h>
24#include <linux/dma-iommu.h>
25#include <linux/dma-mapping.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/iopoll.h>
30#include <linux/module.h>
31#include <linux/of.h>
32#include <linux/of_address.h>
33#include <linux/of_device.h>
34#include <linux/of_iommu.h>
35#include <linux/pci.h>
36#include <linux/platform_device.h>
37#include <linux/pm_runtime.h>
38#include <linux/ratelimit.h>
39#include <linux/slab.h>
40
41#include <linux/amba/bus.h>
42#include <linux/fsl/mc.h>
43
44#include "arm-smmu.h"
45
46
47
48
49
50
51
52
/* Arbitrary sync-trigger value; the data written is ignored by the hardware */
#define QCOM_DUMMY_VAL -1

/* Fixed IOVA window reserved for mapping MSI doorbells through the SMMU */
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000

/* Module parameter: force all translation to stage 1 or stage 2 */
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
/* Module parameter: fault (rather than bypass) streams with no attached domain */
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
67
/* Software shadow of a Stream-to-Context Register (S2CR) */
struct arm_smmu_s2cr {
	struct iommu_group *group;	/* iommu group sharing this stream mapping */
	int count;			/* number of masters referencing this entry */
	enum arm_smmu_s2cr_type type;
	enum arm_smmu_s2cr_privcfg privcfg;
	u8 cbndx;			/* target context bank index */
};

/* Reset value for an S2CR: fault or bypass unmatched streams per module param */
#define s2cr_init_val (struct arm_smmu_s2cr){ \
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
}
79
/* Software shadow of a Stream Match Register (SMR) */
struct arm_smmu_smr {
	u16 mask;
	u16 id;
	bool valid;	/* entry is live in hardware */
};

/* Cached context-bank register state, flushed by arm_smmu_write_context_bank() */
struct arm_smmu_cb {
	u64 ttbr[2];
	u32 tcr[2];
	u32 mair[2];
	struct arm_smmu_cfg *cfg;	/* NULL when the bank is unassigned */
};
92
/* Per-master private data: owning SMMU plus one SME index per fwspec ID */
struct arm_smmu_master_cfg {
	struct arm_smmu_device *smmu;
	s16 smendx[];	/* flexible array, one slot per fwspec stream ID */
};
#define INVALID_SMENDX -1
/* Safe lookup: out-of-range indices yield INVALID_SMENDX */
#define cfg_smendx(cfg, fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
/* Iterate (i, idx) over every stream map entry belonging to a master */
#define for_each_cfg_sme(cfg, fw, i, idx) \
	for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)

/* Which DT binding flavour(s) were seen during probe */
static bool using_legacy_binding, using_generic_binding;
104
105static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
106{
107 if (pm_runtime_enabled(smmu->dev))
108 return pm_runtime_get_sync(smmu->dev);
109
110 return 0;
111}
112
113static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
114{
115 if (pm_runtime_enabled(smmu->dev))
116 pm_runtime_put_autosuspend(smmu->dev);
117}
118
/* Convert a core iommu_domain pointer to the arm_smmu_domain embedding it. */
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
123
/* Forward declarations: defined later in the file, needed by the legacy path */
static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
126
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static int arm_smmu_bus_init(struct iommu_ops *ops);

/*
 * Find the device_node that legacy "mmu-masters" properties refer to for
 * @dev: the host bridge's parent for PCI devices, the device itself
 * otherwise.  Returns the node with an elevated refcount (caller must
 * of_node_put()).
 */
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}
142
/*
 * pci_for_each_dma_alias() callback: record the (last visited) alias RID as
 * a big-endian stream ID.  Always returns 0 so iteration runs to completion.
 */
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0;
}
148
/*
 * driver_for_each_device() callback: walk one SMMU's "mmu-masters" phandle
 * list looking for the master node stashed in the iterator.  On a match,
 * *data is replaced with the matching SMMU struct device and 1 is returned
 * to stop iteration, leaving the iterator positioned on the match so the
 * caller can extract the stream-ID arguments.
 */
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	/* restore the search target clobbered by the phandle walk */
	it->node = np;
	return err == -ENOENT ? 0 : err;
}
164
/*
 * Legacy "mmu-masters" DT binding support: locate the SMMU whose
 * "mmu-masters" list references @dev, initialise the device's fwspec
 * against it and copy the stream IDs out of the phandle arguments.
 * Returns 0 on success, -ENODEV if no SMMU claims this master, or a
 * negative errno on other failure.
 */
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	/* on success the callback overwrote data with the SMMU device */
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}
215
216
217
218
219
220
221
/*
 * With the legacy binding the SMMU driver registers itself on the buses via
 * an initcall, deferred (sync) so it runs after the SMMU devices probe.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		return arm_smmu_bus_init(&arm_smmu_ops);
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);
#else
/* Legacy DT bindings compiled out: never claim a master this way */
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	return -ENODEV;
}
#endif
236
237static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
238{
239 int idx;
240
241 do {
242 idx = find_next_zero_bit(map, end, start);
243 if (idx == end)
244 return -ENOSPC;
245 } while (test_and_set_bit(idx, map));
246
247 return idx;
248}
249
/* Release an index previously claimed by __arm_smmu_alloc_bitmap(). */
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
254
255
/*
 * Issue a TLB sync on the given register page and spin until GSACTIVE
 * clears, backing off with exponentially growing udelay()s up to
 * TLB_LOOP_TIMEOUT.  Callers hold the relevant sync lock.
 */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	/* Implementations may require a non-standard sync sequence */
	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

	/* Any write triggers the sync; the dummy value placates quirky HW */
	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}
278
/* Global TLB sync through GR0, serialised by the global sync lock. */
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
288
/* Per-context-bank TLB sync, serialised by the domain's cb_lock. */
static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
299
/* Invalidate all stage-1 TLB entries for this domain's ASID, then sync. */
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible to the SMMU before the invalidation.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}
312
/* Invalidate all stage-2 TLB entries for this domain's VMID, then sync. */
static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above: make cleared PTEs visible before the relaxed TLBI write */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}
323
/*
 * Invalidate a stage-1 VA range one granule at a time via the given CB
 * register.  For AArch32 formats the ASID is packed into the low bits of a
 * 32-bit address write; for AArch64 the 4K-shifted VA carries the ASID in
 * bits [63:48] of a 64-bit write.  Callers follow up with a TLB sync.
 */
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int idx = cfg->cbndx;

	/* Coherent walkers need cleared PTEs visible before the TLBI */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;	/* align down to 4K */
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}
351
/*
 * Invalidate a stage-2 IPA range one granule at a time via the given CB
 * register.  The address is written 4K-shifted; width of the write depends
 * on the context format.  Callers follow up with a TLB sync.
 */
static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	/* Coherent walkers need cleared PTEs visible before the TLBI */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}
371
/* io-pgtable hook: stage-1 invalidation covering all levels, then sync. */
static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVA);
	arm_smmu_tlb_sync_context(cookie);
}
379
/* io-pgtable hook: stage-1 last-level-only invalidation, then sync. */
static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
	arm_smmu_tlb_sync_context(cookie);
}
387
/* io-pgtable hook: queue a single-page stage-1 TLBI; sync happens later. */
static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
}
395
/* io-pgtable hook: stage-2 invalidation covering all levels, then sync. */
static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2);
	arm_smmu_tlb_sync_context(cookie);
}
403
/* io-pgtable hook: stage-2 last-level-only invalidation, then sync. */
static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
	arm_smmu_tlb_sync_context(cookie);
}
411
/* io-pgtable hook: queue a single-page stage-2 TLBI; sync happens later. */
static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
}
419
/*
 * SMMUv1 has no stage-2 by-address invalidation, so any range operation
 * degenerates to a full by-VMID context flush (range args are unused).
 */
static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
				       size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}
425
426
427
428
429
430
431
/*
 * Per-page hook for SMMUv1 stage-2: fire a by-VMID invalidation (the only
 * kind available); the gathered sync is issued later by the core.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Coherent walkers need cleared PTEs visible before the TLBI */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}
444
/* Flush callbacks for stage-1 domains */
static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
};

/* Flush callbacks for stage-2 domains on SMMUv2 (by-address TLBI available) */
static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s2,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
};

/* Flush callbacks for stage-2 domains on SMMUv1 (by-VMID TLBI only) */
static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_any_s2_v1,
	.tlb_flush_leaf	= arm_smmu_tlb_inv_any_s2_v1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
};
465
/*
 * Context-bank fault IRQ handler: report the fault syndrome (rate limited)
 * and clear FSR so the device can make forward progress.
 */
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;	/* shared IRQ: not ours */

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	/* writing the fault bits back clears them */
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}
490
/*
 * Global fault IRQ handler: log the sGFSR/sGFSYNR syndrome (rate limited,
 * with a friendlier hint for bypass-disabled unknown-stream faults) and
 * clear sGFSR.
 */
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;	/* shared IRQ: not ours */

	if (__ratelimit(&rs)) {
		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
		    (gfsr & ARM_SMMU_sGFSR_USF))
			dev_err(smmu->dev,
				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
				(u16)gfsynr1);
		else
			dev_err(smmu->dev,
				"Unexpected global fault, this could be serious\n");
		dev_err(smmu->dev,
			"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			gfsr, gfsynr0, gfsynr1, gfsynr2);
	}

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}
523
/*
 * Translate the io-pgtable configuration into cached TCR/TTBR/MAIR values
 * for this domain's context bank.  Nothing touches hardware here; the
 * shadow is flushed out by arm_smmu_write_context_bank().
 */
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
			cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= ARM_SMMU_TCR2_AS;
			else
				cb->tcr[0] |= ARM_SMMU_TCR_EAE;
		}
	} else {
		cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
			cb->ttbr[1] = 0;
		} else {
			/* the ASID rides along in the TTBRs for LPAE/AArch64 */
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
			cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID,
						  cfg->asid);
			cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
						 cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
		}
	}
}
576
/*
 * Flush the cached state in smmu->cbs[idx] out to the hardware context bank
 * (CBA2R/CBAR/TCR/TTBR/MAIR/SCTLR), or disable the bank if it is unassigned.
 */
static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R (SMMUv2 only) */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = ARM_SMMU_CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the attributes from the pagetable.
	 */
	if (stage1) {
		reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
				  ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
		       FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
				  ARM_SMMU_CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR: write TCR2 (when present) before TCR since the latter is the
	 * one that enables the chosen translation regime.
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs (32-bit writes + CONTEXTIDR for the short-descriptor format) */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR: enable translation and fault reporting last */
	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
	if (stage1)
		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= ARM_SMMU_SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}
662
/*
 * Finalise @domain on @smmu: pick a translation stage and context format,
 * allocate a context bank, build the io-pgtable and program the hardware.
 * Idempotent for an already-initialised domain (guarded by init_mutex).
 * Returns 0 on success or a negative errno.
 */
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	irqreturn_t (*context_fault)(int irq, void *dev);

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* Identity domains bypass translation: no context bank needed */
	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Map the requested stage onto what the hardware actually supports:
	 * if only one stage is implemented, silently fall back to it
	 * regardless of what was requested.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choose a context format: prefer AArch32-LPAE if available; allow
	 * the short-descriptor format only for 32-bit non-LPAE stage-1; use
	 * AArch64 on 64-bit kernels, or as a last resort, when the hardware
	 * offers it.
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		/* stage-1 banks sit after the stage-2 banks in the bitmap */
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * Nested translation is not implemented; treat it as plain
		 * stage-2 for now (deliberate fallthrough).
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		/* v1: context IRQs are a shared pool, assigned round-robin */
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;	/* VMID 0 is reserved */
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];

	if (smmu->impl && smmu->impl->context_fault)
		context_fault = smmu->impl->context_fault;
	else
		context_fault = arm_smmu_context_fault;

	ret = devm_request_irq(smmu->dev, irq, context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		/* non-fatal: carry on without fault reporting */
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
863
/*
 * Tear down a domain's context bank: disable it in hardware, release the
 * context IRQ, free the page tables and return the bank to the allocator.
 */
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	/* bypass/never-attached domains have nothing to undo */
	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank (by clearing its cfg and rewriting it)
	 * before freeing the page tables it points at.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}
895
/*
 * iommu_ops hook: allocate a domain of the given type.  Returns NULL for
 * unsupported types or on allocation/cookie failure.
 */
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really finalise anything until a master is attached.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	/* DMA domains need a cookie; legacy binding can't support them */
	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}
924
/*
 * iommu_ops hook: free a domain.  Assumes all devices have already been
 * detached.
 */
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/* drop the DMA cookie (no-op for non-DMA domains), then the context */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
937
/* Program Stream Match Register @idx from its software shadow. */
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
		  FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);

	/* with EXIDS the valid bit moves to the S2CR (see write_s2cr) */
	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= ARM_SMMU_SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}
948
/* Program Stream-to-Context Register @idx from its software shadow. */
static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);

	/* with EXIDS, S2CR carries the valid bit for the paired SMR */
	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= ARM_SMMU_S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}
961
/*
 * Program a full stream map entry: S2CR first, so that when the SMR
 * becomes valid it never matches against a stale context mapping.
 */
static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}
968
969
970
971
972
/*
 * Probe how many SMR ID/MASK bits are actually implemented by writing
 * all-ones patterns to a free SMR and reading them back.  The width of
 * SMR's mask field depends on sCR0_EXIDENABLE, so this must be called
 * after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;
	int i;

	if (!smmu->smrs)
		return;
	/*
	 * If we've had to accommodate firmware memory regions, we may
	 * have live SMRs by now; tread carefully...
	 *
	 * Somewhat perversely, not having a free SMR for this test implies we
	 * can get away without it anyway, as we'll only be able to 'allocate'
	 * these SMRs for the ID/mask values we're already trusting to be OK.
	 */
	for (i = 0; i < smmu->num_mapping_groups; i++)
		if (!smmu->smrs[i].valid)
			goto smr_ok;
	return;
smr_ok:
	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
	smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);

	smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
	smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
}
1008
/*
 * Find a stream map entry for the given ID/mask pair: an existing entry
 * that entirely covers it, else the first free slot.  Returns the index,
 * -EINVAL on a partial (conflicting) overlap, or -ENOSPC when full.
 */
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Without stream matching, stream IDs index the table directly */
	if (!smrs)
		return id;

	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
1050
/*
 * Drop one reference on stream map entry @idx.  When the last user goes,
 * reset the S2CR shadow to its default and invalidate the SMR shadow.
 * Returns true if the hardware copy now needs rewriting.
 */
static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}
1062
/*
 * Claim (or share) a stream map entry for every stream ID in the device's
 * fwspec, then program the hardware.  All-or-nothing: any failure rolls
 * back entries claimed so far.  Serialised by stream_map_mutex.
 */
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(cfg, fwspec, i, idx) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		/* first user of this entry initialises the SMR shadow */
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(cfg, fwspec, i, idx)
		arm_smmu_write_sme(smmu, idx);

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	/* roll back every entry claimed earlier in this call */
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}
1111
/*
 * Release every stream map entry held by a master, rewriting the hardware
 * copy for entries that became unused.  Serialised by stream_map_mutex.
 */
static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = cfg->smmu;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}
1126
/*
 * Point all of a master's stream map entries at the domain's context bank
 * (or at bypass for identity domains), skipping entries already correct.
 */
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}
1153
/*
 * iommu_ops hook: attach @dev to @domain.  Finalises the domain on the
 * device's SMMU if needed and retargets the device's stream mappings.
 */
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;
	int ret;

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * The device may legitimately have no priv data yet (e.g. if core
	 * code attempts an attach before our probe_device has populated it),
	 * so treat that as "not ours" rather than crashing.
	 */
	cfg = dev_iommu_priv_get(dev);
	if (!cfg)
		return -ENODEV;

	smmu = cfg->smmu;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto rpm_put;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto rpm_put;
	}

	/* Looks ok, so add the device to the domain */
	ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);

	/*
	 * Setup an autosuspend delay to avoid bouncing runpm state.
	 * Otherwise, if a driver for a suspended consumer device unmaps many
	 * buffers, each unmap would trigger a full resume/suspend cycle.
	 */
	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
	pm_runtime_use_autosuspend(smmu->dev);

rpm_put:
	arm_smmu_rpm_put(smmu);
	return ret;
}
1222
1223static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1224 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1225{
1226 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1227 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1228 int ret;
1229
1230 if (!ops)
1231 return -ENODEV;
1232
1233 arm_smmu_rpm_get(smmu);
1234 ret = ops->map(ops, iova, paddr, size, prot, gfp);
1235 arm_smmu_rpm_put(smmu);
1236
1237 return ret;
1238}
1239
1240static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1241 size_t size, struct iommu_iotlb_gather *gather)
1242{
1243 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1244 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1245 size_t ret;
1246
1247 if (!ops)
1248 return 0;
1249
1250 arm_smmu_rpm_get(smmu);
1251 ret = ops->unmap(ops, iova, size, gather);
1252 arm_smmu_rpm_put(smmu);
1253
1254 return ret;
1255}
1256
1257static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1258{
1259 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1260 struct arm_smmu_device *smmu = smmu_domain->smmu;
1261
1262 if (smmu_domain->flush_ops) {
1263 arm_smmu_rpm_get(smmu);
1264 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1265 arm_smmu_rpm_put(smmu);
1266 }
1267}
1268
1269static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1270 struct iommu_iotlb_gather *gather)
1271{
1272 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1273 struct arm_smmu_device *smmu = smmu_domain->smmu;
1274
1275 if (!smmu)
1276 return;
1277
1278 arm_smmu_rpm_get(smmu);
1279 if (smmu->version == ARM_SMMU_V2 ||
1280 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1281 arm_smmu_tlb_sync_context(smmu_domain);
1282 else
1283 arm_smmu_tlb_sync_global(smmu);
1284 arm_smmu_rpm_put(smmu);
1285}
1286
1287static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1288 dma_addr_t iova)
1289{
1290 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1291 struct arm_smmu_device *smmu = smmu_domain->smmu;
1292 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1293 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1294 struct device *dev = smmu->dev;
1295 void __iomem *reg;
1296 u32 tmp;
1297 u64 phys;
1298 unsigned long va, flags;
1299 int ret, idx = cfg->cbndx;
1300
1301 ret = arm_smmu_rpm_get(smmu);
1302 if (ret < 0)
1303 return 0;
1304
1305 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1306 va = iova & ~0xfffUL;
1307 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1308 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1309 else
1310 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1311
1312 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1313 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
1314 5, 50)) {
1315 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1316 dev_err(dev,
1317 "iova to phys timed out on %pad. Falling back to software table walk.\n",
1318 &iova);
1319 return ops->iova_to_phys(ops, iova);
1320 }
1321
1322 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
1323 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1324 if (phys & ARM_SMMU_CB_PAR_F) {
1325 dev_err(dev, "translation fault!\n");
1326 dev_err(dev, "PAR = 0x%llx\n", phys);
1327 return 0;
1328 }
1329
1330 arm_smmu_rpm_put(smmu);
1331
1332 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1333}
1334
1335static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1336 dma_addr_t iova)
1337{
1338 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1339 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1340
1341 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1342 return iova;
1343
1344 if (!ops)
1345 return 0;
1346
1347 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1348 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1349 return arm_smmu_iova_to_phys_hard(domain, iova);
1350
1351 return ops->iova_to_phys(ops, iova);
1352}
1353
1354static bool arm_smmu_capable(enum iommu_cap cap)
1355{
1356 switch (cap) {
1357 case IOMMU_CAP_CACHE_COHERENCY:
1358
1359
1360
1361
1362 return true;
1363 case IOMMU_CAP_NOEXEC:
1364 return true;
1365 default:
1366 return false;
1367 }
1368}
1369
1370static
1371struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
1372{
1373 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1374 fwnode);
1375 put_device(dev);
1376 return dev ? dev_get_drvdata(dev) : NULL;
1377}
1378
/*
 * Set up the per-master state for @dev: validate its stream IDs against
 * this SMMU's SMR fields, allocate the master cfg, claim stream-mapping
 * entries and link the device to the SMMU for runtime PM.
 */
static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
	struct arm_smmu_device *smmu = NULL;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);

		/*
		 * arm_smmu_register_legacy_master() may have allocated a
		 * brand-new fwspec for the device, so re-read it before use
		 * (even on failure, for the out_free path below).
		 */
		fwspec = dev_iommu_fwspec_get(dev);
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return ERR_PTR(-ENODEV);
	}

	/* Reject any stream ID or mask wider than the hardware supports */
	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	/*
	 * Size the cfg for one trailing smendx[] slot per stream ID:
	 * 'i' equals fwspec->num_ids after the loop above.
	 */
	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	dev_iommu_priv_set(dev, cfg);
	/* Mark every smendx slot unused until SMEs are allocated below */
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		goto out_cfg_free;

	ret = arm_smmu_master_alloc_smes(dev);
	arm_smmu_rpm_put(smmu);

	if (ret)
		goto out_cfg_free;

	/* Tie the master's runtime PM state to the SMMU's */
	device_link_add(dev, smmu->dev,
			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);

	return &smmu->iommu;

out_cfg_free:
	kfree(cfg);
out_free:
	iommu_fwspec_free(dev);
	return ERR_PTR(ret);
}
1452
1453static void arm_smmu_release_device(struct device *dev)
1454{
1455 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1456 struct arm_smmu_master_cfg *cfg;
1457 struct arm_smmu_device *smmu;
1458 int ret;
1459
1460 if (!fwspec || fwspec->ops != &arm_smmu_ops)
1461 return;
1462
1463 cfg = dev_iommu_priv_get(dev);
1464 smmu = cfg->smmu;
1465
1466 ret = arm_smmu_rpm_get(smmu);
1467 if (ret < 0)
1468 return;
1469
1470 arm_smmu_master_free_smes(cfg, fwspec);
1471
1472 arm_smmu_rpm_put(smmu);
1473
1474 dev_iommu_priv_set(dev, NULL);
1475 kfree(cfg);
1476 iommu_fwspec_free(dev);
1477}
1478
/*
 * Find or allocate the IOMMU group for @dev. Masters whose stream IDs
 * alias onto S2CR entries already claimed by a group must join that group;
 * conflicting existing groups make the device unusable.
 */
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu = cfg->smmu;
	struct iommu_group *group = NULL;
	int i, idx;

	/* All of this device's S2CRs must agree on a single existing group */
	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else if (dev_is_fsl_mc(dev))
		group = fsl_mc_device_group(dev);
	else
		group = generic_device_group(dev);

	/* Remember the group so later aliasing masters are put in it too */
	if (!IS_ERR(group))
		for_each_cfg_sme(cfg, fwspec, i, idx)
			smmu->s2crs[idx].group = group;

	return group;
}
1512
1513static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1514 enum iommu_attr attr, void *data)
1515{
1516 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1517
1518 switch(domain->type) {
1519 case IOMMU_DOMAIN_UNMANAGED:
1520 switch (attr) {
1521 case DOMAIN_ATTR_NESTING:
1522 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1523 return 0;
1524 default:
1525 return -ENODEV;
1526 }
1527 break;
1528 case IOMMU_DOMAIN_DMA:
1529 switch (attr) {
1530 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1531 *(int *)data = smmu_domain->non_strict;
1532 return 0;
1533 default:
1534 return -ENODEV;
1535 }
1536 break;
1537 default:
1538 return -EINVAL;
1539 }
1540}
1541
/*
 * Write a domain attribute. DOMAIN_ATTR_NESTING selects nested vs stage-1
 * translation (only before the domain is attached to an SMMU);
 * DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE toggles non-strict invalidation.
 */
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/* Serialise against domain finalisation */
	mutex_lock(&smmu_domain->init_mutex);

	switch(domain->type) {
	case IOMMU_DOMAIN_UNMANAGED:
		switch (attr) {
		case DOMAIN_ATTR_NESTING:
			/* The stage is fixed once the domain has an SMMU */
			if (smmu_domain->smmu) {
				ret = -EPERM;
				goto out_unlock;
			}

			if (*(int *)data)
				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
			else
				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			smmu_domain->non_strict = *(int *)data;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	default:
		ret = -EINVAL;
	}
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
1584
1585static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1586{
1587 u32 mask, fwid = 0;
1588
1589 if (args->args_count > 0)
1590 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1591
1592 if (args->args_count > 1)
1593 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1594 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1595 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
1596
1597 return iommu_fwspec_add_ids(dev, &fwid, 1);
1598}
1599
1600static void arm_smmu_get_resv_regions(struct device *dev,
1601 struct list_head *head)
1602{
1603 struct iommu_resv_region *region;
1604 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1605
1606 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1607 prot, IOMMU_RESV_SW_MSI);
1608 if (!region)
1609 return;
1610
1611 list_add_tail(®ion->list, head);
1612
1613 iommu_dma_get_resv_regions(dev, head);
1614}
1615
1616static int arm_smmu_def_domain_type(struct device *dev)
1617{
1618 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1619 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1620
1621 if (impl && impl->def_domain_type)
1622 return impl->def_domain_type(dev);
1623
1624 return 0;
1625}
1626
/*
 * IOMMU API callbacks for this driver. pgsize_bitmap starts as -1UL and is
 * filled in by arm_smmu_device_cfg_probe() once the first instance's
 * supported page sizes are known.
 */
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.probe_device		= arm_smmu_probe_device,
	.release_device		= arm_smmu_release_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= generic_iommu_put_resv_regions,
	.def_domain_type	= arm_smmu_def_domain_type,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
1648
/*
 * Bring the SMMU into a known state: clear fault status, reset all stream
 * mapping groups and context banks, invalidate the TLBs, then program and
 * enable sCR0. The register write ordering here is deliberate.
 */
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int i;
	u32 reg;

	/* Clear global FSR (write-to-clear) */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);

	/*
	 * Reset stream mapping groups: initial values mark all SMRn as
	 * invalid and all S2CRn as fault/bypass per s2cr_init_val.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		arm_smmu_write_context_bank(smmu, i);
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
	}

	/* Invalidate the TLB, just in case */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);

	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);

	/* Enable global fault reporting (sync and config faults) */
	reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
		ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);

	/* Private VMIDs, no TLB broadcasting */
	reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);

	/* Enable client access; unmatched streams fault or bypass per policy */
	reg &= ~ARM_SMMU_sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= ARM_SMMU_sCR0_USFCFG;
	else
		reg &= ~ARM_SMMU_sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~ARM_SMMU_sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(ARM_SMMU_sCR0_BSU);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= ARM_SMMU_sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= ARM_SMMU_sCR0_EXIDENABLE;

	/* Give the implementation layer its chance to apply quirks */
	if (smmu->impl && smmu->impl->reset)
		smmu->impl->reset(smmu);

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
}
1710
/*
 * Decode the 3-bit address-size field found in the SMMU ID registers into
 * a bit width; out-of-range encodings are treated as 48 bits.
 */
static int arm_smmu_id_size_to_bits(int size)
{
	static const int bits[] = { 32, 36, 40, 42, 44, 48 };

	if (size < 0 || size >= (int)(sizeof(bits) / sizeof(bits[0])))
		return 48;

	return bits[size];
}
1729
/*
 * Read the SMMU ID registers and size all software state accordingly:
 * translation stages, stream-mapping resources, context banks and
 * address-space / page-size capabilities.
 */
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int size;
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i, ret;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);

	if (id & ARM_SMMU_ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ARM_SMMU_ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ARM_SMMU_ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	/* Hardware address translation ops (ATS1PR) for stage-1 */
	if ((id & ARM_SMMU_ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * Trust the firmware's notion of table-walk coherency over the
	 * hardware ID register; warn if they disagree.
	 */
	cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
	}
	smmu->streamid_mask = size - 1;
	if (id & ARM_SMMU_ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised so every SMR starts out invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups", size);
	}

	/* S2CRs need explicit init since type 0 would mean translation */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 ||
	    !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
	if (smmu->numpage != 2 * size << smmu->pgshift)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
			2 * size << smmu->pgshift, smmu->numpage);
	/* From here on, numpage holds the page count, not the region size */
	smmu->numpage = size;

	smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
	smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
				 sizeof(*smmu->cbs), GFP_KERNEL);
	if (!smmu->cbs)
		return -ENOMEM;

	/* ID2 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
	smmu->ipa_size = size;

	/* Output (physical) address size */
	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
	smmu->pa_size = size;

	if (id & ARM_SMMU_ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * Limit the table walker's DMA mask to the output address size;
	 * the exact reachable range depends on the descriptor format, which
	 * is not known yet (and can vary per context bank).
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ARM_SMMU_ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ARM_SMMU_ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ARM_SMMU_ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	if (smmu->impl && smmu->impl->cfg_probe) {
		ret = smmu->impl->cfg_probe(smmu);
		if (ret)
			return ret;
	}

	/* Derive the supported page sizes from the table formats found */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	/* The API-wide bitmap is the intersection-friendly union of all SMMUs */
	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);


	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
1933
/* Per-compatible architecture version and implementation identifiers. */
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static const struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

/* Device-tree compatibles handled by this driver */
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1961
1962#ifdef CONFIG_ACPI
1963static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1964{
1965 int ret = 0;
1966
1967 switch (model) {
1968 case ACPI_IORT_SMMU_V1:
1969 case ACPI_IORT_SMMU_CORELINK_MMU400:
1970 smmu->version = ARM_SMMU_V1;
1971 smmu->model = GENERIC_SMMU;
1972 break;
1973 case ACPI_IORT_SMMU_CORELINK_MMU401:
1974 smmu->version = ARM_SMMU_V1_64K;
1975 smmu->model = GENERIC_SMMU;
1976 break;
1977 case ACPI_IORT_SMMU_V2:
1978 smmu->version = ARM_SMMU_V2;
1979 smmu->model = GENERIC_SMMU;
1980 break;
1981 case ACPI_IORT_SMMU_CORELINK_MMU500:
1982 smmu->version = ARM_SMMU_V2;
1983 smmu->model = ARM_MMU500;
1984 break;
1985 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1986 smmu->version = ARM_SMMU_V2;
1987 smmu->model = CAVIUM_SMMUV2;
1988 break;
1989 default:
1990 ret = -ENODEV;
1991 }
1992
1993 return ret;
1994}
1995
1996static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1997 struct arm_smmu_device *smmu)
1998{
1999 struct device *dev = smmu->dev;
2000 struct acpi_iort_node *node =
2001 *(struct acpi_iort_node **)dev_get_platdata(dev);
2002 struct acpi_iort_smmu *iort_smmu;
2003 int ret;
2004
2005
2006 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2007
2008 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2009 if (ret < 0)
2010 return ret;
2011
2012
2013 smmu->num_global_irqs = 1;
2014
2015 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2016 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2017
2018 return 0;
2019}
2020#else
/* ACPI support compiled out: always report "no device". */
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
2026#endif
2027
/*
 * Fill in @smmu from its device-tree node: global IRQ count, version/model
 * from the compatible match data, binding style and walk coherency.
 */
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	/*
	 * The deprecated "mmu-masters" binding and the generic binding are
	 * mutually exclusive system-wide: whichever style is seen first
	 * wins, and any subsequent mismatch refuses to probe.
	 */
	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding) {
			pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
				  IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
		}
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
2064
/*
 * Install (or, with @ops == NULL, remove) the SMMU as the IOMMU for every
 * bus type we may serve. On failure, previously-claimed buses are unwound
 * in reverse order via the goto chain below.
 */
static int arm_smmu_bus_init(struct iommu_ops *ops)
{
	int err;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type)) {
		err = bus_set_iommu(&platform_bus_type, ops);
		if (err)
			return err;
	}
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype)) {
		err = bus_set_iommu(&amba_bustype, ops);
		if (err)
			goto err_reset_platform_ops;
	}
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		err = bus_set_iommu(&pci_bus_type, ops);
		if (err)
			goto err_reset_amba_ops;
	}
#endif
#ifdef CONFIG_FSL_MC_BUS
	if (!iommu_present(&fsl_mc_bus_type)) {
		err = bus_set_iommu(&fsl_mc_bus_type, ops);
		if (err)
			goto err_reset_pci_ops;
	}
#endif
	return 0;

/* Labels carry __maybe_unused since some targets are #ifdef'd away */
err_reset_pci_ops: __maybe_unused;
#ifdef CONFIG_PCI
	bus_set_iommu(&pci_bus_type, NULL);
#endif
err_reset_amba_ops: __maybe_unused;
#ifdef CONFIG_ARM_AMBA
	bus_set_iommu(&amba_bustype, NULL);
#endif
err_reset_platform_ops: __maybe_unused;
	bus_set_iommu(&platform_bus_type, NULL);
	return err;
}
2110
2111static int arm_smmu_device_probe(struct platform_device *pdev)
2112{
2113 struct resource *res;
2114 resource_size_t ioaddr;
2115 struct arm_smmu_device *smmu;
2116 struct device *dev = &pdev->dev;
2117 int num_irqs, i, err;
2118 irqreturn_t (*global_fault)(int irq, void *dev);
2119
2120 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2121 if (!smmu) {
2122 dev_err(dev, "failed to allocate arm_smmu_device\n");
2123 return -ENOMEM;
2124 }
2125 smmu->dev = dev;
2126
2127 if (dev->of_node)
2128 err = arm_smmu_device_dt_probe(pdev, smmu);
2129 else
2130 err = arm_smmu_device_acpi_probe(pdev, smmu);
2131
2132 if (err)
2133 return err;
2134
2135 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2136 ioaddr = res->start;
2137 smmu->base = devm_ioremap_resource(dev, res);
2138 if (IS_ERR(smmu->base))
2139 return PTR_ERR(smmu->base);
2140
2141
2142
2143
2144 smmu->numpage = resource_size(res);
2145
2146 smmu = arm_smmu_impl_init(smmu);
2147 if (IS_ERR(smmu))
2148 return PTR_ERR(smmu);
2149
2150 num_irqs = 0;
2151 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2152 num_irqs++;
2153 if (num_irqs > smmu->num_global_irqs)
2154 smmu->num_context_irqs++;
2155 }
2156
2157 if (!smmu->num_context_irqs) {
2158 dev_err(dev, "found %d interrupts but expected at least %d\n",
2159 num_irqs, smmu->num_global_irqs + 1);
2160 return -ENODEV;
2161 }
2162
2163 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
2164 GFP_KERNEL);
2165 if (!smmu->irqs) {
2166 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2167 return -ENOMEM;
2168 }
2169
2170 for (i = 0; i < num_irqs; ++i) {
2171 int irq = platform_get_irq(pdev, i);
2172
2173 if (irq < 0)
2174 return -ENODEV;
2175 smmu->irqs[i] = irq;
2176 }
2177
2178 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2179 if (err < 0) {
2180 dev_err(dev, "failed to get clocks %d\n", err);
2181 return err;
2182 }
2183 smmu->num_clks = err;
2184
2185 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2186 if (err)
2187 return err;
2188
2189 err = arm_smmu_device_cfg_probe(smmu);
2190 if (err)
2191 return err;
2192
2193 if (smmu->version == ARM_SMMU_V2) {
2194 if (smmu->num_context_banks > smmu->num_context_irqs) {
2195 dev_err(dev,
2196 "found only %d context irq(s) but %d required\n",
2197 smmu->num_context_irqs, smmu->num_context_banks);
2198 return -ENODEV;
2199 }
2200
2201
2202 smmu->num_context_irqs = smmu->num_context_banks;
2203 }
2204
2205 if (smmu->impl && smmu->impl->global_fault)
2206 global_fault = smmu->impl->global_fault;
2207 else
2208 global_fault = arm_smmu_global_fault;
2209
2210 for (i = 0; i < smmu->num_global_irqs; ++i) {
2211 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2212 global_fault,
2213 IRQF_SHARED,
2214 "arm-smmu global fault",
2215 smmu);
2216 if (err) {
2217 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2218 i, smmu->irqs[i]);
2219 return err;
2220 }
2221 }
2222
2223 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2224 "smmu.%pa", &ioaddr);
2225 if (err) {
2226 dev_err(dev, "Failed to register iommu in sysfs\n");
2227 return err;
2228 }
2229
2230 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2231 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2232
2233 err = iommu_device_register(&smmu->iommu);
2234 if (err) {
2235 dev_err(dev, "Failed to register iommu\n");
2236 return err;
2237 }
2238
2239 platform_set_drvdata(pdev, smmu);
2240 arm_smmu_device_reset(smmu);
2241 arm_smmu_test_smr_masks(smmu);
2242
2243
2244
2245
2246
2247
2248
2249 if (dev->pm_domain) {
2250 pm_runtime_set_active(dev);
2251 pm_runtime_enable(dev);
2252 }
2253
2254
2255
2256
2257
2258
2259 if (!using_legacy_binding)
2260 return arm_smmu_bus_init(&arm_smmu_ops);
2261
2262 return 0;
2263}
2264
/*
 * Unregister the SMMU, disable translation (CLIENTPD) and shut down its
 * clocks. Warns if context banks were still live at removal time.
 */
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_notice(&pdev->dev, "disabling translation\n");

	arm_smmu_bus_init(NULL);
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	arm_smmu_rpm_get(smmu);
	/* Turn the thing off: private DNS, raider of the... no, wait - disable clients */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
	arm_smmu_rpm_put(smmu);

	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_force_suspend(smmu->dev);
	else
		clk_bulk_disable(smmu->num_clks, smmu->clks);

	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
	return 0;
}
2292
/* Quiesce the SMMU at system shutdown by reusing the remove path. */
static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}
2297
2298static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
2299{
2300 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2301 int ret;
2302
2303 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2304 if (ret)
2305 return ret;
2306
2307 arm_smmu_device_reset(smmu);
2308
2309 return 0;
2310}
2311
2312static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
2313{
2314 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2315
2316 clk_bulk_disable(smmu->num_clks, smmu->clks);
2317
2318 return 0;
2319}
2320
2321static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2322{
2323 if (pm_runtime_suspended(dev))
2324 return 0;
2325
2326 return arm_smmu_runtime_resume(dev);
2327}
2328
2329static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2330{
2331 if (pm_runtime_suspended(dev))
2332 return 0;
2333
2334 return arm_smmu_runtime_suspend(dev);
2335}
2336
/* System sleep is layered on top of the runtime PM callbacks above. */
static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};
2342
/*
 * Platform driver glue. Bind attributes are suppressed because unbinding
 * an active SMMU would leave its masters without translation.
 */
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu",
		.of_match_table		= arm_smmu_of_match,
		.pm			= &arm_smmu_pm_ops,
		.suppress_bind_attrs    = true,
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
MODULE_ALIAS("platform:arm-smmu");
MODULE_LICENSE("GPL v2");
2360