// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

30#define pr_fmt(fmt) "arm-smmu: " fmt
31
32#include <linux/acpi.h>
33#include <linux/acpi_iort.h>
34#include <linux/bitfield.h>
35#include <linux/delay.h>
36#include <linux/dma-iommu.h>
37#include <linux/dma-mapping.h>
38#include <linux/err.h>
39#include <linux/interrupt.h>
40#include <linux/io.h>
41#include <linux/iopoll.h>
42#include <linux/module.h>
43#include <linux/of.h>
44#include <linux/of_address.h>
45#include <linux/of_device.h>
46#include <linux/of_iommu.h>
47#include <linux/pci.h>
48#include <linux/platform_device.h>
49#include <linux/pm_runtime.h>
50#include <linux/ratelimit.h>
51#include <linux/slab.h>
52
53#include <linux/amba/bus.h>
54
55#include "arm-smmu.h"
56
/*
 * Some Qualcomm arm64 platforms which appear to expose their SMMU global
 * register space are, in fact, mediating it via a hypervisor which traps and
 * emulates register accesses. Some deployed versions of that trapping code
 * mishandle stores using the zero register as the source, so use a non-zero
 * dummy value for writes whose value is ignored.
 */
#define QCOM_DUMMY_VAL -1
65
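/*
 * Fixed IOVA window reserved for MSIs; advertised to the IOMMU core as a
 * software-managed MSI region via arm_smmu_get_resv_regions().
 */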
66#define MSI_IOVA_BASE 0x8000000
67#define MSI_IOVA_LENGTH 0x100000
68
69static int force_stage;
70module_param(force_stage, int, S_IRUGO);
71MODULE_PARM_DESC(force_stage,
72 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
73static bool disable_bypass =
74 IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
75module_param(disable_bypass, bool, S_IRUGO);
76MODULE_PARM_DESC(disable_bypass,
77 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
78
79#define s2cr_init_val (struct arm_smmu_s2cr){ \
80 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
81}
82
83static bool using_legacy_binding, using_generic_binding;
84
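/*
 * Runtime PM helpers: these become no-ops when runtime PM is not enabled
 * for the SMMU device, so callers need not care either way.
 */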
85static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
86{
87 if (pm_runtime_enabled(smmu->dev))
88 return pm_runtime_get_sync(smmu->dev);
89
90 return 0;
91}
92
93static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
94{
95 if (pm_runtime_enabled(smmu->dev))
96 pm_runtime_put_autosuspend(smmu->dev);
97}
98
99static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
100{
101 return container_of(dom, struct arm_smmu_domain, domain);
102}
103
104static struct platform_driver arm_smmu_driver;
105static struct iommu_ops arm_smmu_ops;
106
107#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
108static int arm_smmu_bus_init(struct iommu_ops *ops);
109
110static struct device_node *dev_get_dev_node(struct device *dev)
111{
112 if (dev_is_pci(dev)) {
113 struct pci_bus *bus = to_pci_dev(dev)->bus;
114
115 while (!pci_is_root_bus(bus))
116 bus = bus->parent;
117 return of_node_get(bus->bridge->parent->of_node);
118 }
119
120 return of_node_get(dev->of_node);
121}
122
123static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
124{
125 *((__be32 *)data) = cpu_to_be32(alias);
126 return 0;
127}
128
129static int __find_legacy_master_phandle(struct device *dev, void *data)
130{
131 struct of_phandle_iterator *it = *(void **)data;
132 struct device_node *np = it->node;
133 int err;
134
135 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
136 "#stream-id-cells", -1)
137 if (it->node == np) {
138 *(void **)data = dev;
139 return 1;
140 }
141 it->node = np;
142 return err == -ENOENT ? 0 : err;
143}
144
145static int arm_smmu_register_legacy_master(struct device *dev,
146 struct arm_smmu_device **smmu)
147{
148 struct device *smmu_dev;
149 struct device_node *np;
150 struct of_phandle_iterator it;
	void *data = &it;
152 u32 *sids;
153 __be32 pci_sid;
154 int err;
155
156 np = dev_get_dev_node(dev);
157 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
158 of_node_put(np);
159 return -ENODEV;
160 }
161
162 it.node = np;
163 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
164 __find_legacy_master_phandle);
165 smmu_dev = data;
166 of_node_put(np);
167 if (err == 0)
168 return -ENODEV;
169 if (err < 0)
170 return err;
171
172 if (dev_is_pci(dev)) {
173
174 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
175 &pci_sid);
176 it.cur = &pci_sid;
177 it.cur_count = 1;
178 }
179
180 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
181 &arm_smmu_ops);
182 if (err)
183 return err;
184
185 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
186 if (!sids)
187 return -ENOMEM;
188
189 *smmu = dev_get_drvdata(smmu_dev);
190 of_phandle_iterator_args(&it, sids, it.cur_count);
191 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
192 kfree(sids);
193 return err;
194}
195
/*
 * With the legacy DT binding in play, we have no guarantees about probe
 * order, but then we're also not doing default domains, so we can delay
 * setting bus ops until we're sure every possible SMMU is ready, and that
 * way ensure that no probe_device() calls get missed.
 */
202static int arm_smmu_legacy_bus_init(void)
203{
204 if (using_legacy_binding)
205 return arm_smmu_bus_init(&arm_smmu_ops);
206 return 0;
207}
208device_initcall_sync(arm_smmu_legacy_bus_init);
209#else
210static int arm_smmu_register_legacy_master(struct device *dev,
211 struct arm_smmu_device **smmu)
212{
213 return -ENODEV;
214}
215#endif
216
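/* Release a context bank index back to the allocation bitmap */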
217static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
218{
219 clear_bit(idx, map);
220}

/* Wait for any pending TLB invalidations to complete */
223static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
224 int sync, int status)
225{
226 unsigned int spin_cnt, delay;
227 u32 reg;
228
229 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
230 return smmu->impl->tlb_sync(smmu, page, sync, status);
231
232 arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
233 for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
234 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
235 reg = arm_smmu_readl(smmu, page, status);
236 if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
237 return;
238 cpu_relax();
239 }
240 udelay(delay);
241 }
242 dev_err_ratelimited(smmu->dev,
243 "TLB sync timed out -- SMMU may be deadlocked\n");
244}
245
246static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
247{
248 unsigned long flags;
249
250 spin_lock_irqsave(&smmu->global_sync_lock, flags);
251 __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
252 ARM_SMMU_GR0_sTLBGSTATUS);
253 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
254}
255
256static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
257{
258 struct arm_smmu_device *smmu = smmu_domain->smmu;
259 unsigned long flags;
260
261 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
262 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
263 ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
264 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
265}
266
267static void arm_smmu_tlb_inv_context_s1(void *cookie)
268{
269 struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible to the SMMU before it is issued.
	 */
274 wmb();
275 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
276 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
277 arm_smmu_tlb_sync_context(smmu_domain);
278}
279
280static void arm_smmu_tlb_inv_context_s2(void *cookie)
281{
282 struct arm_smmu_domain *smmu_domain = cookie;
283 struct arm_smmu_device *smmu = smmu_domain->smmu;
284
	/* As above: make prior PTE updates visible before the TLBI write */
286 wmb();
287 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
288 arm_smmu_tlb_sync_global(smmu);
289}
290
291static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
292 size_t granule, void *cookie, int reg)
293{
294 struct arm_smmu_domain *smmu_domain = cookie;
295 struct arm_smmu_device *smmu = smmu_domain->smmu;
296 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
297 int idx = cfg->cbndx;
298
299 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
300 wmb();
301
302 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
303 iova = (iova >> 12) << 12;
304 iova |= cfg->asid;
305 do {
306 arm_smmu_cb_write(smmu, idx, reg, iova);
307 iova += granule;
308 } while (size -= granule);
309 } else {
310 iova >>= 12;
311 iova |= (u64)cfg->asid << 48;
312 do {
313 arm_smmu_cb_writeq(smmu, idx, reg, iova);
314 iova += granule >> 12;
315 } while (size -= granule);
316 }
317}
318
319static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
320 size_t granule, void *cookie, int reg)
321{
322 struct arm_smmu_domain *smmu_domain = cookie;
323 struct arm_smmu_device *smmu = smmu_domain->smmu;
324 int idx = smmu_domain->cfg.cbndx;
325
326 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
327 wmb();
328
329 iova >>= 12;
330 do {
331 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
332 arm_smmu_cb_writeq(smmu, idx, reg, iova);
333 else
334 arm_smmu_cb_write(smmu, idx, reg, iova);
335 iova += granule >> 12;
336 } while (size -= granule);
337}
338
339static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
340 size_t granule, void *cookie)
341{
342 arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
343 ARM_SMMU_CB_S1_TLBIVA);
344 arm_smmu_tlb_sync_context(cookie);
345}
346
347static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
348 unsigned long iova, size_t granule,
349 void *cookie)
350{
351 arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
352 ARM_SMMU_CB_S1_TLBIVAL);
353}
354
355static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
356 size_t granule, void *cookie)
357{
358 arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
359 ARM_SMMU_CB_S2_TLBIIPAS2);
360 arm_smmu_tlb_sync_context(cookie);
361}
362
363static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
364 unsigned long iova, size_t granule,
365 void *cookie)
366{
367 arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
368 ARM_SMMU_CB_S2_TLBIIPAS2L);
369}
370
371static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
372 size_t granule, void *cookie)
373{
374 arm_smmu_tlb_inv_context_s2(cookie);
375}
376
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far
 * ahead of the sync as possible is significant, hence we don't simply make
 * this a no-op and rely on arm_smmu_tlb_inv_context_s2() at sync time.
 */
383static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
384 unsigned long iova, size_t granule,
385 void *cookie)
386{
387 struct arm_smmu_domain *smmu_domain = cookie;
388 struct arm_smmu_device *smmu = smmu_domain->smmu;
389
390 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
391 wmb();
392
393 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
394}
395
396static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
397 .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
398 .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
399 .tlb_add_page = arm_smmu_tlb_add_page_s1,
400};
401
402static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
403 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
404 .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
405 .tlb_add_page = arm_smmu_tlb_add_page_s2,
406};
407
408static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
409 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
410 .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2_v1,
411 .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
412};
413
414static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
415{
416 u32 fsr, fsynr, cbfrsynra;
417 unsigned long iova;
418 struct iommu_domain *domain = dev;
419 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
420 struct arm_smmu_device *smmu = smmu_domain->smmu;
421 int idx = smmu_domain->cfg.cbndx;
422
423 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
424 if (!(fsr & ARM_SMMU_FSR_FAULT))
425 return IRQ_NONE;
426
427 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
428 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
429 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
430
431 dev_err_ratelimited(smmu->dev,
432 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
433 fsr, iova, fsynr, cbfrsynra, idx);
434
435 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
436 return IRQ_HANDLED;
437}
438
439static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
440{
441 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
442 struct arm_smmu_device *smmu = dev;
443 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
444 DEFAULT_RATELIMIT_BURST);
445
446 gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
447 gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
448 gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
449 gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
450
451 if (!gfsr)
452 return IRQ_NONE;
453
454 if (__ratelimit(&rs)) {
455 if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
456 (gfsr & ARM_SMMU_sGFSR_USF))
457 dev_err(smmu->dev,
458 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
459 (u16)gfsynr1);
460 else
461 dev_err(smmu->dev,
462 "Unexpected global fault, this could be serious\n");
463 dev_err(smmu->dev,
464 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
465 gfsr, gfsynr0, gfsynr1, gfsynr2);
466 }
467
468 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
469 return IRQ_HANDLED;
470}
471
472static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
473 struct io_pgtable_cfg *pgtbl_cfg)
474{
475 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
476 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
477 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
478
479 cb->cfg = cfg;
480
481
482 if (stage1) {
483 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
484 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
485 } else {
486 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
487 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
488 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
489 cb->tcr[1] |= ARM_SMMU_TCR2_AS;
490 else
491 cb->tcr[0] |= ARM_SMMU_TCR_EAE;
492 }
493 } else {
494 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
495 }
496
497
498 if (stage1) {
499 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
500 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
501 cb->ttbr[1] = 0;
502 } else {
503 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
504 cfg->asid);
505 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
506 cfg->asid);
507
508 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
509 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
510 else
511 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
512 }
513 } else {
514 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
515 }
516
517
518 if (stage1) {
519 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
520 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
521 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
522 } else {
523 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
524 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
525 }
526 }
527}
528
529void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
530{
531 u32 reg;
532 bool stage1;
533 struct arm_smmu_cb *cb = &smmu->cbs[idx];
534 struct arm_smmu_cfg *cfg = cb->cfg;
535
	/* Unassigned context banks only need disabling */
537 if (!cfg) {
538 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
539 return;
540 }
541
542 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
543
544
545 if (smmu->version > ARM_SMMU_V1) {
546 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
547 reg = ARM_SMMU_CBA2R_VA64;
548 else
549 reg = 0;
550
551 if (smmu->features & ARM_SMMU_FEAT_VMID16)
552 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
553
554 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
555 }
556
557
558 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
559 if (smmu->version < ARM_SMMU_V2)
560 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
566 if (stage1) {
567 reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
568 ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
569 FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
570 ARM_SMMU_CBAR_S1_MEMATTR_WB);
571 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
573 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
574 }
575 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
576
	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
582 if (stage1 && smmu->version > ARM_SMMU_V1)
583 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
584 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
585
586
587 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
588 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
589 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
590 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
591 } else {
592 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
593 if (stage1)
594 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
595 cb->ttbr[1]);
596 }
597
598
599 if (stage1) {
600 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
601 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
602 }
603
604
605 reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
606 ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
607 if (stage1)
608 reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
609 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
610 reg |= ARM_SMMU_SCTLR_E;
611
612 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
613}
614
615static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
616 struct arm_smmu_device *smmu,
617 struct device *dev, unsigned int start)
618{
619 if (smmu->impl && smmu->impl->alloc_context_bank)
620 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
621
622 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
623}
624
625static int arm_smmu_init_domain_context(struct iommu_domain *domain,
626 struct arm_smmu_device *smmu,
627 struct device *dev)
628{
629 int irq, start, ret = 0;
630 unsigned long ias, oas;
631 struct io_pgtable_ops *pgtbl_ops;
632 struct io_pgtable_cfg pgtbl_cfg;
633 enum io_pgtable_fmt fmt;
634 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
635 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
636 irqreturn_t (*context_fault)(int irq, void *dev);
637
638 mutex_lock(&smmu_domain->init_mutex);
639 if (smmu_domain->smmu)
640 goto out_unlock;
641
642 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
643 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
644 smmu_domain->smmu = smmu;
645 goto out_unlock;
646 }
647
	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            N
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
666 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
667 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
668 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
669 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
670
	/*
	 * Choosing a suitable context format is rather fiddly. Until we have
	 * a way for the caller to express a preference, just aim for the
	 * closest match to the rest of the system, and assume that AArch64
	 * format support is a superset of AArch32 support.
	 */
679 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
680 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
681 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
682 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
683 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
684 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
685 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
686 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
687 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
688 ARM_SMMU_FEAT_FMT_AARCH64_16K |
689 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
690 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
691
692 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
693 ret = -EINVAL;
694 goto out_unlock;
695 }
696
697 switch (smmu_domain->stage) {
698 case ARM_SMMU_DOMAIN_S1:
699 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
700 start = smmu->num_s2_context_banks;
701 ias = smmu->va_size;
702 oas = smmu->ipa_size;
703 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
704 fmt = ARM_64_LPAE_S1;
705 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
706 fmt = ARM_32_LPAE_S1;
707 ias = min(ias, 32UL);
708 oas = min(oas, 40UL);
709 } else {
710 fmt = ARM_V7S;
711 ias = min(ias, 32UL);
712 oas = min(oas, 32UL);
713 }
714 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
715 break;
716 case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
721 case ARM_SMMU_DOMAIN_S2:
722 cfg->cbar = CBAR_TYPE_S2_TRANS;
723 start = 0;
724 ias = smmu->ipa_size;
725 oas = smmu->pa_size;
726 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
727 fmt = ARM_64_LPAE_S2;
728 } else {
729 fmt = ARM_32_LPAE_S2;
730 ias = min(ias, 40UL);
731 oas = min(oas, 40UL);
732 }
733 if (smmu->version == ARM_SMMU_V2)
734 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
735 else
736 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
737 break;
738 default:
739 ret = -EINVAL;
740 goto out_unlock;
741 }
742
743 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
744 if (ret < 0) {
745 goto out_unlock;
746 }
747
748 smmu_domain->smmu = smmu;
749
750 cfg->cbndx = ret;
751 if (smmu->version < ARM_SMMU_V2) {
752 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
753 cfg->irptndx %= smmu->num_context_irqs;
754 } else {
755 cfg->irptndx = cfg->cbndx;
756 }
757
758 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
759 cfg->vmid = cfg->cbndx + 1;
760 else
761 cfg->asid = cfg->cbndx;
762
763 pgtbl_cfg = (struct io_pgtable_cfg) {
764 .pgsize_bitmap = smmu->pgsize_bitmap,
765 .ias = ias,
766 .oas = oas,
767 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
768 .tlb = smmu_domain->flush_ops,
769 .iommu_dev = smmu->dev,
770 };
771
772 if (smmu->impl && smmu->impl->init_context) {
773 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
774 if (ret)
775 goto out_clear_smmu;
776 }
777
778 if (smmu_domain->pgtbl_cfg.quirks)
779 pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks;
780
781 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
782 if (!pgtbl_ops) {
783 ret = -ENOMEM;
784 goto out_clear_smmu;
785 }
786
	/* Update the domain's page sizes to reflect the page table format */
788 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
789
790 if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
791 domain->geometry.aperture_start = ~0UL << ias;
792 domain->geometry.aperture_end = ~0UL;
793 } else {
794 domain->geometry.aperture_end = (1UL << ias) - 1;
795 }
796
797 domain->geometry.force_aperture = true;
798
	/* Initialise the context bank with our page table cfg */
800 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
801 arm_smmu_write_context_bank(smmu, cfg->cbndx);
802
	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
807 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
808
809 if (smmu->impl && smmu->impl->context_fault)
810 context_fault = smmu->impl->context_fault;
811 else
812 context_fault = arm_smmu_context_fault;
813
814 ret = devm_request_irq(smmu->dev, irq, context_fault,
815 IRQF_SHARED, "arm-smmu-context-fault", domain);
816 if (ret < 0) {
817 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
818 cfg->irptndx, irq);
819 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
820 }
821
822 mutex_unlock(&smmu_domain->init_mutex);
823
	/* Publish page table ops for map/unmap */
825 smmu_domain->pgtbl_ops = pgtbl_ops;
826 return 0;
827
828out_clear_smmu:
829 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
830 smmu_domain->smmu = NULL;
831out_unlock:
832 mutex_unlock(&smmu_domain->init_mutex);
833 return ret;
834}
835
836static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
837{
838 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
839 struct arm_smmu_device *smmu = smmu_domain->smmu;
840 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
841 int ret, irq;
842
843 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
844 return;
845
846 ret = arm_smmu_rpm_get(smmu);
847 if (ret < 0)
848 return;
849
	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
854 smmu->cbs[cfg->cbndx].cfg = NULL;
855 arm_smmu_write_context_bank(smmu, cfg->cbndx);
856
857 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
858 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
859 devm_free_irq(smmu->dev, irq, domain);
860 }
861
862 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
863 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
864
865 arm_smmu_rpm_put(smmu);
866}
867
868static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
869{
870 struct arm_smmu_domain *smmu_domain;
871
872 if (type != IOMMU_DOMAIN_UNMANAGED &&
873 type != IOMMU_DOMAIN_DMA &&
874 type != IOMMU_DOMAIN_IDENTITY)
875 return NULL;
876
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
881 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
882 if (!smmu_domain)
883 return NULL;
884
885 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
886 iommu_get_dma_cookie(&smmu_domain->domain))) {
887 kfree(smmu_domain);
888 return NULL;
889 }
890
891 mutex_init(&smmu_domain->init_mutex);
892 spin_lock_init(&smmu_domain->cb_lock);
893
894 return &smmu_domain->domain;
895}
896
897static void arm_smmu_domain_free(struct iommu_domain *domain)
898{
899 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
900
	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
905 iommu_put_dma_cookie(domain);
906 arm_smmu_destroy_domain_context(domain);
907 kfree(smmu_domain);
908}
909
910static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
911{
912 struct arm_smmu_smr *smr = smmu->smrs + idx;
913 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
914 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
915
916 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
917 reg |= ARM_SMMU_SMR_VALID;
918 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
919}
920
921static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
922{
923 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
924 u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
925 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
926 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
927
928 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
929 smmu->smrs[idx].valid)
930 reg |= ARM_SMMU_S2CR_EXIDVALID;
931 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
932}
933
934static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
935{
936 arm_smmu_write_s2cr(smmu, idx);
937 if (smmu->smrs)
938 arm_smmu_write_smr(smmu, idx);
939}
940
/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
945static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
946{
947 u32 smr;
948 int i;
949
950 if (!smmu->smrs)
951 return;
952
	/*
	 * If we've had to accommodate firmware memory regions, we may
	 * have live SMRs by now; tread carefully...
	 *
	 * Somewhat perversely, not having a free SMR for this test implies
	 * we can get away without it anyway, as we'll only be able to
	 * 'allocate' these SMRs for the ID/mask values we're already
	 * trusting to be OK.
	 */
960 for (i = 0; i < smmu->num_mapping_groups; i++)
961 if (!smmu->smrs[i].valid)
962 goto smr_ok;
963 return;
964smr_ok:
	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
970 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
971 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
972 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
973 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
974
975 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
976 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
977 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
978 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
979}
980
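/*
 * Find a stream map entry for the given ID/mask: with stream indexing the ID
 * is its own index; otherwise return the index of a compatible existing SMR
 * or of a free one, -ENOSPC if the table is full, or -EINVAL if the new
 * entry would conflict with an existing one.
 */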
981static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
982{
983 struct arm_smmu_smr *smrs = smmu->smrs;
984 int i, free_idx = -ENOSPC;
985
986
987 if (!smrs)
988 return id;
989
990
991 for (i = 0; i < smmu->num_mapping_groups; ++i) {
992 if (!smrs[i].valid) {
993
994
995
996
997 if (free_idx < 0)
998 free_idx = i;
999 continue;
1000 }
1001
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
1008 if ((mask & smrs[i].mask) == mask &&
1009 !((id ^ smrs[i].id) & ~smrs[i].mask))
1010 return i;
1011
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
1016 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1017 return -EINVAL;
1018 }
1019
1020 return free_idx;
1021}
1022
1023static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1024{
1025 if (--smmu->s2crs[idx].count)
1026 return false;
1027
1028 smmu->s2crs[idx] = s2cr_init_val;
1029 if (smmu->smrs)
1030 smmu->smrs[idx].valid = false;
1031
1032 return true;
1033}
1034
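/*
 * Claim stream map entries for each of the master's stream IDs, sharing
 * existing entries where possible, then write the resulting SMR/S2CR state
 * to the hardware.
 */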
1035static int arm_smmu_master_alloc_smes(struct device *dev)
1036{
1037 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1038 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1039 struct arm_smmu_device *smmu = cfg->smmu;
1040 struct arm_smmu_smr *smrs = smmu->smrs;
1041 int i, idx, ret;
1042
1043 mutex_lock(&smmu->stream_map_mutex);
1044
1045 for_each_cfg_sme(cfg, fwspec, i, idx) {
1046 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1047 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1048
1049 if (idx != INVALID_SMENDX) {
1050 ret = -EEXIST;
1051 goto out_err;
1052 }
1053
1054 ret = arm_smmu_find_sme(smmu, sid, mask);
1055 if (ret < 0)
1056 goto out_err;
1057
1058 idx = ret;
1059 if (smrs && smmu->s2crs[idx].count == 0) {
1060 smrs[idx].id = sid;
1061 smrs[idx].mask = mask;
1062 smrs[idx].valid = true;
1063 }
1064 smmu->s2crs[idx].count++;
1065 cfg->smendx[i] = (s16)idx;
1066 }
1067
1068
1069 for_each_cfg_sme(cfg, fwspec, i, idx)
1070 arm_smmu_write_sme(smmu, idx);
1071
1072 mutex_unlock(&smmu->stream_map_mutex);
1073 return 0;
1074
1075out_err:
1076 while (i--) {
1077 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1078 cfg->smendx[i] = INVALID_SMENDX;
1079 }
1080 mutex_unlock(&smmu->stream_map_mutex);
1081 return ret;
1082}
1083
1084static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
1085 struct iommu_fwspec *fwspec)
1086{
1087 struct arm_smmu_device *smmu = cfg->smmu;
1088 int i, idx;
1089
1090 mutex_lock(&smmu->stream_map_mutex);
1091 for_each_cfg_sme(cfg, fwspec, i, idx) {
1092 if (arm_smmu_free_sme(smmu, idx))
1093 arm_smmu_write_sme(smmu, idx);
1094 cfg->smendx[i] = INVALID_SMENDX;
1095 }
1096 mutex_unlock(&smmu->stream_map_mutex);
1097}
1098
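/*
 * Point each of the master's stream map entries at the domain's context
 * bank, or mark them as bypass for an identity domain.
 */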
1099static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1100 struct arm_smmu_master_cfg *cfg,
1101 struct iommu_fwspec *fwspec)
1102{
1103 struct arm_smmu_device *smmu = smmu_domain->smmu;
1104 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1105 u8 cbndx = smmu_domain->cfg.cbndx;
1106 enum arm_smmu_s2cr_type type;
1107 int i, idx;
1108
1109 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1110 type = S2CR_TYPE_BYPASS;
1111 else
1112 type = S2CR_TYPE_TRANS;
1113
1114 for_each_cfg_sme(cfg, fwspec, i, idx) {
1115 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1116 continue;
1117
1118 s2cr[idx].type = type;
1119 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1120 s2cr[idx].cbndx = cbndx;
1121 arm_smmu_write_s2cr(smmu, idx);
1122 }
1123 return 0;
1124}
1125
1126static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1127{
1128 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1129 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1130 struct arm_smmu_master_cfg *cfg;
1131 struct arm_smmu_device *smmu;
1132 int ret;
1133
1134 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
1135 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1136 return -ENXIO;
1137 }
1138
1139
1140
1141
1142
1143
1144
1145
1146 cfg = dev_iommu_priv_get(dev);
1147 if (!cfg)
1148 return -ENODEV;
1149
1150 smmu = cfg->smmu;
1151
1152 ret = arm_smmu_rpm_get(smmu);
1153 if (ret < 0)
1154 return ret;
1155
1156
1157 ret = arm_smmu_init_domain_context(domain, smmu, dev);
1158 if (ret < 0)
1159 goto rpm_put;
1160
	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
1165 if (smmu_domain->smmu != smmu) {
1166 dev_err(dev,
1167 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1168 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1169 ret = -EINVAL;
1170 goto rpm_put;
1171 }
1172
1173
1174 ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
1175
	/*
	 * Setup an autosuspend delay to avoid bouncing runpm state.
	 * Otherwise, if a driver for a suspended consumer device
	 * unmaps buffers, it will runpm resume/suspend for each one.
	 *
	 * For example, when used by a GPU device, when an application
	 * or game exits, it can trigger unmapping 100s or 1000s of
	 * buffers. So, if the mapped buffers are freed in one batch,
	 * without the autosuspend delay, the smmu runpm state can be
	 * toggled on/off for each buffer.
	 */
1187 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
1188 pm_runtime_use_autosuspend(smmu->dev);
1189
1190rpm_put:
1191 arm_smmu_rpm_put(smmu);
1192 return ret;
1193}
1194
1195static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1196 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1197{
1198 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1199 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1200 int ret;
1201
1202 if (!ops)
1203 return -ENODEV;
1204
1205 arm_smmu_rpm_get(smmu);
1206 ret = ops->map(ops, iova, paddr, size, prot, gfp);
1207 arm_smmu_rpm_put(smmu);
1208
1209 return ret;
1210}
1211
1212static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1213 size_t size, struct iommu_iotlb_gather *gather)
1214{
1215 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1216 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1217 size_t ret;
1218
1219 if (!ops)
1220 return 0;
1221
1222 arm_smmu_rpm_get(smmu);
1223 ret = ops->unmap(ops, iova, size, gather);
1224 arm_smmu_rpm_put(smmu);
1225
1226 return ret;
1227}
1228
1229static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1230{
1231 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1232 struct arm_smmu_device *smmu = smmu_domain->smmu;
1233
1234 if (smmu_domain->flush_ops) {
1235 arm_smmu_rpm_get(smmu);
1236 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1237 arm_smmu_rpm_put(smmu);
1238 }
1239}
1240
1241static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1242 struct iommu_iotlb_gather *gather)
1243{
1244 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1245 struct arm_smmu_device *smmu = smmu_domain->smmu;
1246
1247 if (!smmu)
1248 return;
1249
1250 arm_smmu_rpm_get(smmu);
1251 if (smmu->version == ARM_SMMU_V2 ||
1252 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1253 arm_smmu_tlb_sync_context(smmu_domain);
1254 else
1255 arm_smmu_tlb_sync_global(smmu);
1256 arm_smmu_rpm_put(smmu);
1257}
1258
1259static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1260 dma_addr_t iova)
1261{
1262 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1263 struct arm_smmu_device *smmu = smmu_domain->smmu;
1264 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1266 struct device *dev = smmu->dev;
1267 void __iomem *reg;
1268 u32 tmp;
1269 u64 phys;
1270 unsigned long va, flags;
1271 int ret, idx = cfg->cbndx;
1272
1273 ret = arm_smmu_rpm_get(smmu);
1274 if (ret < 0)
1275 return 0;
1276
1277 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1278 va = iova & ~0xfffUL;
1279 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1280 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1281 else
1282 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1283
1284 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1285 if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
1286 5, 50)) {
1287 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1288 dev_err(dev,
1289 "iova to phys timed out on %pad. Falling back to software table walk.\n",
1290 &iova);
		arm_smmu_rpm_put(smmu);
		return ops->iova_to_phys(ops, iova);
1292 }
1293
1294 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
1295 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1296 if (phys & ARM_SMMU_CB_PAR_F) {
1297 dev_err(dev, "translation fault!\n");
1298 dev_err(dev, "PAR = 0x%llx\n", phys);
		arm_smmu_rpm_put(smmu);
		return 0;
1300 }
1301
1302 arm_smmu_rpm_put(smmu);
1303
1304 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1305}
1306
1307static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1308 dma_addr_t iova)
1309{
1310 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1311 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1312
1313 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1314 return iova;
1315
1316 if (!ops)
1317 return 0;
1318
1319 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1320 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1321 return arm_smmu_iova_to_phys_hard(domain, iova);
1322
1323 return ops->iova_to_phys(ops, iova);
1324}
1325
1326static bool arm_smmu_capable(enum iommu_cap cap)
1327{
1328 switch (cap) {
1329 case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
1334 return true;
1335 case IOMMU_CAP_NOEXEC:
1336 return true;
1337 default:
1338 return false;
1339 }
1340}
1341
1342static
1343struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
1344{
1345 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1346 fwnode);
1347 put_device(dev);
1348 return dev ? dev_get_drvdata(dev) : NULL;
1349}
1350
1351static struct iommu_device *arm_smmu_probe_device(struct device *dev)
1352{
1353 struct arm_smmu_device *smmu = NULL;
1354 struct arm_smmu_master_cfg *cfg;
1355 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1356 int i, ret;
1357
1358 if (using_legacy_binding) {
1359 ret = arm_smmu_register_legacy_master(dev, &smmu);
1360
		/*
		 * If dev->iommu_fwspec was initially NULL, the legacy
		 * registration above will have allocated a new fwspec via
		 * iommu_fwspec_init(), so re-read it before use.
		 */
1366 fwspec = dev_iommu_fwspec_get(dev);
1367 if (ret)
1368 goto out_free;
1369 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
1370 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1371 } else {
1372 return ERR_PTR(-ENODEV);
1373 }
1374
1375 ret = -EINVAL;
1376 for (i = 0; i < fwspec->num_ids; i++) {
1377 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1378 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1379
1380 if (sid & ~smmu->streamid_mask) {
1381 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1382 sid, smmu->streamid_mask);
1383 goto out_free;
1384 }
1385 if (mask & ~smmu->smr_mask_mask) {
1386 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
1387 mask, smmu->smr_mask_mask);
1388 goto out_free;
1389 }
1390 }
1391
1392 ret = -ENOMEM;
1393 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1394 GFP_KERNEL);
1395 if (!cfg)
1396 goto out_free;
1397
1398 cfg->smmu = smmu;
1399 dev_iommu_priv_set(dev, cfg);
1400 while (i--)
1401 cfg->smendx[i] = INVALID_SMENDX;
1402
1403 ret = arm_smmu_rpm_get(smmu);
1404 if (ret < 0)
1405 goto out_cfg_free;
1406
1407 ret = arm_smmu_master_alloc_smes(dev);
1408 arm_smmu_rpm_put(smmu);
1409
1410 if (ret)
1411 goto out_cfg_free;
1412
1413 device_link_add(dev, smmu->dev,
1414 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1415
1416 return &smmu->iommu;
1417
1418out_cfg_free:
1419 kfree(cfg);
1420out_free:
1421 iommu_fwspec_free(dev);
1422 return ERR_PTR(ret);
1423}
1424
1425static void arm_smmu_release_device(struct device *dev)
1426{
1427 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1428 struct arm_smmu_master_cfg *cfg;
1429 struct arm_smmu_device *smmu;
1430 int ret;
1431
1432 if (!fwspec || fwspec->ops != &arm_smmu_ops)
1433 return;
1434
1435 cfg = dev_iommu_priv_get(dev);
1436 smmu = cfg->smmu;
1437
1438 ret = arm_smmu_rpm_get(smmu);
1439 if (ret < 0)
1440 return;
1441
1442 arm_smmu_master_free_smes(cfg, fwspec);
1443
1444 arm_smmu_rpm_put(smmu);
1445
1446 dev_iommu_priv_set(dev, NULL);
1447 kfree(cfg);
1448 iommu_fwspec_free(dev);
1449}
1450
1451static struct iommu_group *arm_smmu_device_group(struct device *dev)
1452{
1453 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1454 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1455 struct arm_smmu_device *smmu = cfg->smmu;
1456 struct iommu_group *group = NULL;
1457 int i, idx;
1458
1459 for_each_cfg_sme(cfg, fwspec, i, idx) {
1460 if (group && smmu->s2crs[idx].group &&
1461 group != smmu->s2crs[idx].group)
1462 return ERR_PTR(-EINVAL);
1463
1464 group = smmu->s2crs[idx].group;
1465 }
1466
1467 if (group)
1468 return iommu_group_ref_get(group);
1469
1470 if (dev_is_pci(dev))
1471 group = pci_device_group(dev);
1472 else
1473 group = generic_device_group(dev);
1474
	/* Remember group for faster lookups */
1476 if (!IS_ERR(group))
1477 for_each_cfg_sme(cfg, fwspec, i, idx)
1478 smmu->s2crs[idx].group = group;
1479
1480 return group;
1481}
1482
1483static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1484 enum iommu_attr attr, void *data)
1485{
1486 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1487
1488 switch(domain->type) {
1489 case IOMMU_DOMAIN_UNMANAGED:
1490 switch (attr) {
1491 case DOMAIN_ATTR_NESTING:
1492 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1493 return 0;
1494 case DOMAIN_ATTR_IO_PGTABLE_CFG: {
1495 struct io_pgtable_domain_attr *pgtbl_cfg = data;
1496 *pgtbl_cfg = smmu_domain->pgtbl_cfg;
1497
1498 return 0;
1499 }
1500 default:
1501 return -ENODEV;
1502 }
1503 break;
1504 case IOMMU_DOMAIN_DMA:
1505 switch (attr) {
1506 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
1507 bool non_strict = smmu_domain->pgtbl_cfg.quirks &
1508 IO_PGTABLE_QUIRK_NON_STRICT;
1509 *(int *)data = non_strict;
1510 return 0;
1511 }
1512 default:
1513 return -ENODEV;
1514 }
1515 break;
1516 default:
1517 return -EINVAL;
1518 }
1519}
1520
1521static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1522 enum iommu_attr attr, void *data)
1523{
1524 int ret = 0;
1525 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1526
1527 mutex_lock(&smmu_domain->init_mutex);
1528
1529 switch(domain->type) {
1530 case IOMMU_DOMAIN_UNMANAGED:
1531 switch (attr) {
1532 case DOMAIN_ATTR_NESTING:
1533 if (smmu_domain->smmu) {
1534 ret = -EPERM;
1535 goto out_unlock;
1536 }
1537
1538 if (*(int *)data)
1539 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1540 else
1541 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1542 break;
1543 case DOMAIN_ATTR_IO_PGTABLE_CFG: {
1544 struct io_pgtable_domain_attr *pgtbl_cfg = data;
1545
1546 if (smmu_domain->smmu) {
1547 ret = -EPERM;
1548 goto out_unlock;
1549 }
1550
1551 smmu_domain->pgtbl_cfg = *pgtbl_cfg;
1552 break;
1553 }
1554 default:
1555 ret = -ENODEV;
1556 }
1557 break;
1558 case IOMMU_DOMAIN_DMA:
1559 switch (attr) {
1560 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
1561 if (*(int *)data)
1562 smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
1563 else
1564 smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
1565 break;
1566 default:
1567 ret = -ENODEV;
1568 }
1569 break;
1570 default:
1571 ret = -EINVAL;
1572 }
1573out_unlock:
1574 mutex_unlock(&smmu_domain->init_mutex);
1575 return ret;
1576}
1577
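/*
 * Pack the firmware-provided Stream ID and optional SMR mask into a single
 * 32-bit fwid for the IOMMU core to track.
 */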
1578static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1579{
1580 u32 mask, fwid = 0;
1581
1582 if (args->args_count > 0)
1583 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1584
1585 if (args->args_count > 1)
1586 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1587 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1588 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
1589
1590 return iommu_fwspec_add_ids(dev, &fwid, 1);
1591}
1592
1593static void arm_smmu_get_resv_regions(struct device *dev,
1594 struct list_head *head)
1595{
1596 struct iommu_resv_region *region;
1597 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1598
1599 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1600 prot, IOMMU_RESV_SW_MSI);
1601 if (!region)
1602 return;
1603
	list_add_tail(&region->list, head);
1605
1606 iommu_dma_get_resv_regions(dev, head);
1607}
1608
1609static int arm_smmu_def_domain_type(struct device *dev)
1610{
1611 struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1612 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1613
1614 if (impl && impl->def_domain_type)
1615 return impl->def_domain_type(dev);
1616
1617 return 0;
1618}
1619
1620static struct iommu_ops arm_smmu_ops = {
1621 .capable = arm_smmu_capable,
1622 .domain_alloc = arm_smmu_domain_alloc,
1623 .domain_free = arm_smmu_domain_free,
1624 .attach_dev = arm_smmu_attach_dev,
1625 .map = arm_smmu_map,
1626 .unmap = arm_smmu_unmap,
1627 .flush_iotlb_all = arm_smmu_flush_iotlb_all,
1628 .iotlb_sync = arm_smmu_iotlb_sync,
1629 .iova_to_phys = arm_smmu_iova_to_phys,
1630 .probe_device = arm_smmu_probe_device,
1631 .release_device = arm_smmu_release_device,
1632 .device_group = arm_smmu_device_group,
1633 .domain_get_attr = arm_smmu_domain_get_attr,
1634 .domain_set_attr = arm_smmu_domain_set_attr,
1635 .of_xlate = arm_smmu_of_xlate,
1636 .get_resv_regions = arm_smmu_get_resv_regions,
1637 .put_resv_regions = generic_iommu_put_resv_regions,
1638 .def_domain_type = arm_smmu_def_domain_type,
1639 .pgsize_bitmap = -1UL,
1640};
1641
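/*
 * Bring the SMMU to a known state: clear the global fault status, reset all
 * stream mapping groups and context banks, invalidate the TLBs and program
 * sCR0 with fault reporting enabled and unmatched-stream behaviour chosen
 * according to the disable_bypass parameter.
 */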
1642static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1643{
1644 int i;
1645 u32 reg;
1646
1647
1648 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1649 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
1650
	/*
	 * Reset stream mapping groups: initial values mark all SMRn as
	 * invalid and all S2CRn as the default bypass/fault type.
	 */
1655 for (i = 0; i < smmu->num_mapping_groups; ++i)
1656 arm_smmu_write_sme(smmu, i);
1657
1658
1659 for (i = 0; i < smmu->num_context_banks; ++i) {
1660 arm_smmu_write_context_bank(smmu, i);
1661 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
1662 }
1663
1664
1665 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1666 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
1667
1668 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
1669
1670
1671 reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
1672 ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
1673
1674
1675 reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
1676
1677
1678 reg &= ~ARM_SMMU_sCR0_CLIENTPD;
1679 if (disable_bypass)
1680 reg |= ARM_SMMU_sCR0_USFCFG;
1681 else
1682 reg &= ~ARM_SMMU_sCR0_USFCFG;
1683
1684
1685 reg &= ~ARM_SMMU_sCR0_FB;
1686
1687
1688 reg &= ~(ARM_SMMU_sCR0_BSU);
1689
1690 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1691 reg |= ARM_SMMU_sCR0_VMID16EN;
1692
1693 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1694 reg |= ARM_SMMU_sCR0_EXIDENABLE;
1695
1696 if (smmu->impl && smmu->impl->reset)
1697 smmu->impl->reset(smmu);
1698
1699
1700 arm_smmu_tlb_sync_global(smmu);
1701 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
1702}
1703
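/* Decode the 3-bit address size fields of the ID registers into bits */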
1704static int arm_smmu_id_size_to_bits(int size)
1705{
1706 switch (size) {
1707 case 0:
1708 return 32;
1709 case 1:
1710 return 36;
1711 case 2:
1712 return 40;
1713 case 3:
1714 return 42;
1715 case 4:
1716 return 44;
1717 case 5:
1718 default:
1719 return 48;
1720 }
1721}
1722
1723static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1724{
1725 unsigned int size;
1726 u32 id;
1727 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1728 int i, ret;
1729
1730 dev_notice(smmu->dev, "probing hardware configuration...\n");
1731 dev_notice(smmu->dev, "SMMUv%d with:\n",
1732 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1733
1734
1735 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
1736
1737
1738 if (force_stage == 1)
1739 id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
1740 else if (force_stage == 2)
1741 id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
1742
1743 if (id & ARM_SMMU_ID0_S1TS) {
1744 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1745 dev_notice(smmu->dev, "\tstage 1 translation\n");
1746 }
1747
1748 if (id & ARM_SMMU_ID0_S2TS) {
1749 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1750 dev_notice(smmu->dev, "\tstage 2 translation\n");
1751 }
1752
1753 if (id & ARM_SMMU_ID0_NTS) {
1754 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1755 dev_notice(smmu->dev, "\tnested translation\n");
1756 }
1757
1758 if (!(smmu->features &
1759 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1760 dev_err(smmu->dev, "\tno translation support!\n");
1761 return -ENODEV;
1762 }
1763
1764 if ((id & ARM_SMMU_ID0_S1TS) &&
1765 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1766 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1767 dev_notice(smmu->dev, "\taddress translation ops\n");
1768 }
1769
	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
1776 cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
1777 if (cttw_fw || cttw_reg)
1778 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1779 cttw_fw ? "" : "non-");
1780 if (cttw_fw != cttw_reg)
1781 dev_notice(smmu->dev,
1782 "\t(IDR0.CTTW overridden by FW configuration)\n");
1783
1784
1785 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1786 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1787 size = 1 << 16;
1788 } else {
1789 size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
1790 }
1791 smmu->streamid_mask = size - 1;
1792 if (id & ARM_SMMU_ID0_SMS) {
1793 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1794 size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
1795 if (size == 0) {
1796 dev_err(smmu->dev,
1797 "stream-matching supported, but no SMRs present!\n");
1798 return -ENODEV;
1799 }
1800
1801
1802 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1803 GFP_KERNEL);
1804 if (!smmu->smrs)
1805 return -ENOMEM;
1806
1807 dev_notice(smmu->dev,
1808 "\tstream matching with %u register groups", size);
1809 }
1810
1811 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1812 GFP_KERNEL);
1813 if (!smmu->s2crs)
1814 return -ENOMEM;
1815 for (i = 0; i < size; i++)
1816 smmu->s2crs[i] = s2cr_init_val;
1817
1818 smmu->num_mapping_groups = size;
1819 mutex_init(&smmu->stream_map_mutex);
1820 spin_lock_init(&smmu->global_sync_lock);
1821
1822 if (smmu->version < ARM_SMMU_V2 ||
1823 !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
1824 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1825 if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
1826 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1827 }
1828
1829
1830 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
1831 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1832
1833
1834 size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
1835 if (smmu->numpage != 2 * size << smmu->pgshift)
1836 dev_warn(smmu->dev,
1837 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1838 2 * size << smmu->pgshift, smmu->numpage);
1839
1840 smmu->numpage = size;
1841
1842 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1843 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1844 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1845 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1846 return -ENODEV;
1847 }
1848 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1849 smmu->num_context_banks, smmu->num_s2_context_banks);
1850 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1851 sizeof(*smmu->cbs), GFP_KERNEL);
1852 if (!smmu->cbs)
1853 return -ENOMEM;
1854
1855
1856 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
1857 size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
1858 smmu->ipa_size = size;
1859
1860
1861 size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
1862 smmu->pa_size = size;
1863
1864 if (id & ARM_SMMU_ID2_VMID16)
1865 smmu->features |= ARM_SMMU_FEAT_VMID16;
1866
	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
1872 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1873 dev_warn(smmu->dev,
1874 "failed to set DMA mask for table walker\n");
1875
1876 if (smmu->version < ARM_SMMU_V2) {
1877 smmu->va_size = smmu->ipa_size;
1878 if (smmu->version == ARM_SMMU_V1_64K)
1879 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1880 } else {
1881 size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
1882 smmu->va_size = arm_smmu_id_size_to_bits(size);
1883 if (id & ARM_SMMU_ID2_PTFS_4K)
1884 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1885 if (id & ARM_SMMU_ID2_PTFS_16K)
1886 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1887 if (id & ARM_SMMU_ID2_PTFS_64K)
1888 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1889 }
1890
1891 if (smmu->impl && smmu->impl->cfg_probe) {
1892 ret = smmu->impl->cfg_probe(smmu);
1893 if (ret)
1894 return ret;
1895 }
1896
1897
1898 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1899 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1900 if (smmu->features &
1901 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
1902 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1903 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1904 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1905 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1906 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1907
1908 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1909 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1910 else
1911 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1912 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1913 smmu->pgsize_bitmap);
1914
1915
1916 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1917 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1918 smmu->va_size, smmu->ipa_size);
1919
1920 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1921 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1922 smmu->ipa_size, smmu->pa_size);
1923
1924 return 0;
1925}
1926
1927struct arm_smmu_match_data {
1928 enum arm_smmu_arch_version version;
1929 enum arm_smmu_implementation model;
1930};
1931
1932#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1933static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
1934
1935ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1936ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
1937ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
1938ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
1939ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
1940ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
1941
1942static const struct of_device_id arm_smmu_of_match[] = {
1943 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1944 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1945 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1946 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1947 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1948 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1949 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1950 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1951 { },
1952};
1953MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1954
1955#ifdef CONFIG_ACPI
1956static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1957{
1958 int ret = 0;
1959
1960 switch (model) {
1961 case ACPI_IORT_SMMU_V1:
1962 case ACPI_IORT_SMMU_CORELINK_MMU400:
1963 smmu->version = ARM_SMMU_V1;
1964 smmu->model = GENERIC_SMMU;
1965 break;
1966 case ACPI_IORT_SMMU_CORELINK_MMU401:
1967 smmu->version = ARM_SMMU_V1_64K;
1968 smmu->model = GENERIC_SMMU;
1969 break;
1970 case ACPI_IORT_SMMU_V2:
1971 smmu->version = ARM_SMMU_V2;
1972 smmu->model = GENERIC_SMMU;
1973 break;
1974 case ACPI_IORT_SMMU_CORELINK_MMU500:
1975 smmu->version = ARM_SMMU_V2;
1976 smmu->model = ARM_MMU500;
1977 break;
1978 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1979 smmu->version = ARM_SMMU_V2;
1980 smmu->model = CAVIUM_SMMUV2;
1981 break;
1982 default:
1983 ret = -ENODEV;
1984 }
1985
1986 return ret;
1987}
1988
1989static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
1990 struct arm_smmu_device *smmu)
1991{
1992 struct device *dev = smmu->dev;
1993 struct acpi_iort_node *node =
1994 *(struct acpi_iort_node **)dev_get_platdata(dev);
1995 struct acpi_iort_smmu *iort_smmu;
1996 int ret;
1997
1998
1999 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2000
2001 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2002 if (ret < 0)
2003 return ret;
2004
2005
2006 smmu->num_global_irqs = 1;
2007
2008 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2009 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2010
2011 return 0;
2012}
2013#else
2014static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2015 struct arm_smmu_device *smmu)
2016{
2017 return -ENODEV;
2018}
2019#endif
2020
2021static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2022 struct arm_smmu_device *smmu)
2023{
2024 const struct arm_smmu_match_data *data;
2025 struct device *dev = &pdev->dev;
2026 bool legacy_binding;
2027
2028 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2029 &smmu->num_global_irqs)) {
2030 dev_err(dev, "missing #global-interrupts property\n");
2031 return -ENODEV;
2032 }
2033
2034 data = of_device_get_match_data(dev);
2035 smmu->version = data->version;
2036 smmu->model = data->model;
2037
2038 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2039 if (legacy_binding && !using_generic_binding) {
2040 if (!using_legacy_binding) {
2041 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2042 IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
2043 }
2044 using_legacy_binding = true;
2045 } else if (!legacy_binding && !using_legacy_binding) {
2046 using_generic_binding = true;
2047 } else {
2048 dev_err(dev, "not probing due to mismatched DT properties\n");
2049 return -ENODEV;
2050 }
2051
2052 if (of_dma_is_coherent(dev->of_node))
2053 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2054
2055 return 0;
2056}
2057
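/*
 * Install (or remove, when ops is NULL) the SMMU as the IOMMU for every bus
 * type we might master: platform, AMBA and PCI.
 */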
2058static int arm_smmu_bus_init(struct iommu_ops *ops)
2059{
2060 int err;
2061
2062
2063 if (!iommu_present(&platform_bus_type)) {
2064 err = bus_set_iommu(&platform_bus_type, ops);
2065 if (err)
2066 return err;
2067 }
2068#ifdef CONFIG_ARM_AMBA
2069 if (!iommu_present(&amba_bustype)) {
2070 err = bus_set_iommu(&amba_bustype, ops);
2071 if (err)
2072 goto err_reset_platform_ops;
2073 }
2074#endif
2075#ifdef CONFIG_PCI
2076 if (!iommu_present(&pci_bus_type)) {
2077 err = bus_set_iommu(&pci_bus_type, ops);
2078 if (err)
2079 goto err_reset_amba_ops;
2080 }
2081#endif
2082 return 0;
2083
2084err_reset_pci_ops: __maybe_unused;
2085#ifdef CONFIG_PCI
2086 bus_set_iommu(&pci_bus_type, NULL);
2087#endif
2088err_reset_amba_ops: __maybe_unused;
2089#ifdef CONFIG_ARM_AMBA
2090 bus_set_iommu(&amba_bustype, NULL);
2091#endif
2092err_reset_platform_ops: __maybe_unused;
2093 bus_set_iommu(&platform_bus_type, NULL);
2094 return err;
2095}
2096
2097static int arm_smmu_device_probe(struct platform_device *pdev)
2098{
2099 struct resource *res;
2100 resource_size_t ioaddr;
2101 struct arm_smmu_device *smmu;
2102 struct device *dev = &pdev->dev;
2103 int num_irqs, i, err;
2104 irqreturn_t (*global_fault)(int irq, void *dev);
2105
2106 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2107 if (!smmu) {
2108 dev_err(dev, "failed to allocate arm_smmu_device\n");
2109 return -ENOMEM;
2110 }
2111 smmu->dev = dev;
2112
2113 if (dev->of_node)
2114 err = arm_smmu_device_dt_probe(pdev, smmu);
2115 else
2116 err = arm_smmu_device_acpi_probe(pdev, smmu);
2117
2118 if (err)
2119 return err;
2120
2121 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2122 ioaddr = res->start;
2123 smmu->base = devm_ioremap_resource(dev, res);
2124 if (IS_ERR(smmu->base))
2125 return PTR_ERR(smmu->base);
2126
	/*
	 * Record the full resource size for now; arm_smmu_device_cfg_probe()
	 * will warn if it disagrees with what the ID registers report and
	 * then refine smmu->numpage once the SMMU page size is known.
	 */
2130 smmu->numpage = resource_size(res);
2131
2132 smmu = arm_smmu_impl_init(smmu);
2133 if (IS_ERR(smmu))
2134 return PTR_ERR(smmu);
2135
2136 num_irqs = 0;
2137 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2138 num_irqs++;
2139 if (num_irqs > smmu->num_global_irqs)
2140 smmu->num_context_irqs++;
2141 }
2142
2143 if (!smmu->num_context_irqs) {
2144 dev_err(dev, "found %d interrupts but expected at least %d\n",
2145 num_irqs, smmu->num_global_irqs + 1);
2146 return -ENODEV;
2147 }
2148
2149 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
2150 GFP_KERNEL);
2151 if (!smmu->irqs) {
2152 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2153 return -ENOMEM;
2154 }
2155
2156 for (i = 0; i < num_irqs; ++i) {
2157 int irq = platform_get_irq(pdev, i);
2158
2159 if (irq < 0)
2160 return -ENODEV;
2161 smmu->irqs[i] = irq;
2162 }
2163
2164 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2165 if (err < 0) {
2166 dev_err(dev, "failed to get clocks %d\n", err);
2167 return err;
2168 }
2169 smmu->num_clks = err;
2170
2171 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2172 if (err)
2173 return err;
2174
2175 err = arm_smmu_device_cfg_probe(smmu);
2176 if (err)
2177 return err;
2178
2179 if (smmu->version == ARM_SMMU_V2) {
2180 if (smmu->num_context_banks > smmu->num_context_irqs) {
2181 dev_err(dev,
2182 "found only %d context irq(s) but %d required\n",
2183 smmu->num_context_irqs, smmu->num_context_banks);
2184 return -ENODEV;
2185 }
2186
		/* Ignore superfluous interrupts */
2188 smmu->num_context_irqs = smmu->num_context_banks;
2189 }
2190
2191 if (smmu->impl && smmu->impl->global_fault)
2192 global_fault = smmu->impl->global_fault;
2193 else
2194 global_fault = arm_smmu_global_fault;
2195
2196 for (i = 0; i < smmu->num_global_irqs; ++i) {
2197 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2198 global_fault,
2199 IRQF_SHARED,
2200 "arm-smmu global fault",
2201 smmu);
2202 if (err) {
2203 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2204 i, smmu->irqs[i]);
2205 return err;
2206 }
2207 }
2208
2209 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2210 "smmu.%pa", &ioaddr);
2211 if (err) {
2212 dev_err(dev, "Failed to register iommu in sysfs\n");
2213 return err;
2214 }
2215
2216 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2217 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2218
2219 err = iommu_device_register(&smmu->iommu);
2220 if (err) {
2221 dev_err(dev, "Failed to register iommu\n");
2222 return err;
2223 }
2224
2225 platform_set_drvdata(pdev, smmu);
2226 arm_smmu_device_reset(smmu);
2227 arm_smmu_test_smr_masks(smmu);
2228
	/*
	 * We want to avoid touching dev->power.lock in fastpaths unless
	 * it's really going to do something useful - pm_runtime_enabled()
	 * can serve as an ideal proxy for that decision. So, conditionally
	 * enable pm_runtime.
	 */
2235 if (dev->pm_domain) {
2236 pm_runtime_set_active(dev);
2237 pm_runtime_enable(dev);
2238 }
2239
	/*
	 * For ACPI and the generic DT binding, an SMMU will be probed before
	 * any device which might need it, so we want the bus ops in place
	 * as soon as possible. With the legacy DT binding, the bus ops are
	 * instead installed from a deferred initcall once all SMMUs are up.
	 */
2245 if (!using_legacy_binding)
2246 return arm_smmu_bus_init(&arm_smmu_ops);
2247
2248 return 0;
2249}
2250
2251static int arm_smmu_device_remove(struct platform_device *pdev)
2252{
2253 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2254
2255 if (!smmu)
2256 return -ENODEV;
2257
2258 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2259 dev_notice(&pdev->dev, "disabling translation\n");
2260
2261 arm_smmu_bus_init(NULL);
2262 iommu_device_unregister(&smmu->iommu);
2263 iommu_device_sysfs_remove(&smmu->iommu);
2264
2265 arm_smmu_rpm_get(smmu);
2266
2267 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
2268 arm_smmu_rpm_put(smmu);
2269
2270 if (pm_runtime_enabled(smmu->dev))
2271 pm_runtime_force_suspend(smmu->dev);
2272 else
2273 clk_bulk_disable(smmu->num_clks, smmu->clks);
2274
2275 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2276 return 0;
2277}
2278
2279static void arm_smmu_device_shutdown(struct platform_device *pdev)
2280{
2281 arm_smmu_device_remove(pdev);
2282}
2283
2284static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
2285{
2286 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2287 int ret;
2288
2289 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2290 if (ret)
2291 return ret;
2292
2293 arm_smmu_device_reset(smmu);
2294
2295 return 0;
2296}
2297
2298static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
2299{
2300 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2301
2302 clk_bulk_disable(smmu->num_clks, smmu->clks);
2303
2304 return 0;
2305}
2306
2307static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2308{
2309 if (pm_runtime_suspended(dev))
2310 return 0;
2311
2312 return arm_smmu_runtime_resume(dev);
2313}
2314
2315static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2316{
2317 if (pm_runtime_suspended(dev))
2318 return 0;
2319
2320 return arm_smmu_runtime_suspend(dev);
2321}
2322
2323static const struct dev_pm_ops arm_smmu_pm_ops = {
2324 SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2325 SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2326 arm_smmu_runtime_resume, NULL)
2327};
2328
2329static struct platform_driver arm_smmu_driver = {
2330 .driver = {
2331 .name = "arm-smmu",
2332 .of_match_table = arm_smmu_of_match,
2333 .pm = &arm_smmu_pm_ops,
2334 .suppress_bind_attrs = true,
2335 },
2336 .probe = arm_smmu_device_probe,
2337 .remove = arm_smmu_device_remove,
2338 .shutdown = arm_smmu_device_shutdown,
2339};
2340module_platform_driver(arm_smmu_driver);
2341
2342MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2343MODULE_AUTHOR("Will Deacon <will@kernel.org>");
2344MODULE_ALIAS("platform:arm-smmu");
2345MODULE_LICENSE("GPL v2");
2346