// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

/* MMIO registers */
38#define ARM_SMMU_IDR0 0x0
39#define IDR0_ST_LVL GENMASK(28, 27)
40#define IDR0_ST_LVL_2LVL 1
41#define IDR0_STALL_MODEL GENMASK(25, 24)
42#define IDR0_STALL_MODEL_STALL 0
43#define IDR0_STALL_MODEL_FORCE 2
44#define IDR0_TTENDIAN GENMASK(22, 21)
45#define IDR0_TTENDIAN_MIXED 0
46#define IDR0_TTENDIAN_LE 2
47#define IDR0_TTENDIAN_BE 3
48#define IDR0_CD2L (1 << 19)
49#define IDR0_VMID16 (1 << 18)
50#define IDR0_PRI (1 << 16)
51#define IDR0_SEV (1 << 14)
52#define IDR0_MSI (1 << 13)
53#define IDR0_ASID16 (1 << 12)
54#define IDR0_ATS (1 << 10)
55#define IDR0_HYP (1 << 9)
56#define IDR0_COHACC (1 << 4)
57#define IDR0_TTF GENMASK(3, 2)
58#define IDR0_TTF_AARCH64 2
59#define IDR0_TTF_AARCH32_64 3
60#define IDR0_S1P (1 << 1)
61#define IDR0_S2P (1 << 0)
62
63#define ARM_SMMU_IDR1 0x4
64#define IDR1_TABLES_PRESET (1 << 30)
65#define IDR1_QUEUES_PRESET (1 << 29)
66#define IDR1_REL (1 << 28)
67#define IDR1_CMDQS GENMASK(25, 21)
68#define IDR1_EVTQS GENMASK(20, 16)
69#define IDR1_PRIQS GENMASK(15, 11)
70#define IDR1_SSIDSIZE GENMASK(10, 6)
71#define IDR1_SIDSIZE GENMASK(5, 0)
72
73#define ARM_SMMU_IDR5 0x14
74#define IDR5_STALL_MAX GENMASK(31, 16)
75#define IDR5_GRAN64K (1 << 6)
76#define IDR5_GRAN16K (1 << 5)
77#define IDR5_GRAN4K (1 << 4)
78#define IDR5_OAS GENMASK(2, 0)
79#define IDR5_OAS_32_BIT 0
80#define IDR5_OAS_36_BIT 1
81#define IDR5_OAS_40_BIT 2
82#define IDR5_OAS_42_BIT 3
83#define IDR5_OAS_44_BIT 4
84#define IDR5_OAS_48_BIT 5
85#define IDR5_OAS_52_BIT 6
86#define IDR5_VAX GENMASK(11, 10)
87#define IDR5_VAX_52_BIT 1
88
89#define ARM_SMMU_CR0 0x20
90#define CR0_ATSCHK (1 << 4)
91#define CR0_CMDQEN (1 << 3)
92#define CR0_EVTQEN (1 << 2)
93#define CR0_PRIQEN (1 << 1)
94#define CR0_SMMUEN (1 << 0)
95
96#define ARM_SMMU_CR0ACK 0x24
97
98#define ARM_SMMU_CR1 0x28
99#define CR1_TABLE_SH GENMASK(11, 10)
100#define CR1_TABLE_OC GENMASK(9, 8)
101#define CR1_TABLE_IC GENMASK(7, 6)
102#define CR1_QUEUE_SH GENMASK(5, 4)
103#define CR1_QUEUE_OC GENMASK(3, 2)
104#define CR1_QUEUE_IC GENMASK(1, 0)
105
106#define CR1_CACHE_NC 0
107#define CR1_CACHE_WB 1
108#define CR1_CACHE_WT 2
109
110#define ARM_SMMU_CR2 0x2c
111#define CR2_PTM (1 << 2)
112#define CR2_RECINVSID (1 << 1)
113#define CR2_E2H (1 << 0)
114
115#define ARM_SMMU_GBPA 0x44
116#define GBPA_UPDATE (1 << 31)
117#define GBPA_ABORT (1 << 20)
118
119#define ARM_SMMU_IRQ_CTRL 0x50
120#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
121#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
122#define IRQ_CTRL_GERROR_IRQEN (1 << 0)
123
124#define ARM_SMMU_IRQ_CTRLACK 0x54
125
126#define ARM_SMMU_GERROR 0x60
127#define GERROR_SFM_ERR (1 << 8)
128#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
129#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
130#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
131#define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
132#define GERROR_PRIQ_ABT_ERR (1 << 3)
133#define GERROR_EVTQ_ABT_ERR (1 << 2)
134#define GERROR_CMDQ_ERR (1 << 0)
135#define GERROR_ERR_MASK 0xfd
136
137#define ARM_SMMU_GERRORN 0x64
138
139#define ARM_SMMU_GERROR_IRQ_CFG0 0x68
140#define ARM_SMMU_GERROR_IRQ_CFG1 0x70
141#define ARM_SMMU_GERROR_IRQ_CFG2 0x74
142
143#define ARM_SMMU_STRTAB_BASE 0x80
144#define STRTAB_BASE_RA (1UL << 62)
145#define STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)
146
147#define ARM_SMMU_STRTAB_BASE_CFG 0x88
148#define STRTAB_BASE_CFG_FMT GENMASK(17, 16)
149#define STRTAB_BASE_CFG_FMT_LINEAR 0
150#define STRTAB_BASE_CFG_FMT_2LVL 1
151#define STRTAB_BASE_CFG_SPLIT GENMASK(10, 6)
152#define STRTAB_BASE_CFG_LOG2SIZE GENMASK(5, 0)
153
154#define ARM_SMMU_CMDQ_BASE 0x90
155#define ARM_SMMU_CMDQ_PROD 0x98
156#define ARM_SMMU_CMDQ_CONS 0x9c
157
158#define ARM_SMMU_EVTQ_BASE 0xa0
159#define ARM_SMMU_EVTQ_PROD 0x100a8
160#define ARM_SMMU_EVTQ_CONS 0x100ac
161#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
162#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
163#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc
164
165#define ARM_SMMU_PRIQ_BASE 0xc0
166#define ARM_SMMU_PRIQ_PROD 0x100c8
167#define ARM_SMMU_PRIQ_CONS 0x100cc
168#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
169#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
170#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
171
172
173#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
174#define MSI_CFG2_SH GENMASK(5, 4)
175#define MSI_CFG2_MEMATTR GENMASK(3, 0)
176
177
178#define ARM_SMMU_SH_NSH 0
179#define ARM_SMMU_SH_OSH 2
180#define ARM_SMMU_SH_ISH 3
181#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1
182#define ARM_SMMU_MEMATTR_OIWB 0xf
183
184#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
185#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
186#define Q_OVERFLOW_FLAG (1 << 31)
187#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG)
188#define Q_ENT(q, p) ((q)->base + \
189 Q_IDX(q, p) * (q)->ent_dwords)
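
/*
 * Queue producer/consumer pointers are kept as (OVF | WRP | IDX): the low
 * max_n_shift bits index into the ring, the next bit is a wrap flag that
 * toggles on every pass over the ring, and bit 31 is the hardware overflow
 * flag. For example, with max_n_shift == 8, prod == 0x103 means index 3
 * with the wrap bit set.
 */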
190
191#define Q_BASE_RWA (1UL << 62)
192#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
193#define Q_BASE_LOG2SIZE GENMASK(4, 0)
194
195
196#ifdef CONFIG_CMA_ALIGNMENT
197#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
198#else
199#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
200#endif
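
/*
 * Cap queue sizes at roughly the largest physically contiguous allocation
 * dmam_alloc_coherent() is likely to satisfy: CMA_ALIGNMENT pages when CMA
 * is in use, otherwise the page allocator's MAX_ORDER limit.
 */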

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
209#define STRTAB_L1_SZ_SHIFT 20
210#define STRTAB_SPLIT 8
211
212#define STRTAB_L1_DESC_DWORDS 1
213#define STRTAB_L1_DESC_SPAN GENMASK_ULL(4, 0)
214#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)
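
/*
 * With STRTAB_SPLIT == 8, each level-1 descriptor covers a level-2 table of
 * 1 << 8 = 256 STEs (one PCI bus worth of RIDs), and the bottom eight bits
 * of the StreamID index into that table.
 */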
215
216#define STRTAB_STE_DWORDS 8
217#define STRTAB_STE_0_V (1UL << 0)
218#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
219#define STRTAB_STE_0_CFG_ABORT 0
220#define STRTAB_STE_0_CFG_BYPASS 4
221#define STRTAB_STE_0_CFG_S1_TRANS 5
222#define STRTAB_STE_0_CFG_S2_TRANS 6
223
224#define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4)
225#define STRTAB_STE_0_S1FMT_LINEAR 0
226#define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
227#define STRTAB_STE_0_S1CDMAX GENMASK_ULL(63, 59)
228
229#define STRTAB_STE_1_S1C_CACHE_NC 0UL
230#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
231#define STRTAB_STE_1_S1C_CACHE_WT 2UL
232#define STRTAB_STE_1_S1C_CACHE_WB 3UL
233#define STRTAB_STE_1_S1CIR GENMASK_ULL(3, 2)
234#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
235#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
236
237#define STRTAB_STE_1_S1STALLD (1UL << 27)
238
239#define STRTAB_STE_1_EATS GENMASK_ULL(29, 28)
240#define STRTAB_STE_1_EATS_ABT 0UL
241#define STRTAB_STE_1_EATS_TRANS 1UL
242#define STRTAB_STE_1_EATS_S1CHK 2UL
243
244#define STRTAB_STE_1_STRW GENMASK_ULL(31, 30)
245#define STRTAB_STE_1_STRW_NSEL1 0UL
246#define STRTAB_STE_1_STRW_EL2 2UL
247
248#define STRTAB_STE_1_SHCFG GENMASK_ULL(45, 44)
249#define STRTAB_STE_1_SHCFG_INCOMING 1UL
250
251#define STRTAB_STE_2_S2VMID GENMASK_ULL(15, 0)
252#define STRTAB_STE_2_VTCR GENMASK_ULL(50, 32)
253#define STRTAB_STE_2_S2AA64 (1UL << 51)
254#define STRTAB_STE_2_S2ENDI (1UL << 52)
255#define STRTAB_STE_2_S2PTW (1UL << 54)
256#define STRTAB_STE_2_S2R (1UL << 58)
257
258#define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4)
259
260
261#define CTXDESC_CD_DWORDS 8
262#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
263#define ARM64_TCR_T0SZ GENMASK_ULL(5, 0)
264#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
265#define ARM64_TCR_TG0 GENMASK_ULL(15, 14)
266#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
267#define ARM64_TCR_IRGN0 GENMASK_ULL(9, 8)
268#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
269#define ARM64_TCR_ORGN0 GENMASK_ULL(11, 10)
270#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
271#define ARM64_TCR_SH0 GENMASK_ULL(13, 12)
272#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14)
273#define ARM64_TCR_EPD0 (1ULL << 7)
274#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30)
275#define ARM64_TCR_EPD1 (1ULL << 23)
276
277#define CTXDESC_CD_0_ENDI (1UL << 15)
278#define CTXDESC_CD_0_V (1UL << 31)
279
280#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
281#define ARM64_TCR_IPS GENMASK_ULL(34, 32)
282#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)
283#define ARM64_TCR_TBI0 (1ULL << 37)
284
285#define CTXDESC_CD_0_AA64 (1UL << 41)
286#define CTXDESC_CD_0_S (1UL << 44)
287#define CTXDESC_CD_0_R (1UL << 45)
288#define CTXDESC_CD_0_A (1UL << 46)
289#define CTXDESC_CD_0_ASET (1UL << 47)
290#define CTXDESC_CD_0_ASID GENMASK_ULL(63, 48)
291
292#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
293
294
295#define ARM_SMMU_TCR2CD(tcr, fld) FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \
296 FIELD_GET(ARM64_TCR_##fld, tcr))
297
298
299#define CMDQ_ENT_SZ_SHIFT 4
300#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
301#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
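
/* A command is 1 << CMDQ_ENT_SZ_SHIFT = 16 bytes, i.e. CMDQ_ENT_DWORDS = 2. */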
302
303#define CMDQ_CONS_ERR GENMASK(30, 24)
304#define CMDQ_ERR_CERROR_NONE_IDX 0
305#define CMDQ_ERR_CERROR_ILL_IDX 1
306#define CMDQ_ERR_CERROR_ABT_IDX 2
307#define CMDQ_ERR_CERROR_ATC_INV_IDX 3
308
309#define CMDQ_0_OP GENMASK_ULL(7, 0)
310#define CMDQ_0_SSV (1UL << 11)
311
312#define CMDQ_PREFETCH_0_SID GENMASK_ULL(63, 32)
313#define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0)
314#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12)
315
316#define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32)
317#define CMDQ_CFGI_1_LEAF (1UL << 0)
318#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
319
320#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
321#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
322#define CMDQ_TLBI_1_LEAF (1UL << 0)
323#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
324#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
325
326#define CMDQ_ATC_0_SSID GENMASK_ULL(31, 12)
327#define CMDQ_ATC_0_SID GENMASK_ULL(63, 32)
328#define CMDQ_ATC_0_GLOBAL (1UL << 9)
329#define CMDQ_ATC_1_SIZE GENMASK_ULL(5, 0)
330#define CMDQ_ATC_1_ADDR_MASK GENMASK_ULL(63, 12)
331
332#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
333#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
334#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
335#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)
336
337#define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12)
338#define CMDQ_SYNC_0_CS_NONE 0
339#define CMDQ_SYNC_0_CS_IRQ 1
340#define CMDQ_SYNC_0_CS_SEV 2
341#define CMDQ_SYNC_0_MSH GENMASK_ULL(23, 22)
342#define CMDQ_SYNC_0_MSIATTR GENMASK_ULL(27, 24)
343#define CMDQ_SYNC_0_MSIDATA GENMASK_ULL(63, 32)
344#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
345
346
347#define EVTQ_ENT_SZ_SHIFT 5
348#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
349#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
350
351#define EVTQ_0_ID GENMASK_ULL(7, 0)
352
353
354#define PRIQ_ENT_SZ_SHIFT 4
355#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
356#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
357
358#define PRIQ_0_SID GENMASK_ULL(31, 0)
359#define PRIQ_0_SSID GENMASK_ULL(51, 32)
360#define PRIQ_0_PERM_PRIV (1UL << 58)
361#define PRIQ_0_PERM_EXEC (1UL << 59)
362#define PRIQ_0_PERM_READ (1UL << 60)
363#define PRIQ_0_PERM_WRITE (1UL << 61)
364#define PRIQ_0_PRG_LAST (1UL << 62)
365#define PRIQ_0_SSID_V (1UL << 63)
366
367#define PRIQ_1_PRG_IDX GENMASK_ULL(8, 0)
368#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12)
369
370
371#define ARM_SMMU_POLL_TIMEOUT_US 100
372#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US 1000000
373#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT 10
374
375#define MSI_IOVA_BASE 0x8000000
376#define MSI_IOVA_LENGTH 0x100000
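
/*
 * Fixed IOVA window reported to the IOMMU core as a software-managed MSI
 * region (see arm_smmu_get_resv_regions()), so that MSI doorbells can be
 * mapped when devices are attached to a DMA domain.
 */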
377
378
379
380
381
382static bool disable_bypass = 1;
383module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
384MODULE_PARM_DESC(disable_bypass,
385 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
386
387enum pri_resp {
388 PRI_RESP_DENY = 0,
389 PRI_RESP_FAIL = 1,
390 PRI_RESP_SUCC = 2,
391};
392
393enum arm_smmu_msi_index {
394 EVTQ_MSI_INDEX,
395 GERROR_MSI_INDEX,
396 PRIQ_MSI_INDEX,
397 ARM_SMMU_MAX_MSIS,
398};
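
/*
 * Per-interrupt MSI configuration registers, indexed by the enum above:
 * CFG0 holds the doorbell address, CFG1 the 32-bit payload and CFG2 the
 * shareability/memory attributes.
 */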
399
400static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
401 [EVTQ_MSI_INDEX] = {
402 ARM_SMMU_EVTQ_IRQ_CFG0,
403 ARM_SMMU_EVTQ_IRQ_CFG1,
404 ARM_SMMU_EVTQ_IRQ_CFG2,
405 },
406 [GERROR_MSI_INDEX] = {
407 ARM_SMMU_GERROR_IRQ_CFG0,
408 ARM_SMMU_GERROR_IRQ_CFG1,
409 ARM_SMMU_GERROR_IRQ_CFG2,
410 },
411 [PRIQ_MSI_INDEX] = {
412 ARM_SMMU_PRIQ_IRQ_CFG0,
413 ARM_SMMU_PRIQ_IRQ_CFG1,
414 ARM_SMMU_PRIQ_IRQ_CFG2,
415 },
416};
417
418struct arm_smmu_cmdq_ent {
419
420 u8 opcode;
421 bool substream_valid;
422
423
424 union {
425 #define CMDQ_OP_PREFETCH_CFG 0x1
426 struct {
427 u32 sid;
428 u8 size;
429 u64 addr;
430 } prefetch;
431
432 #define CMDQ_OP_CFGI_STE 0x3
433 #define CMDQ_OP_CFGI_ALL 0x4
434 struct {
435 u32 sid;
436 union {
437 bool leaf;
438 u8 span;
439 };
440 } cfgi;
441
442 #define CMDQ_OP_TLBI_NH_ASID 0x11
443 #define CMDQ_OP_TLBI_NH_VA 0x12
444 #define CMDQ_OP_TLBI_EL2_ALL 0x20
445 #define CMDQ_OP_TLBI_S12_VMALL 0x28
446 #define CMDQ_OP_TLBI_S2_IPA 0x2a
447 #define CMDQ_OP_TLBI_NSNH_ALL 0x30
448 struct {
449 u16 asid;
450 u16 vmid;
451 bool leaf;
452 u64 addr;
453 } tlbi;
454
455 #define CMDQ_OP_ATC_INV 0x40
456 #define ATC_INV_SIZE_ALL 52
457 struct {
458 u32 sid;
459 u32 ssid;
460 u64 addr;
461 u8 size;
462 bool global;
463 } atc;
464
465 #define CMDQ_OP_PRI_RESP 0x41
466 struct {
467 u32 sid;
468 u32 ssid;
469 u16 grpid;
470 enum pri_resp resp;
471 } pri;
472
473 #define CMDQ_OP_CMD_SYNC 0x46
474 struct {
475 u32 msidata;
476 u64 msiaddr;
477 } sync;
478 };
479};
480
481struct arm_smmu_queue {
482 int irq;
483
484 __le64 *base;
485 dma_addr_t base_dma;
486 u64 q_base;
487
488 size_t ent_dwords;
489 u32 max_n_shift;
490 u32 prod;
491 u32 cons;
492
493 u32 __iomem *prod_reg;
494 u32 __iomem *cons_reg;
495};
496
497struct arm_smmu_cmdq {
498 struct arm_smmu_queue q;
499 spinlock_t lock;
500};
501
502struct arm_smmu_evtq {
503 struct arm_smmu_queue q;
504 u32 max_stalls;
505};
506
507struct arm_smmu_priq {
508 struct arm_smmu_queue q;
509};
510
511
512struct arm_smmu_strtab_l1_desc {
513 u8 span;
514
515 __le64 *l2ptr;
516 dma_addr_t l2ptr_dma;
517};
518
519struct arm_smmu_s1_cfg {
520 __le64 *cdptr;
521 dma_addr_t cdptr_dma;
522
523 struct arm_smmu_ctx_desc {
524 u16 asid;
525 u64 ttbr;
526 u64 tcr;
527 u64 mair;
528 } cd;
529};
530
531struct arm_smmu_s2_cfg {
532 u16 vmid;
533 u64 vttbr;
534 u64 vtcr;
535};
536
537struct arm_smmu_strtab_cfg {
538 __le64 *strtab;
539 dma_addr_t strtab_dma;
540 struct arm_smmu_strtab_l1_desc *l1_desc;
541 unsigned int num_l1_ents;
542
543 u64 strtab_base;
544 u32 strtab_base_cfg;
545};
546
/* An SMMUv3 instance */
548struct arm_smmu_device {
549 struct device *dev;
550 void __iomem *base;
551
552#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
553#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
554#define ARM_SMMU_FEAT_TT_LE (1 << 2)
555#define ARM_SMMU_FEAT_TT_BE (1 << 3)
556#define ARM_SMMU_FEAT_PRI (1 << 4)
557#define ARM_SMMU_FEAT_ATS (1 << 5)
558#define ARM_SMMU_FEAT_SEV (1 << 6)
559#define ARM_SMMU_FEAT_MSI (1 << 7)
560#define ARM_SMMU_FEAT_COHERENCY (1 << 8)
561#define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
562#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
563#define ARM_SMMU_FEAT_STALLS (1 << 11)
564#define ARM_SMMU_FEAT_HYP (1 << 12)
565#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
566#define ARM_SMMU_FEAT_VAX (1 << 14)
567 u32 features;
568
569#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
570#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
571 u32 options;
572
573 struct arm_smmu_cmdq cmdq;
574 struct arm_smmu_evtq evtq;
575 struct arm_smmu_priq priq;
576
577 int gerr_irq;
578 int combined_irq;
579 u32 sync_nr;
580 u8 prev_cmd_opcode;
581
582 unsigned long ias;
583 unsigned long oas;
584 unsigned long pgsize_bitmap;
585
586#define ARM_SMMU_MAX_ASIDS (1 << 16)
587 unsigned int asid_bits;
588 DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);
589
590#define ARM_SMMU_MAX_VMIDS (1 << 16)
591 unsigned int vmid_bits;
592 DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);
593
594 unsigned int ssid_bits;
595 unsigned int sid_bits;
596
597 struct arm_smmu_strtab_cfg strtab_cfg;
598
599
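	/*
	 * sync_count is the target of MSI-based CMD_SYNC completion writes
	 * (see __arm_smmu_cmdq_issue_sync_msi()); the union pads it out to a
	 * 64-bit slot for implementations that write a 64-bit MSI payload.
	 */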
600 union {
601 u32 sync_count;
602 u64 padding;
603 };
604
605
606 struct iommu_device iommu;
607};
608
/* SMMU private data for each master */
610struct arm_smmu_master {
611 struct arm_smmu_device *smmu;
612 struct device *dev;
613 struct arm_smmu_domain *domain;
614 struct list_head domain_head;
615 u32 *sids;
616 unsigned int num_sids;
617 bool ats_enabled :1;
618};
619
620
621enum arm_smmu_domain_stage {
622 ARM_SMMU_DOMAIN_S1 = 0,
623 ARM_SMMU_DOMAIN_S2,
624 ARM_SMMU_DOMAIN_NESTED,
625 ARM_SMMU_DOMAIN_BYPASS,
626};
/* SMMU private data for an IOMMU domain */
628struct arm_smmu_domain {
629 struct arm_smmu_device *smmu;
630 struct mutex init_mutex;
631
632 struct io_pgtable_ops *pgtbl_ops;
633 bool non_strict;
634
635 enum arm_smmu_domain_stage stage;
636 union {
637 struct arm_smmu_s1_cfg s1_cfg;
638 struct arm_smmu_s2_cfg s2_cfg;
639 };
640
641 struct iommu_domain domain;
642
643 struct list_head devices;
644 spinlock_t devices_lock;
645};
646
647struct arm_smmu_option_prop {
648 u32 opt;
649 const char *prop;
650};
651
652static struct arm_smmu_option_prop arm_smmu_options[] = {
653 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
654 { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
655 { 0, NULL},
656};
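
/*
 * These quirks are enabled by boolean properties on the SMMU node, e.g.
 * (hypothetical node shown for illustration):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		...
 *		hisilicon,broken-prefetch-cmd;
 *	};
 */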
657
658static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
659 struct arm_smmu_device *smmu)
660{
661 if ((offset > SZ_64K) &&
662 (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
663 offset -= SZ_64K;
664
665 return smmu->base + offset;
666}
667
668static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
669{
670 return container_of(dom, struct arm_smmu_domain, domain);
671}
672
673static void parse_driver_options(struct arm_smmu_device *smmu)
674{
675 int i = 0;
676
677 do {
678 if (of_property_read_bool(smmu->dev->of_node,
679 arm_smmu_options[i].prop)) {
680 smmu->options |= arm_smmu_options[i].opt;
681 dev_notice(smmu->dev, "option %s\n",
682 arm_smmu_options[i].prop);
683 }
684 } while (arm_smmu_options[++i].opt);
685}
686
/* Low-level queue manipulation functions */
688static bool queue_full(struct arm_smmu_queue *q)
689{
690 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
691 Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
692}
693
694static bool queue_empty(struct arm_smmu_queue *q)
695{
696 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
697 Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
698}
699
700static void queue_sync_cons(struct arm_smmu_queue *q)
701{
702 q->cons = readl_relaxed(q->cons_reg);
703}
704
705static void queue_inc_cons(struct arm_smmu_queue *q)
706{
707 u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
708
709 q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);

	/*
	 * Ensure that all CPU accesses (reads and writes) to the queue
	 * are complete before we update the cons pointer.
	 */
	mb();
716 writel_relaxed(q->cons, q->cons_reg);
717}
718
719static int queue_sync_prod(struct arm_smmu_queue *q)
720{
721 int ret = 0;
722 u32 prod = readl_relaxed(q->prod_reg);
723
724 if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
725 ret = -EOVERFLOW;
726
727 q->prod = prod;
728 return ret;
729}
730
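/*
 * queue_inc_prod() uses writel() rather than writel_relaxed() so that the
 * new queue entry written by queue_write() is visible to the SMMU before
 * the updated producer pointer.
 */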
731static void queue_inc_prod(struct arm_smmu_queue *q)
732{
733 u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;
734
735 q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
736 writel(q->prod, q->prod_reg);
737}
738
739
740
741
742
743static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
744{
745 ktime_t timeout;
746 unsigned int delay = 1, spin_cnt = 0;
747
748
749 timeout = ktime_add_us(ktime_get(), sync ?
750 ARM_SMMU_CMDQ_SYNC_TIMEOUT_US :
751 ARM_SMMU_POLL_TIMEOUT_US);
752
753 while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) {
754 if (ktime_compare(ktime_get(), timeout) > 0)
755 return -ETIMEDOUT;
756
757 if (wfe) {
758 wfe();
759 } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) {
760 cpu_relax();
761 continue;
762 } else {
763 udelay(delay);
764 delay *= 2;
765 spin_cnt = 0;
766 }
767 }
768
769 return 0;
770}
771
772static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
773{
774 int i;
775
776 for (i = 0; i < n_dwords; ++i)
777 *dst++ = cpu_to_le64(*src++);
778}
779
780static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
781{
782 if (queue_full(q))
783 return -ENOSPC;
784
785 queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
786 queue_inc_prod(q);
787 return 0;
788}
789
790static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
791{
792 int i;
793
794 for (i = 0; i < n_dwords; ++i)
795 *dst++ = le64_to_cpu(*src++);
796}
797
798static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
799{
800 if (queue_empty(q))
801 return -EAGAIN;
802
803 queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
804 queue_inc_cons(q);
805 return 0;
806}
807
/* High-level queue accessors */
809static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
810{
811 memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
812 cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
813
814 switch (ent->opcode) {
815 case CMDQ_OP_TLBI_EL2_ALL:
816 case CMDQ_OP_TLBI_NSNH_ALL:
817 break;
818 case CMDQ_OP_PREFETCH_CFG:
819 cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
820 cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
821 cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
822 break;
823 case CMDQ_OP_CFGI_STE:
824 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
825 cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
826 break;
827 case CMDQ_OP_CFGI_ALL:
828
829 cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
830 break;
831 case CMDQ_OP_TLBI_NH_VA:
832 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
833 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
834 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
835 break;
836 case CMDQ_OP_TLBI_S2_IPA:
837 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
838 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
839 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
840 break;
841 case CMDQ_OP_TLBI_NH_ASID:
842 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
		/* Fallthrough */
844 case CMDQ_OP_TLBI_S12_VMALL:
845 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
846 break;
847 case CMDQ_OP_ATC_INV:
848 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
849 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
850 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
851 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
852 cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
853 cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
854 break;
855 case CMDQ_OP_PRI_RESP:
856 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
857 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
858 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
859 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
860 switch (ent->pri.resp) {
861 case PRI_RESP_DENY:
862 case PRI_RESP_FAIL:
863 case PRI_RESP_SUCC:
864 break;
865 default:
866 return -EINVAL;
867 }
868 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
869 break;
870 case CMDQ_OP_CMD_SYNC:
871 if (ent->sync.msiaddr)
872 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
873 else
874 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
875 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
876 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);

		/*
		 * Commands are written little-endian, but we want the SMMU to
		 * receive MSIData, and write it back to memory, in CPU byte
		 * order, so big-endian needs an extra byteswap here.
		 */
882 cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
883 cpu_to_le32(ent->sync.msidata));
884 cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
885 break;
886 default:
887 return -ENOENT;
888 }
889
890 return 0;
891}
892
893static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
894{
895 static const char *cerror_str[] = {
896 [CMDQ_ERR_CERROR_NONE_IDX] = "No error",
897 [CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
898 [CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
899 [CMDQ_ERR_CERROR_ATC_INV_IDX] = "ATC invalidate timeout",
900 };
901
902 int i;
903 u64 cmd[CMDQ_ENT_DWORDS];
904 struct arm_smmu_queue *q = &smmu->cmdq.q;
905 u32 cons = readl_relaxed(q->cons_reg);
906 u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
907 struct arm_smmu_cmdq_ent cmd_sync = {
908 .opcode = CMDQ_OP_CMD_SYNC,
909 };
910
911 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
912 idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");
913
914 switch (idx) {
915 case CMDQ_ERR_CERROR_ABT_IDX:
916 dev_err(smmu->dev, "retrying command fetch\n");
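		/* Fallthrough */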
917 case CMDQ_ERR_CERROR_NONE_IDX:
918 return;
919 case CMDQ_ERR_CERROR_ATC_INV_IDX:
920
921
922
923
924
925
926 return;
927 case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
929 default:
930 break;
931 }

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
937 queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
938 dev_err(smmu->dev, "skipping command in error state:\n");
939 for (i = 0; i < ARRAY_SIZE(cmd); ++i)
940 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
943 if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
944 dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
945 return;
946 }
947
948 queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
949}
950
951static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
952{
953 struct arm_smmu_queue *q = &smmu->cmdq.q;
954 bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
955
956 smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]);
957
958 while (queue_insert_raw(q, cmd) == -ENOSPC) {
959 if (queue_poll_cons(q, false, wfe))
960 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
961 }
962}
963
964static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
965 struct arm_smmu_cmdq_ent *ent)
966{
967 u64 cmd[CMDQ_ENT_DWORDS];
968 unsigned long flags;
969
970 if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
971 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
972 ent->opcode);
973 return;
974 }
975
976 spin_lock_irqsave(&smmu->cmdq.lock, flags);
977 arm_smmu_cmdq_insert_cmd(smmu, cmd);
978 spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
979}
980
981
982
983
984
985static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
986{
987 ktime_t timeout;
988 u32 val;
989
990 timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US);
991 val = smp_cond_load_acquire(&smmu->sync_count,
992 (int)(VAL - sync_idx) >= 0 ||
993 !ktime_before(ktime_get(), timeout));
994
995 return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
996}
997
998static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
999{
1000 u64 cmd[CMDQ_ENT_DWORDS];
1001 unsigned long flags;
1002 struct arm_smmu_cmdq_ent ent = {
1003 .opcode = CMDQ_OP_CMD_SYNC,
1004 .sync = {
1005 .msiaddr = virt_to_phys(&smmu->sync_count),
1006 },
1007 };
1008
1009 spin_lock_irqsave(&smmu->cmdq.lock, flags);
1010
	/* Piggy-back on the previous command if it's a CMD_SYNC */
1012 if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
1013 ent.sync.msidata = smmu->sync_nr;
1014 } else {
1015 ent.sync.msidata = ++smmu->sync_nr;
1016 arm_smmu_cmdq_build_cmd(cmd, &ent);
1017 arm_smmu_cmdq_insert_cmd(smmu, cmd);
1018 }
1019
1020 spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
1021
1022 return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
1023}
1024
1025static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
1026{
1027 u64 cmd[CMDQ_ENT_DWORDS];
1028 unsigned long flags;
1029 bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
1030 struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
1031 int ret;
1032
1033 arm_smmu_cmdq_build_cmd(cmd, &ent);
1034
1035 spin_lock_irqsave(&smmu->cmdq.lock, flags);
1036 arm_smmu_cmdq_insert_cmd(smmu, cmd);
1037 ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
1038 spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
1039
1040 return ret;
1041}
1042
1043static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
1044{
1045 int ret;
1046 bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
1047 (smmu->features & ARM_SMMU_FEAT_COHERENCY);
1048
1049 ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
1050 : __arm_smmu_cmdq_issue_sync(smmu);
1051 if (ret)
1052 dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
1053 return ret;
1054}
1055
/* Context descriptor manipulation functions */
1057static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
1058{
1059 u64 val = 0;
1060
1061
1062 val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
1063 val |= ARM_SMMU_TCR2CD(tcr, TG0);
1064 val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
1065 val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
1066 val |= ARM_SMMU_TCR2CD(tcr, SH0);
1067 val |= ARM_SMMU_TCR2CD(tcr, EPD0);
1068 val |= ARM_SMMU_TCR2CD(tcr, EPD1);
1069 val |= ARM_SMMU_TCR2CD(tcr, IPS);
1070
1071 return val;
1072}
1073
1074static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
1075 struct arm_smmu_s1_cfg *cfg)
1076{
1077 u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
1083 val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
1084#ifdef __BIG_ENDIAN
1085 CTXDESC_CD_0_ENDI |
1086#endif
1087 CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
1088 CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) |
1089 CTXDESC_CD_0_V;
1090
	/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
1092 if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
1093 val |= CTXDESC_CD_0_S;
1094
1095 cfg->cdptr[0] = cpu_to_le64(val);
1096
1097 val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
1098 cfg->cdptr[1] = cpu_to_le64(val);
1099
1100 cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair);
1101}
1102
/* Stream table manipulation functions */
1104static void
1105arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
1106{
1107 u64 val = 0;
1108
1109 val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
1110 val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
1111
1112 *dst = cpu_to_le64(val);
1113}
1114
1115static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
1116{
1117 struct arm_smmu_cmdq_ent cmd = {
1118 .opcode = CMDQ_OP_CFGI_STE,
1119 .cfgi = {
1120 .sid = sid,
1121 .leaf = true,
1122 },
1123 };
1124
1125 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1126 arm_smmu_cmdq_issue_sync(smmu);
1127}
1128
1129static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
1130 __le64 *dst)
1131{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass/fault (init)
	 * 2. Bypass/fault -> translation/bypass (attach)
	 * 3. Translation/bypass -> bypass/fault (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read its fields in a defined order, we have the
	 * following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
1148 u64 val = le64_to_cpu(dst[0]);
1149 bool ste_live = false;
1150 struct arm_smmu_device *smmu = NULL;
1151 struct arm_smmu_s1_cfg *s1_cfg = NULL;
1152 struct arm_smmu_s2_cfg *s2_cfg = NULL;
1153 struct arm_smmu_domain *smmu_domain = NULL;
1154 struct arm_smmu_cmdq_ent prefetch_cmd = {
1155 .opcode = CMDQ_OP_PREFETCH_CFG,
1156 .prefetch = {
1157 .sid = sid,
1158 },
1159 };
1160
1161 if (master) {
1162 smmu_domain = master->domain;
1163 smmu = master->smmu;
1164 }
1165
1166 if (smmu_domain) {
1167 switch (smmu_domain->stage) {
1168 case ARM_SMMU_DOMAIN_S1:
1169 s1_cfg = &smmu_domain->s1_cfg;
1170 break;
1171 case ARM_SMMU_DOMAIN_S2:
1172 case ARM_SMMU_DOMAIN_NESTED:
1173 s2_cfg = &smmu_domain->s2_cfg;
1174 break;
1175 default:
1176 break;
1177 }
1178 }
1179
1180 if (val & STRTAB_STE_0_V) {
1181 switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
1182 case STRTAB_STE_0_CFG_BYPASS:
1183 break;
1184 case STRTAB_STE_0_CFG_S1_TRANS:
1185 case STRTAB_STE_0_CFG_S2_TRANS:
1186 ste_live = true;
1187 break;
1188 case STRTAB_STE_0_CFG_ABORT:
1189 BUG_ON(!disable_bypass);
1190 break;
1191 default:
1192 BUG();
1193 }
1194 }
1195
	/* Nuke the existing STE_0 value, as we're going to rewrite it */
1197 val = STRTAB_STE_0_V;
1198
	/* Bypass/fault */
1200 if (!smmu_domain || !(s1_cfg || s2_cfg)) {
1201 if (!smmu_domain && disable_bypass)
1202 val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
1203 else
1204 val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
1205
1206 dst[0] = cpu_to_le64(val);
1207 dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
1208 STRTAB_STE_1_SHCFG_INCOMING));
1209 dst[2] = 0;
1210
		/*
		 * The SMMU can perform negative caching, so we must sync
		 * the STE regardless of whether the old value was live.
		 */
1214 if (smmu)
1215 arm_smmu_sync_ste_for_sid(smmu, sid);
1216 return;
1217 }
1218
1219 if (s1_cfg) {
1220 BUG_ON(ste_live);
1221 dst[1] = cpu_to_le64(
1222 FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
1223 FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
1224 FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
1225 FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
1226
1227 if (smmu->features & ARM_SMMU_FEAT_STALLS &&
1228 !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
1229 dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
1230
1231 val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
1232 FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
1233 }
1234
1235 if (s2_cfg) {
1236 BUG_ON(ste_live);
1237 dst[2] = cpu_to_le64(
1238 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
1239 FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
1240#ifdef __BIG_ENDIAN
1241 STRTAB_STE_2_S2ENDI |
1242#endif
1243 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
1244 STRTAB_STE_2_S2R);
1245
1246 dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
1247
1248 val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
1249 }
1250
1251 if (master->ats_enabled)
1252 dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
1253 STRTAB_STE_1_EATS_TRANS));
1254
1255 arm_smmu_sync_ste_for_sid(smmu, sid);
1256 dst[0] = cpu_to_le64(val);
1257 arm_smmu_sync_ste_for_sid(smmu, sid);
1258
	/* It's likely that we'll want to use the new STE soon */
1260 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1261 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1262}
1263
1264static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
1265{
1266 unsigned int i;
1267
1268 for (i = 0; i < nent; ++i) {
1269 arm_smmu_write_strtab_ent(NULL, -1, strtab);
1270 strtab += STRTAB_STE_DWORDS;
1271 }
1272}
1273
1274static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1275{
1276 size_t size;
1277 void *strtab;
1278 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1279 struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];
1280
1281 if (desc->l2ptr)
1282 return 0;
1283
1284 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
1285 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
1286
1287 desc->span = STRTAB_SPLIT + 1;
1288 desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
1289 GFP_KERNEL | __GFP_ZERO);
1290 if (!desc->l2ptr) {
1291 dev_err(smmu->dev,
1292 "failed to allocate l2 stream table for SID %u\n",
1293 sid);
1294 return -ENOMEM;
1295 }
1296
1297 arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
1298 arm_smmu_write_strtab_l1_desc(strtab, desc);
1299 return 0;
1300}
1301
/* IRQ and event handlers */
1303static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
1304{
1305 int i;
1306 struct arm_smmu_device *smmu = dev;
1307 struct arm_smmu_queue *q = &smmu->evtq.q;
1308 u64 evt[EVTQ_ENT_DWORDS];
1309
1310 do {
1311 while (!queue_remove_raw(q, evt)) {
1312 u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
1313
1314 dev_info(smmu->dev, "event 0x%02x received:\n", id);
1315 for (i = 0; i < ARRAY_SIZE(evt); ++i)
1316 dev_info(smmu->dev, "\t0x%016llx\n",
1317 (unsigned long long)evt[i]);
1318
1319 }
1320
		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
1325 if (queue_sync_prod(q) == -EOVERFLOW)
1326 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1327 } while (!queue_empty(q));
1328
	/* Sync our overflow flag, as we believe we're now fully caught up */
1330 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
1331 return IRQ_HANDLED;
1332}
1333
1334static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
1335{
1336 u32 sid, ssid;
1337 u16 grpid;
1338 bool ssv, last;
1339
1340 sid = FIELD_GET(PRIQ_0_SID, evt[0]);
1341 ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
1342 ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
1343 last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
1344 grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
1345
1346 dev_info(smmu->dev, "unexpected PRI request received:\n");
1347 dev_info(smmu->dev,
1348 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
1349 sid, ssid, grpid, last ? "L" : "",
1350 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
1351 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
1352 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
1353 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
1354 evt[1] & PRIQ_1_ADDR_MASK);
1355
1356 if (last) {
1357 struct arm_smmu_cmdq_ent cmd = {
1358 .opcode = CMDQ_OP_PRI_RESP,
1359 .substream_valid = ssv,
1360 .pri = {
1361 .sid = sid,
1362 .ssid = ssid,
1363 .grpid = grpid,
1364 .resp = PRI_RESP_DENY,
1365 },
1366 };
1367
1368 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1369 }
1370}
1371
1372static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
1373{
1374 struct arm_smmu_device *smmu = dev;
1375 struct arm_smmu_queue *q = &smmu->priq.q;
1376 u64 evt[PRIQ_ENT_DWORDS];
1377
1378 do {
1379 while (!queue_remove_raw(q, evt))
1380 arm_smmu_handle_ppr(smmu, evt);
1381
1382 if (queue_sync_prod(q) == -EOVERFLOW)
1383 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1384 } while (!queue_empty(q));
1385
	/* Sync our overflow flag, as we believe we're now fully caught up */
1387 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
1388 writel(q->cons, q->cons_reg);
1389 return IRQ_HANDLED;
1390}
1391
1392static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
1393
1394static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
1395{
1396 u32 gerror, gerrorn, active;
1397 struct arm_smmu_device *smmu = dev;
1398
1399 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
1400 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
1401
1402 active = gerror ^ gerrorn;
1403 if (!(active & GERROR_ERR_MASK))
1404 return IRQ_NONE;
1405
1406 dev_warn(smmu->dev,
1407 "unexpected global error reported (0x%08x), this could be serious\n",
1408 active);
1409
1410 if (active & GERROR_SFM_ERR) {
1411 dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
1412 arm_smmu_device_disable(smmu);
1413 }
1414
1415 if (active & GERROR_MSI_GERROR_ABT_ERR)
1416 dev_warn(smmu->dev, "GERROR MSI write aborted\n");
1417
1418 if (active & GERROR_MSI_PRIQ_ABT_ERR)
1419 dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
1420
1421 if (active & GERROR_MSI_EVTQ_ABT_ERR)
1422 dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
1423
1424 if (active & GERROR_MSI_CMDQ_ABT_ERR)
1425 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
1426
1427 if (active & GERROR_PRIQ_ABT_ERR)
1428 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
1429
1430 if (active & GERROR_EVTQ_ABT_ERR)
1431 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
1432
1433 if (active & GERROR_CMDQ_ERR)
1434 arm_smmu_cmdq_skip_err(smmu);
1435
1436 writel(gerror, smmu->base + ARM_SMMU_GERRORN);
1437 return IRQ_HANDLED;
1438}
1439
1440static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
1441{
1442 struct arm_smmu_device *smmu = dev;
1443
1444 arm_smmu_evtq_thread(irq, dev);
1445 if (smmu->features & ARM_SMMU_FEAT_PRI)
1446 arm_smmu_priq_thread(irq, dev);
1447
1448 return IRQ_HANDLED;
1449}
1450
1451static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
1452{
1453 arm_smmu_gerror_handler(irq, dev);
1454 return IRQ_WAKE_THREAD;
1455}
1456
1457static void
1458arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
1459 struct arm_smmu_cmdq_ent *cmd)
1460{
1461 size_t log2_span;
1462 size_t span_mask;
1463
1464 size_t inval_grain_shift = 12;
1465 unsigned long page_start, page_end;
1466
1467 *cmd = (struct arm_smmu_cmdq_ent) {
1468 .opcode = CMDQ_OP_ATC_INV,
1469 .substream_valid = !!ssid,
1470 .atc.ssid = ssid,
1471 };
1472
1473 if (!size) {
1474 cmd->atc.size = ATC_INV_SIZE_ALL;
1475 return;
1476 }
1477
1478 page_start = iova >> inval_grain_shift;
1479 page_end = (iova + size - 1) >> inval_grain_shift;
1480
	/*
	 * The invalidation granule here is 4KiB (inval_grain_shift == 12), so
	 * the range is described as a naturally aligned, power-of-two span of
	 * 4KiB pages: the address is aligned down to the span and the SIZE
	 * field carries log2(span).
	 *
	 * Find the smallest span that covers the range: the most significant
	 * bit that differs between the first and last page number,
	 * fls(page_start ^ page_end), gives the required size. For example:
	 *
	 * To invalidate pages [8; 11], the range is already ideal:
	 *		x    = 0b1000 ^ 0b1011 = 0b0011
	 *		span = 1 << fls(x)     = 4
	 *
	 * To invalidate pages [7; 10], we need to invalidate [0; 15]:
	 *		x    = 0b0111 ^ 0b1010 = 0b1101
	 *		span = 1 << fls(x)     = 16
	 */
1501 log2_span = fls_long(page_start ^ page_end);
1502 span_mask = (1ULL << log2_span) - 1;
1503
1504 page_start &= ~span_mask;
1505
1506 cmd->atc.addr = page_start << inval_grain_shift;
1507 cmd->atc.size = log2_span;
1508}
1509
1510static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
1511 struct arm_smmu_cmdq_ent *cmd)
1512{
1513 int i;
1514
1515 if (!master->ats_enabled)
1516 return 0;
1517
1518 for (i = 0; i < master->num_sids; i++) {
1519 cmd->atc.sid = master->sids[i];
1520 arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
1521 }
1522
1523 return arm_smmu_cmdq_issue_sync(master->smmu);
1524}
1525
1526static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
1527 int ssid, unsigned long iova, size_t size)
1528{
1529 int ret = 0;
1530 unsigned long flags;
1531 struct arm_smmu_cmdq_ent cmd;
1532 struct arm_smmu_master *master;
1533
1534 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
1535 return 0;
1536
1537 arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
1538
1539 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
1540 list_for_each_entry(master, &smmu_domain->devices, domain_head)
1541 ret |= arm_smmu_atc_inv_master(master, &cmd);
1542 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
1543
1544 return ret ? -ETIMEDOUT : 0;
1545}
1546
/* IO_PGTABLE API */
1548static void arm_smmu_tlb_sync(void *cookie)
1549{
1550 struct arm_smmu_domain *smmu_domain = cookie;
1551
1552 arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
1553}
1554
1555static void arm_smmu_tlb_inv_context(void *cookie)
1556{
1557 struct arm_smmu_domain *smmu_domain = cookie;
1558 struct arm_smmu_device *smmu = smmu_domain->smmu;
1559 struct arm_smmu_cmdq_ent cmd;
1560
1561 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1562 cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
1563 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
1564 cmd.tlbi.vmid = 0;
1565 } else {
1566 cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
1567 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1568 }
1569
	/*
	 * NOTE: when io-pgtable is in non-strict mode, unmaps on this CPU may
	 * have cleared PTEs that are not yet visible to the SMMU when we get
	 * here. We rely on the barrier implicit in updating the command queue
	 * producer pointer to order those writes before the TLBI.
	 */
1576 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1577 arm_smmu_cmdq_issue_sync(smmu);
1578}
1579
1580static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
1581 size_t granule, bool leaf, void *cookie)
1582{
1583 struct arm_smmu_domain *smmu_domain = cookie;
1584 struct arm_smmu_device *smmu = smmu_domain->smmu;
1585 struct arm_smmu_cmdq_ent cmd = {
1586 .tlbi = {
1587 .leaf = leaf,
1588 .addr = iova,
1589 },
1590 };
1591
1592 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1593 cmd.opcode = CMDQ_OP_TLBI_NH_VA;
1594 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
1595 } else {
1596 cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
1597 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1598 }
1599
1600 do {
1601 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1602 cmd.tlbi.addr += granule;
1603 } while (size -= granule);
1604}
1605
1606static const struct iommu_gather_ops arm_smmu_gather_ops = {
1607 .tlb_flush_all = arm_smmu_tlb_inv_context,
1608 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1609 .tlb_sync = arm_smmu_tlb_sync,
1610};
1611
/* IOMMU API */
1613static bool arm_smmu_capable(enum iommu_cap cap)
1614{
1615 switch (cap) {
1616 case IOMMU_CAP_CACHE_COHERENCY:
1617 return true;
1618 case IOMMU_CAP_NOEXEC:
1619 return true;
1620 default:
1621 return false;
1622 }
1623}
1624
1625static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
1626{
1627 struct arm_smmu_domain *smmu_domain;
1628
1629 if (type != IOMMU_DOMAIN_UNMANAGED &&
1630 type != IOMMU_DOMAIN_DMA &&
1631 type != IOMMU_DOMAIN_IDENTITY)
1632 return NULL;
1633

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
1639 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1640 if (!smmu_domain)
1641 return NULL;
1642
1643 if (type == IOMMU_DOMAIN_DMA &&
1644 iommu_get_dma_cookie(&smmu_domain->domain)) {
1645 kfree(smmu_domain);
1646 return NULL;
1647 }
1648
1649 mutex_init(&smmu_domain->init_mutex);
1650 INIT_LIST_HEAD(&smmu_domain->devices);
1651 spin_lock_init(&smmu_domain->devices_lock);
1652
1653 return &smmu_domain->domain;
1654}
1655
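/*
 * Simple bitmap allocator used for 16-bit ASIDs and VMIDs: find a clear bit
 * and claim it atomically, retrying if we race with another caller.
 */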
1656static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
1657{
1658 int idx, size = 1 << span;
1659
1660 do {
1661 idx = find_first_zero_bit(map, size);
1662 if (idx == size)
1663 return -ENOSPC;
1664 } while (test_and_set_bit(idx, map));
1665
1666 return idx;
1667}
1668
1669static void arm_smmu_bitmap_free(unsigned long *map, int idx)
1670{
1671 clear_bit(idx, map);
1672}
1673
1674static void arm_smmu_domain_free(struct iommu_domain *domain)
1675{
1676 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1677 struct arm_smmu_device *smmu = smmu_domain->smmu;
1678
1679 iommu_put_dma_cookie(domain);
1680 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
1681
	/* Free the CD and ASID, if we allocated them */
1683 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1684 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1685
1686 if (cfg->cdptr) {
1687 dmam_free_coherent(smmu_domain->smmu->dev,
1688 CTXDESC_CD_DWORDS << 3,
1689 cfg->cdptr,
1690 cfg->cdptr_dma);
1691
1692 arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
1693 }
1694 } else {
1695 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1696 if (cfg->vmid)
1697 arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
1698 }
1699
1700 kfree(smmu_domain);
1701}
1702
1703static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
1704 struct io_pgtable_cfg *pgtbl_cfg)
1705{
1706 int ret;
1707 int asid;
1708 struct arm_smmu_device *smmu = smmu_domain->smmu;
1709 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1710
1711 asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
1712 if (asid < 0)
1713 return asid;
1714
1715 cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
1716 &cfg->cdptr_dma,
1717 GFP_KERNEL | __GFP_ZERO);
1718 if (!cfg->cdptr) {
1719 dev_warn(smmu->dev, "failed to allocate context descriptor\n");
1720 ret = -ENOMEM;
1721 goto out_free_asid;
1722 }
1723
1724 cfg->cd.asid = (u16)asid;
1725 cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1726 cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1727 cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1728 return 0;
1729
1730out_free_asid:
1731 arm_smmu_bitmap_free(smmu->asid_map, asid);
1732 return ret;
1733}
1734
1735static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
1736 struct io_pgtable_cfg *pgtbl_cfg)
1737{
1738 int vmid;
1739 struct arm_smmu_device *smmu = smmu_domain->smmu;
1740 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1741
1742 vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
1743 if (vmid < 0)
1744 return vmid;
1745
1746 cfg->vmid = (u16)vmid;
1747 cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
1748 cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1749 return 0;
1750}
1751
1752static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1753{
1754 int ret;
1755 unsigned long ias, oas;
1756 enum io_pgtable_fmt fmt;
1757 struct io_pgtable_cfg pgtbl_cfg;
1758 struct io_pgtable_ops *pgtbl_ops;
1759 int (*finalise_stage_fn)(struct arm_smmu_domain *,
1760 struct io_pgtable_cfg *);
1761 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1762 struct arm_smmu_device *smmu = smmu_domain->smmu;
1763
1764 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
1765 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
1766 return 0;
1767 }
1768
	/* Restrict the stage to what we can actually support */
1770 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1771 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1772 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1773 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1774
1775 switch (smmu_domain->stage) {
1776 case ARM_SMMU_DOMAIN_S1:
1777 ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
1778 ias = min_t(unsigned long, ias, VA_BITS);
1779 oas = smmu->ias;
1780 fmt = ARM_64_LPAE_S1;
1781 finalise_stage_fn = arm_smmu_domain_finalise_s1;
1782 break;
1783 case ARM_SMMU_DOMAIN_NESTED:
1784 case ARM_SMMU_DOMAIN_S2:
1785 ias = smmu->ias;
1786 oas = smmu->oas;
1787 fmt = ARM_64_LPAE_S2;
1788 finalise_stage_fn = arm_smmu_domain_finalise_s2;
1789 break;
1790 default:
1791 return -EINVAL;
1792 }
1793
1794 pgtbl_cfg = (struct io_pgtable_cfg) {
1795 .pgsize_bitmap = smmu->pgsize_bitmap,
1796 .ias = ias,
1797 .oas = oas,
1798 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
1799 .tlb = &arm_smmu_gather_ops,
1800 .iommu_dev = smmu->dev,
1801 };
1802
1803 if (smmu_domain->non_strict)
1804 pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
1805
1806 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
1807 if (!pgtbl_ops)
1808 return -ENOMEM;
1809
1810 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1811 domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
1812 domain->geometry.force_aperture = true;
1813
1814 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
1815 if (ret < 0) {
1816 free_io_pgtable_ops(pgtbl_ops);
1817 return ret;
1818 }
1819
1820 smmu_domain->pgtbl_ops = pgtbl_ops;
1821 return 0;
1822}
1823
1824static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1825{
1826 __le64 *step;
1827 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1828
1829 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
1830 struct arm_smmu_strtab_l1_desc *l1_desc;
1831 int idx;
1832
		/* Two-level walk */
1834 idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
1835 l1_desc = &cfg->l1_desc[idx];
1836 idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
1837 step = &l1_desc->l2ptr[idx];
1838 } else {
		/* Simple linear lookup */
1840 step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
1841 }
1842
1843 return step;
1844}
1845
1846static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
1847{
1848 int i, j;
1849 struct arm_smmu_device *smmu = master->smmu;
1850
1851 for (i = 0; i < master->num_sids; ++i) {
1852 u32 sid = master->sids[i];
1853 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
1854
		/* Bridged PCI devices may end up with duplicated IDs */
1856 for (j = 0; j < i; j++)
1857 if (master->sids[j] == sid)
1858 break;
1859 if (j < i)
1860 continue;
1861
1862 arm_smmu_write_strtab_ent(master, sid, step);
1863 }
1864}
1865
1866static int arm_smmu_enable_ats(struct arm_smmu_master *master)
1867{
1868 int ret;
1869 size_t stu;
1870 struct pci_dev *pdev;
1871 struct arm_smmu_device *smmu = master->smmu;
1872 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
1873
1874 if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
1875 !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
1876 return -ENXIO;
1877
1878 pdev = to_pci_dev(master->dev);
1879 if (pdev->untrusted)
1880 return -EPERM;
1881
	/* Smallest Translation Unit: log2 of the smallest supported granule */
1883 stu = __ffs(smmu->pgsize_bitmap);
1884
1885 ret = pci_enable_ats(pdev, stu);
1886 if (ret)
1887 return ret;
1888
1889 master->ats_enabled = true;
1890 return 0;
1891}
1892
1893static void arm_smmu_disable_ats(struct arm_smmu_master *master)
1894{
1895 struct arm_smmu_cmdq_ent cmd;
1896
1897 if (!master->ats_enabled || !dev_is_pci(master->dev))
1898 return;
1899
1900 arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
1901 arm_smmu_atc_inv_master(master, &cmd);
1902 pci_disable_ats(to_pci_dev(master->dev));
1903 master->ats_enabled = false;
1904}
1905
1906static void arm_smmu_detach_dev(struct arm_smmu_master *master)
1907{
1908 unsigned long flags;
1909 struct arm_smmu_domain *smmu_domain = master->domain;
1910
1911 if (!smmu_domain)
1912 return;
1913
1914 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
1915 list_del(&master->domain_head);
1916 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
1917
1918 master->domain = NULL;
1919 arm_smmu_install_ste_for_dev(master);
1920
1921 arm_smmu_disable_ats(master);
1922}
1923
1924static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1925{
1926 int ret = 0;
1927 unsigned long flags;
1928 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1929 struct arm_smmu_device *smmu;
1930 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1931 struct arm_smmu_master *master;
1932
1933 if (!fwspec)
1934 return -ENOENT;
1935
1936 master = fwspec->iommu_priv;
1937 smmu = master->smmu;
1938
1939 arm_smmu_detach_dev(master);
1940
1941 mutex_lock(&smmu_domain->init_mutex);
1942
1943 if (!smmu_domain->smmu) {
1944 smmu_domain->smmu = smmu;
1945 ret = arm_smmu_domain_finalise(domain);
1946 if (ret) {
1947 smmu_domain->smmu = NULL;
1948 goto out_unlock;
1949 }
1950 } else if (smmu_domain->smmu != smmu) {
1951 dev_err(dev,
1952 "cannot attach to SMMU %s (upstream of %s)\n",
1953 dev_name(smmu_domain->smmu->dev),
1954 dev_name(smmu->dev));
1955 ret = -ENXIO;
1956 goto out_unlock;
1957 }
1958
1959 master->domain = smmu_domain;
1960
1961 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
1962 list_add(&master->domain_head, &smmu_domain->devices);
1963 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
1964
1965 if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
1966 arm_smmu_enable_ats(master);
1967
1968 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1969 arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
1970
1971 arm_smmu_install_ste_for_dev(master);
1972out_unlock:
1973 mutex_unlock(&smmu_domain->init_mutex);
1974 return ret;
1975}
1976
1977static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1978 phys_addr_t paddr, size_t size, int prot)
1979{
1980 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1981
1982 if (!ops)
1983 return -ENODEV;
1984
1985 return ops->map(ops, iova, paddr, size, prot);
1986}
1987
1988static size_t
1989arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1990{
1991 int ret;
1992 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1993 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1994
1995 if (!ops)
1996 return 0;
1997
1998 ret = ops->unmap(ops, iova, size);
1999 if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size))
2000 return 0;
2001
2002 return ret;
2003}
2004
2005static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
2006{
2007 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2008
2009 if (smmu_domain->smmu)
2010 arm_smmu_tlb_inv_context(smmu_domain);
2011}
2012
2013static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
2014{
2015 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
2016
2017 if (smmu)
2018 arm_smmu_cmdq_issue_sync(smmu);
2019}
2020
2021static phys_addr_t
2022arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2023{
2024 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
2025
2026 if (domain->type == IOMMU_DOMAIN_IDENTITY)
2027 return iova;
2028
2029 if (!ops)
2030 return 0;
2031
2032 return ops->iova_to_phys(ops, iova);
2033}
2034
2035static struct platform_driver arm_smmu_driver;
2036
2037static int arm_smmu_match_node(struct device *dev, const void *data)
2038{
2039 return dev->fwnode == data;
2040}
2041
2042static
2043struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
2044{
2045 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
2046 fwnode, arm_smmu_match_node);
2047 put_device(dev);
2048 return dev ? dev_get_drvdata(dev) : NULL;
2049}
2050
2051static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
2052{
2053 unsigned long limit = smmu->strtab_cfg.num_l1_ents;
2054
2055 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2056 limit *= 1UL << STRTAB_SPLIT;
2057
2058 return sid < limit;
2059}
2060
2061static struct iommu_ops arm_smmu_ops;
2062
2063static int arm_smmu_add_device(struct device *dev)
2064{
2065 int i, ret;
2066 struct arm_smmu_device *smmu;
2067 struct arm_smmu_master *master;
2068 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2069 struct iommu_group *group;
2070
2071 if (!fwspec || fwspec->ops != &arm_smmu_ops)
2072 return -ENODEV;
2073
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()!
	 */
2078 if (WARN_ON_ONCE(fwspec->iommu_priv)) {
2079 master = fwspec->iommu_priv;
2080 smmu = master->smmu;
2081 } else {
2082 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
2083 if (!smmu)
2084 return -ENODEV;
2085 master = kzalloc(sizeof(*master), GFP_KERNEL);
2086 if (!master)
2087 return -ENOMEM;
2088
2089 master->dev = dev;
2090 master->smmu = smmu;
2091 master->sids = fwspec->ids;
2092 master->num_sids = fwspec->num_ids;
2093 fwspec->iommu_priv = master;
2094 }
2095
	/* Check the SIDs are in range of the SMMU and our stream table */
2097 for (i = 0; i < master->num_sids; i++) {
2098 u32 sid = master->sids[i];
2099
2100 if (!arm_smmu_sid_in_range(smmu, sid))
2101 return -ERANGE;
2102
		/* Ensure l2 strtab is initialised */
2104 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
2105 ret = arm_smmu_init_l2_strtab(smmu, sid);
2106 if (ret)
2107 return ret;
2108 }
2109 }
2110
2111 group = iommu_group_get_for_dev(dev);
2112 if (!IS_ERR(group)) {
2113 iommu_group_put(group);
2114 iommu_device_link(&smmu->iommu, dev);
2115 }
2116
2117 return PTR_ERR_OR_ZERO(group);
2118}
2119
2120static void arm_smmu_remove_device(struct device *dev)
2121{
2122 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2123 struct arm_smmu_master *master;
2124 struct arm_smmu_device *smmu;
2125
2126 if (!fwspec || fwspec->ops != &arm_smmu_ops)
2127 return;
2128
2129 master = fwspec->iommu_priv;
2130 smmu = master->smmu;
2131 arm_smmu_detach_dev(master);
2132 iommu_group_remove_device(dev);
2133 iommu_device_unlink(&smmu->iommu, dev);
2134 kfree(master);
2135 iommu_fwspec_free(dev);
2136}
2137
2138static struct iommu_group *arm_smmu_device_group(struct device *dev)
2139{
2140 struct iommu_group *group;
2141
	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potential sparse 32-bit stream ID space.
	 */
2147 if (dev_is_pci(dev))
2148 group = pci_device_group(dev);
2149 else
2150 group = generic_device_group(dev);
2151
2152 return group;
2153}
2154
2155static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
2156 enum iommu_attr attr, void *data)
2157{
2158 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2159
2160 switch (domain->type) {
2161 case IOMMU_DOMAIN_UNMANAGED:
2162 switch (attr) {
2163 case DOMAIN_ATTR_NESTING:
2164 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2165 return 0;
2166 default:
2167 return -ENODEV;
2168 }
2169 break;
2170 case IOMMU_DOMAIN_DMA:
2171 switch (attr) {
2172 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
2173 *(int *)data = smmu_domain->non_strict;
2174 return 0;
2175 default:
2176 return -ENODEV;
2177 }
2178 break;
2179 default:
2180 return -EINVAL;
2181 }
2182}
2183
2184static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
2185 enum iommu_attr attr, void *data)
2186{
2187 int ret = 0;
2188 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2189
2190 mutex_lock(&smmu_domain->init_mutex);
2191
2192 switch (domain->type) {
2193 case IOMMU_DOMAIN_UNMANAGED:
2194 switch (attr) {
2195 case DOMAIN_ATTR_NESTING:
2196 if (smmu_domain->smmu) {
2197 ret = -EPERM;
2198 goto out_unlock;
2199 }
2200
2201 if (*(int *)data)
2202 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2203 else
2204 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2205 break;
2206 default:
2207 ret = -ENODEV;
2208 }
2209 break;
2210 case IOMMU_DOMAIN_DMA:
2211 switch(attr) {
2212 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
2213 smmu_domain->non_strict = *(int *)data;
2214 break;
2215 default:
2216 ret = -ENODEV;
2217 }
2218 break;
2219 default:
2220 ret = -EINVAL;
2221 }
2222
2223out_unlock:
2224 mutex_unlock(&smmu_domain->init_mutex);
2225 return ret;
2226}
2227
2228static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2229{
2230 return iommu_fwspec_add_ids(dev, args->args, 1);
2231}
2232
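/*
 * Reserve the software-managed MSI IOVA window so that DMA mappings never
 * overlap the range used for mapping MSI doorbells, then add any further
 * regions reported by firmware.
 */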
static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

/* Probing and initialisation functions */
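/*
 * Allocate the DMA-coherent backing for one queue and record its registers.
 * If the allocation fails, the queue size is halved repeatedly until it
 * either succeeds or drops below one page.
 */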
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords, const char *name)
{
	size_t qsz;

	do {
		qsz = ((1 << q->max_n_shift) * dwords) << 3;
		q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
					      GFP_KERNEL);
		if (q->base || qsz < PAGE_SIZE)
			break;

		q->max_n_shift--;
	} while (1);

	if (!q->base) {
		dev_err(smmu->dev,
			"failed to allocate queue (0x%zx bytes) for %s\n",
			qsz, name);
		return -ENOMEM;
	}

	if (!WARN_ON(q->base_dma & (qsz - 1))) {
		dev_info(smmu->dev, "allocated %u entries for %s\n",
			 1 << q->max_n_shift, name);
	}

	q->prod_reg	= arm_smmu_page1_fixup(prod_off, smmu);
	q->cons_reg	= arm_smmu_page1_fixup(cons_off, smmu);
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
	q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift);

	q->prod = q->cons = 0;
	return 0;
}

static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
				      "cmdq");
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
				      "evtq");
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
				       "priq");
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}

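/*
 * Two-level stream table: an L1 array of descriptors, each pointing at an L2
 * table of 1 << STRTAB_SPLIT STEs. The L1 size is derived from
 * STRTAB_L1_SZ_SHIFT and capped so that it never covers more StreamID bits
 * than the hardware reports.
 */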
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/* Calculate the L1 size, capped to the SIDSIZE. */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL);
	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
	reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}

static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR);
	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}

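/*
 * Write a register and spin until the SMMU mirrors the value in the
 * corresponding acknowledgement register (or the poll times out).
 */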
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

/* GBPA is "special": updates must wait for the UPDATE bit to clear */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);

	if (ret)
		dev_err(smmu->dev, "GBPA not responding to update\n");
	return ret;
}

static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

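/*
 * platform MSI write_msg callback: program the doorbell address, payload and
 * memory attributes for whichever interrupt source the MSI index selects.
 */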
static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	if (!dev->msi_domain) {
		dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
		return;
	}

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}

static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
{
	int irq, ret;

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	} else {
		dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	} else {
		dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
		} else {
			dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
		}
	}
}

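/*
 * Disable interrupt generation, wire up either the single combined IRQ or
 * the per-queue IRQs (MSI or wired), then re-enable the sources we can use.
 */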
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	irq = smmu->combined_irq;
	if (irq) {
		/*
		 * Some implementations (e.g. Cavium ThunderX2) don't provide
		 * unique irq lines; use a single irq line for all the SMMUv3
		 * interrupts.
		 */
		ret = devm_request_threaded_irq(smmu->dev, irq,
					arm_smmu_combined_irq_handler,
					arm_smmu_combined_irq_thread,
					IRQF_ONESHOT,
					"arm-smmu-v3-combined-irq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable combined irq\n");
	} else
		arm_smmu_setup_unique_irqs(smmu);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

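/*
 * Bring the SMMU up from scratch: disable it, program the memory attributes,
 * stream table and queue base registers, invalidate cached configuration and
 * TLBs, enable each queue in turn, set up interrupts and finally either
 * enable translation or leave the device in bypass.
 */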
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN) {
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
		WARN_ON(is_kdump_kernel() && !disable_bypass);
		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
	}

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (PTM, RECINVSID, E2H) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
	writel_relaxed(smmu->evtq.q.cons,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
		writel_relaxed(smmu->priq.q.cons,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	if (smmu->features & ARM_SMMU_FEAT_ATS) {
		enables |= CR0_ATSCHK;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable ATS check\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	if (is_kdump_kernel())
		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret)
			return ret;
	}
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}

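/*
 * Read the ID registers and translate them into the feature flags, queue
 * sizes, address sizes and page-size bitmap used by the rest of the driver.
 */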
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (FIELD_GET(IDR0_TTENDIAN, reg)) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
			 coherent ? "true" : "false");

	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		/* Fallthrough */
	case IDR0_STALL_MODEL_STALL:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (FIELD_GET(IDR0_TTF, reg)) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
					 FIELD_GET(IDR1_CMDQS, reg));
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
					 FIELD_GET(IDR1_EVTQS, reg));
	smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
					 FIELD_GET(IDR1_PRIQS, reg));

	/* SID/SSID sizes */
	smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
	smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	/* Input address size */
	if (FIELD_GET(IDR5_VAX, reg) == IDR5_VAX_52_BIT)
		smmu->features |= ARM_SMMU_FEAT_VAX;

	/* Output address size */
	switch (FIELD_GET(IDR5_OAS, reg)) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	case IDR5_OAS_52_BIT:
		smmu->oas = 52;
		smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}

#ifdef CONFIG_ACPI
static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
{
	switch (model) {
	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
		break;
	case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
		break;
	}

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	acpi_smmu_get_options(iort_smmu->model, smmu);

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

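/*
 * The DT probe below only checks that the SMMU node uses a single-cell
 * StreamID specifier. An illustrative binding that satisfies this check
 * (node names, addresses and the StreamID are placeholders, not taken from
 * any real board) looks like:
 *
 *	smmu: iommu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		#iommu-cells = <1>;
 *	};
 *
 *	master-device {
 *		iommus = <&smmu 0x1234>;	// StreamID 0x1234
 *	};
 */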
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}

static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
		return SZ_64K;
	else
		return SZ_128K;
}

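/*
 * Platform bus entry point: gather firmware configuration (DT or IORT), map
 * the MMIO region, probe the hardware, allocate the in-memory structures,
 * reset the device and register it with the IOMMU core and the relevant buses.
 */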
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "combined");
	if (irq > 0)
		smmu->combined_irq = irq;
	else {
		irq = platform_get_irq_byname(pdev, "eventq");
		if (irq > 0)
			smmu->evtq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "priq");
		if (irq > 0)
			smmu->priq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "gerror");
		if (irq > 0)
			smmu->gerr_irq = irq;
	}

	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu-v3",
		.of_match_table		= of_match_ptr(arm_smmu_of_match),
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.shutdown = arm_smmu_device_shutdown,
};
builtin_platform_driver(arm_smmu_driver);