#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
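
/* MMU register offsets */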
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE
#define FORCE_RESET_TIMEOUT	100	/* ms */
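
/* RK_MMU_STATUS fields */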
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)
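
/* RK_MMU_COMMAND command values */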
#define RK_MMU_CMD_ENABLE_PAGING    0
#define RK_MMU_CMD_DISABLE_PAGING   1
#define RK_MMU_CMD_ENABLE_STALL     2
#define RK_MMU_CMD_DISABLE_STALL    3
#define RK_MMU_CMD_ZAP_CACHE        4
#define RK_MMU_CMD_PAGE_FAULT_DONE  5
#define RK_MMU_CMD_FORCE_RESET      6
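
/* RK_MMU_INT_* register fields */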
#define RK_MMU_IRQ_PAGE_FAULT	0x01
#define RK_MMU_IRQ_BUS_ERROR	0x02
#define RK_MMU_IRQ_MASK		(RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES	1024
#define NUM_PT_ENTRIES	1024

#define SPAGE_ORDER	12
#define SPAGE_SIZE	(1 << SPAGE_ORDER)
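
/*
 * Support mapping any size that fits in one page table: 4 KiB to 4 MiB
 * (bits 12 through 22 of the bitmap below).
 */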
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

#define IOMMU_REG_POLL_COUNT_FAST 1000

struct rk_iommu_domain {
	struct list_head iommus;
	struct platform_device *pdev;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	int *irq;
	int num_irq;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
};

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32);

	dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}
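
/*
 * Poll until COND becomes true or MS milliseconds have elapsed, sleeping
 * 50-100 us between reads. Evaluates to 0 if COND became true, -ETIMEDOUT
 * otherwise. Sleeps, so it must not be used in atomic context.
 */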
#define rk_wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			ret__ = (COND) ? 0 : -ETIMEDOUT; \
			break; \
		} \
		usleep_range(50, 100); \
	} \
	ret__; \
})
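
/*
 * The Rockchip IOMMU uses a 2-level page table.
 * The first level is a single 4 KiB Directory Table (DT) of 1024 4-byte
 * Directory Table Entries (DTEs), each pointing to a Page Table.
 * The second level is a 4 KiB Page Table (PT) of 1024 4-byte Page Table
 * Entries (PTEs), each pointing to a 4 KiB page of physical memory.
 * Each MMU's MMU_DTE_ADDR register holds the physical address of the DT.
 *
 * Each DTE has a PT address and a valid bit:
 *  31:12 - PT address (PTs always start on a 4 KiB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if the PT at PT address is valid
 */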
#define RK_DTE_PT_ADDRESS_MASK	0xfffff000
#define RK_DTE_PT_VALID		BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
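
/*
 * Each PTE has a page address, some flags, and a valid bit:
 *  31:12 - page address (pages always start on a 4 KiB boundary)
 *  11: 9 - Reserved
 *   8: 1 - flags (only the readable and writable bits are used here)
 *      0 - 1 if the page at page address is valid
 */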
#define RK_PTE_PAGE_ADDRESS_MASK	0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK		0x000001fe
#define RK_PTE_PAGE_WRITABLE		BIT(2)
#define RK_PTE_PAGE_READABLE		BIT(1)
#define RK_PTE_PAGE_VALID		BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}
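
/*
 * Rockchip iova (IOMMU Virtual Address) format:
 *  31:22 - DTE index   - index of the DTE in the DT
 *  21:12 - PTE index   - index of the PTE in the PT at DTE.pt_address
 *  11: 0 - page offset - offset into the page at PTE.page_address
 */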
#define RK_IOVA_DTE_MASK	0xffc00000
#define RK_IOVA_DTE_SHIFT	22
#define RK_IOVA_PTE_MASK	0x003ff000
#define RK_IOVA_PTE_SHIFT	12
#define RK_IOVA_PAGE_MASK	0x00000fff
#define RK_IOVA_PAGE_SHIFT	0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;

	/* Shoot down one iotlb line per 4 KiB page in the range, on every mmu */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;

	if (rk_iommu_is_stall_active(iommu))
		return 0;
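
	/* Stall can only be enabled if paging is enabled */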
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;

	if (iommu->reset_disabled)
		return 0;
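
	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 5 nybbles are read back.
	 */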
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	for (i = 0; i < iommu->num_mmu; i++) {
		ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
				  FORCE_RESET_TIMEOUT);
		if (ret) {
			dev_err(iommu->dev, "FORCE_RESET command timed out\n");
			return ret;
		}
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
				IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);
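
			/*
			 * Report the page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap
			 * the cache and clear the page fault anyway.
			 */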
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev,
						   iova, flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;
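
	/* shootdown these iovas from all iommus using this domain */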
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;

		iommu = list_entry(pos, struct rk_iommu, node);
		rk_iommu_zap_lines(iommu, iova, size);
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	struct device *dev = &rk_domain->pdev->dev;
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pt_dma)) {
		dev_err(dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);
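
	/*
	 * Zap the first and last iova to evict from the iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * a dte or pte shared with an existing mapping.
	 */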
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);
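
	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */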
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);
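
	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */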
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for the iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct iommu_group *group;
	struct device *iommu_dev;
	struct rk_iommu *rk_iommu;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;
	iommu_dev = iommu_group_get_iommudata(group);
	rk_iommu = dev_get_drvdata(iommu_dev);
	iommu_group_put(group);

	return rk_iommu;
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret, i;
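
	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to a domain.
	 * Such a device does not belong to an iommu group.
	 */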
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		return ret;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		return ret;

	iommu->domain = domain;

	for (i = 0; i < iommu->num_irq; i++) {
		ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (ret)
			return ret;
	}

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);
	if (ret)
		return ret;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	dev_dbg(dev, "Attached to iommu domain\n");

	rk_iommu_disable_stall(iommu);

	return 0;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int i;

	/* Allow 'virtual devices' (e.g., drm) to detach from a domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Ignore error while disabling, just keep going */
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);

	for (i = 0; i < iommu->num_irq; i++)
		devm_free_irq(iommu->dev, iommu->irq[i], iommu);

	iommu->domain = NULL;

	dev_dbg(dev, "Detached from iommu domain\n");
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;
	struct platform_device *pdev;
	struct device *iommu_dev;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
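
	/*
	 * Register a pdev per domain, so the DMA API has a struct device to
	 * base its mappings on, even when a virtual master has no iommu
	 * slave device of its own.
	 */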
	pdev = platform_device_register_simple("rk_iommu_domain",
					       PLATFORM_DEVID_AUTO, NULL, 0);
	if (IS_ERR(pdev))
		return NULL;

	rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		goto err_unreg_pdev;

	rk_domain->pdev = pdev;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		goto err_unreg_pdev;
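
	/*
	 * The iommu uses a 2-level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */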
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	iommu_dev = &pdev->dev;
	rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
		dev_err(iommu_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
err_unreg_pdev:
	platform_device_unregister(pdev);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];

		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);

			dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	platform_device_unregister(rk_domain->pdev);
}

static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
	struct device_node *np = dev->of_node;
	int ret;
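
	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */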
	ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
	return (ret > 0);
}

static int rk_iommu_group_set_iommudata(struct iommu_group *group,
					struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct platform_device *pd;
	int ret;
	struct of_phandle_args args;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
					 &args);
	if (ret) {
		dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n",
			np, ret);
		return ret;
	}
	if (args.args_count != 0) {
		dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n",
			args.np, args.args_count);
		return -EINVAL;
	}

	pd = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!pd) {
		dev_err(dev, "iommu %pOF not found\n", args.np);
		return -EPROBE_DEFER;
	}

	/* Note: only a single slave iommu per master is handled here */
	iommu_group_set_iommudata(group, &pd->dev, NULL);

	return 0;
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	int ret;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return -ENODEV;

	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto err_put_group;

	ret = rk_iommu_group_set_iommudata(group, dev);
	if (ret)
		goto err_remove_device;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_link(&iommu->iommu, dev);

	iommu_group_put(group);

	return 0;

err_remove_device:
	iommu_group_remove_device(dev);
err_put_group:
	iommu_group_put(group);
	return ret;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_unlink(&iommu->iommu, dev);

	iommu_group_remove_device(dev);
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};

static int rk_iommu_domain_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;
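
	/* Set dma_ops for dev, otherwise it would be dummy_dma_ops */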
	arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));

	return 0;
}

static struct platform_driver rk_iommu_domain_driver = {
	.probe = rk_iommu_domain_probe,
	.driver = {
		.name = "rk_iommu_domain",
	},
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	/* PTR_ERR(NULL) would be 0, so don't report bogus success here */
	if (iommu->num_mmu == 0)
		return iommu->bases[0] ? PTR_ERR(iommu->bases[0]) : -ENODEV;

	iommu->num_irq = platform_irq_count(pdev);
	if (iommu->num_irq < 0)
		return iommu->num_irq;
	if (iommu->num_irq == 0)
		return -ENXIO;

	iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
				  GFP_KERNEL);
	if (!iommu->irq)
		return -ENOMEM;

	for (i = 0; i < iommu->num_irq; i++) {
		iommu->irq[i] = platform_get_irq(pdev, i);
		if (iommu->irq[i] < 0) {
			dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]);
			return -ENXIO;
		}
	}

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return err;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	err = iommu_device_register(&iommu->iommu);

	return err;
}

static int rk_iommu_remove(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);

	if (iommu) {
		iommu_device_sysfs_remove(&iommu->iommu);
		iommu_device_unregister(&iommu->iommu);
	}

	return 0;
}

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.remove = rk_iommu_remove,
	.driver = {
		.name = "rk_iommu",
		.of_match_table = rk_iommu_dt_ids,
	},
};

static int __init rk_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, rk_iommu_dt_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_domain_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_driver);
	if (ret)
		platform_driver_unregister(&rk_iommu_domain_driver);
	return ret;
}

static void __exit rk_iommu_exit(void)
{
	platform_driver_unregister(&rk_iommu_driver);
	platform_driver_unregister(&rk_iommu_domain_driver);
}

subsys_initcall(rk_iommu_init);
module_exit(rk_iommu_exit);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");