/*
 * IOMMU driver for Rockchip SoCs ("rk_iommu").
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED		BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE		BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE		BIT(2)
#define RK_MMU_STATUS_IDLE			BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY	BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE	BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE		BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING	0	/* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING	1	/* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL		2	/* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL	3	/* Stop stall, re-enable paging */
#define RK_MMU_CMD_ZAP_CACHE		4	/* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE	5	/* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET		6	/* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT	0x01	/* page fault */
#define RK_MMU_IRQ_BUS_ERROR	0x02	/* bus read error */
#define RK_MMU_IRQ_MASK		(RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES	1024
#define NUM_PT_ENTRIES	1024

#define SPAGE_ORDER	12
#define SPAGE_SIZE	(1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table: 4 KiB to 4 MiB.
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entries */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}
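
/*
 * The CPU updates the page tables through the kernel's linear mapping,
 * while the MMU hardware fetches them over the bus.  rk_table_flush()
 * above is what keeps both views coherent: every DTE/PTE update is
 * followed by a dma_sync_single_for_device() before the hardware may
 * walk the tables.
 */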

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each
 * pointing to a "Page Table" (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing
 * to a 4 KiB page of physical memory.
 *
 * The DT and each PT therefore fit in a single 4 KiB page
 * (4 bytes * 1024 entries).  Each MMU instance has a MMU_DTE_ADDR register
 * that holds the physical address of the DT, and the hardware walks the
 * tables using plain 32-bit physical addresses, so the tables must live in
 * 32-bit addressable memory (GFP_DMA32).
 *
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KiB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if the PT at PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK	0xfffff000
#define RK_DTE_PT_VALID		BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (pages always start on a 4 KiB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags (bit 2 = writable, bit 1 = readable; the remaining flag
 *          bits are cache/allocation hints that this driver leaves at zero)
 *      0 - 1 if the page at Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK	0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK		0x000001fe
#define RK_PTE_PAGE_WRITABLE		BIT(2)
#define RK_PTE_PAGE_READABLE		BIT(1)
#define RK_PTE_PAGE_VALID		BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}
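
/*
 * Example: rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE) yields
 * 0x12345007 - page address 0x12345000 with the readable (bit 1),
 * writable (bit 2) and valid (bit 0) bits set.
 */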

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * iova (IOMMU Virtual Address) format:
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of the DTE in the DT
 *  21:12 - PTE index   - index of the PTE in the PT at DTE.pt_address
 *  11: 0 - Page offset - offset into the page at PTE.page_address
 */
#define RK_IOVA_DTE_MASK	0xffc00000
#define RK_IOVA_DTE_SHIFT	22
#define RK_IOVA_PTE_MASK	0x003ff000
#define RK_IOVA_PTE_SHIFT	12
#define RK_IOVA_PAGE_MASK	0x00000fff
#define RK_IOVA_PAGE_SHIFT	0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
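
/*
 * Worked example: iova 0x12345678 decomposes into DTE index 0x048,
 * PTE index 0x345 and page offset 0x678.
 */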

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;

	/*
	 * Evict one IOTLB line per 4 KiB page in the range, on every MMU
	 * instance behind this IOMMU.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}
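
/*
 * The three helpers above only report a condition as true when it holds
 * on every MMU instance behind this IOMMU; they serve as the poll
 * targets for the readx_poll_timeout() calls below.
 */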

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 20 bits read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	/* Poll every RK_MMU_POLL_PERIOD_US, up to the force-reset timeout. */
	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_FORCE_RESET_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i, err;

	err = pm_runtime_get_if_in_use(iommu->dev);
	if (WARN_ON_ONCE(err <= 0))
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report the page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap
			 * the cache and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		int ret;

		iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}
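
/*
 * The page-table allocation below runs under dt_lock, hence GFP_ATOMIC;
 * GFP_DMA32 keeps the table within the 32-bit physical range that the
 * MMU hardware can address.
 */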
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}
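
/*
 * Invalidate up to size / SPAGE_SIZE PTEs, stopping early at the first
 * entry that is already invalid, and return the number of bytes that
 * were actually unmapped.
 */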
static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}
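
/*
 * Write size / SPAGE_SIZE PTEs.  If any target PTE is already valid,
 * the whole range is rolled back and -EADDRINUSE is returned: an
 * existing mapping must be unmapped before it can be replaced.
 */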
static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from the iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * a dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit within one page table
	 * (1024 4-KiB pages = 4 MiB), so 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size are aligned,
	 * we always map within a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit within one page table
	 * (1024 4-KiB pages = 4 MiB), so 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size are aligned,
	 * we always unmap within a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for the iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev->archdata.iommu;

	return data ? data->iommu : NULL;
}

/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore errors while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Only disable the hardware if it is currently powered on */
	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON_ONCE(ret < 0);
	if (ret > 0) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* If the IOMMU is powered down, defer programming it until resume */
	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		return NULL;

	/*
	 * The iommu uses a 2-level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];

		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);

			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	struct rk_iommudata *data;

	/* Not an iommu client device */
	data = dev->archdata.iommu;
	if (!data)
		return -ENODEV;

	iommu = rk_iommu_from_dev(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);
	iommu_group_put(group);

	iommu_device_link(&iommu->iommu, dev);
	data->link = device_link_add(dev, iommu->dev,
				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

	return 0;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommudata *data = dev->archdata.iommu;

	iommu = rk_iommu_from_dev(dev);

	device_link_del(data->link);
	iommu_device_unlink(&iommu->iommu, dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev->archdata.iommu = data;

	platform_device_put(iommu_dev);

	return 0;
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	/*
	 * iommu clocks should be present in all new devicetrees, but there
	 * are older ones without clocks, so treat -ENOENT as "no clocks"
	 * rather than failing the probe.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

	err = iommu_device_register(&iommu->iommu);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for domains.  The DMA API
	 * requires a single device for mappings, and we need one for the
	 * page table allocations.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0) {
			err = irq;
			pm_runtime_disable(dev);
			goto err_remove_sysfs;
		}

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err) {
			pm_runtime_disable(dev);
			goto err_remove_sysfs;
		}
	}

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);
	int i = 0, irq;

	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
		devm_free_irq(iommu->dev, irq, iommu);

	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
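
/*
 * A sketch of a device tree node this driver binds against.  The unit
 * address, register range, interrupt and clock specifiers below are
 * illustrative placeholders, not values from a real SoC; only the
 * compatible string and clock-names match what this driver expects:
 *
 *	iommu: iommu@ff930300 {
 *		compatible = "rockchip,iommu";
 *		reg = <0x0 0xff930300 0x0 0x100>;
 *		interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&cru ACLK_EXAMPLE>, <&cru HCLK_EXAMPLE>;
 *		clock-names = "aclk", "iface";
 *		#iommu-cells = <0>;
 *	};
 */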

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);
1307