/*
 * IOMMU API for Rockchip
 *
 * Module Authors:	Simon Xue <xxm@rock-chips.com>
 *			Daniel Kurtz <djkurtz@chromium.org>
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR 0x00	/* Directory table address */
#define RK_MMU_STATUS 0x04
#define RK_MMU_COMMAND 0x08
#define RK_MMU_PAGE_FAULT_ADDR 0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE 0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT 0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR 0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK 0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS 0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING 0x24

#define DTE_ADDR_DUMMY 0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US 100
#define RK_MMU_FORCE_RESET_TIMEOUT_US 100000
#define RK_MMU_POLL_TIMEOUT_US 1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
#define RK_MMU_STATUS_IDLE BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING 0	/* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING 1	/* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL 2	/* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL 3	/* Stop stall, re-enable paging */
#define RK_MMU_CMD_ZAP_CACHE 4	/* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE 5	/* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET 6	/* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT 0x01	/* page fault */
#define RK_MMU_IRQ_BUS_ERROR 0x02	/* bus read error */
#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 *   4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;
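
/*
 * The CPU builds the DT/PT entries through normal cached writes, while the
 * IOMMU fetches them directly from memory through their streaming DMA
 * mapping (DMA_TO_DEVICE). rk_table_flush() pushes @count 32-bit entries
 * starting at @dma out to the device-visible copy.
 */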
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32);

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}
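
/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each
 * pointing to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing
 * to a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the DT.
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       +-----+
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */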
#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
#define RK_DTE_PT_VALID BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
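
/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 3 - Cache/buffer attribute flags (left at 0 by this driver)
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */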
#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
#define RK_PTE_PAGE_WRITABLE BIT(2)
#define RK_PTE_PAGE_READABLE BIT(1)
#define RK_PTE_PAGE_VALID BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}
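
/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 *
 * Worked example: iova 0x12345678 decomposes into DTE index 0x048,
 * PTE index 0x345 and page offset 0x678 with the helpers below.
 */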
#define RK_IOVA_DTE_MASK 0xffc00000
#define RK_IOVA_DTE_SHIFT 22
#define RK_IOVA_PTE_MASK 0x003ff000
#define RK_IOVA_PTE_SHIFT 12
#define RK_IOVA_PAGE_MASK 0x00000fff
#define RK_IOVA_PAGE_SHIFT 0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO: figure out when it is more efficient to shoot down the
	 * entire iotlb rather than iterate over individual lines.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}
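
/*
 * The helpers below follow a common pattern: write one of the RK_MMU_CMD_*
 * values to every MMU instance behind this IOMMU, then poll RK_MMU_STATUS
 * (or RK_MMU_DTE_ADDR for reset) with readx_poll_timeout() until all
 * instances report the requested state, or log the per-instance status on
 * timeout.
 */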
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall is only supported when paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}
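
/*
 * Page-fault/bus-error interrupt handler. For each MMU instance with a
 * pending interrupt, log the faulting IOVA (walking the page tables via
 * log_iova()), report page faults to any handler installed with
 * iommu_set_fault_handler(), zap the IOTLB, acknowledge the fault with
 * RK_MMU_CMD_PAGE_FAULT_DONE, and finally clear the interrupt status.
 */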
static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i;

	if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
		return IRQ_NONE;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap
			 * cache and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev,
						   iova, flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;

		iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		if (pm_runtime_get_if_in_use(iommu->dev)) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}
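
/*
 * Return the kernel virtual address of the page table that covers @iova,
 * allocating, mapping and installing a new page table (and flushing the new
 * DTE to the device) if none is present yet. Must be called with
 * rk_domain->dt_lock held, hence the GFP_ATOMIC allocation below.
 */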
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev->archdata.iommu;

	return data ? data->iommu : NULL;
}

/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;

	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	if (pm_runtime_get_if_in_use(iommu->dev)) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* If the IOMMU is powered off, defer programming it to resume time. */
	if (!pm_runtime_get_if_in_use(iommu->dev))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		return NULL;

	/*
	 * rk32xx iommus use a 2 level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	struct rk_iommudata *data;

	data = dev->archdata.iommu;
	if (!data)
		return -ENODEV;

	iommu = rk_iommu_from_dev(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);
	iommu_group_put(group);

	iommu_device_link(&iommu->iommu, dev);
	data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);

	return 0;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommudata *data = dev->archdata.iommu;

	iommu = rk_iommu_from_dev(dev);

	device_link_del(data->link);
	iommu_device_unlink(&iommu->iommu, dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev->archdata.iommu = data;

	platform_device_put(iommu_dev);

	return 0;
}

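/*
 * A master device is bound to its IOMMU through the "iommus" device-tree
 * property; the xlate callback above only uses args->np, so no extra
 * specifier cells are consumed (#iommu-cells = <0>). A rough, illustrative
 * sketch (labels, addresses and clock specifiers are placeholders; see the
 * rockchip,iommu binding for the authoritative format):
 *
 *	vop_mmu: iommu@ff930300 {
 *		compatible = "rockchip,iommu";
 *		reg = <0xff930300 0x100>;
 *		interrupts = <...>;
 *		clocks = <...>, <...>;
 *		clock-names = "aclk", "iface";
 *		#iommu-cells = <0>;
 *	};
 *
 *	&vop {
 *		iommus = <&vop_mmu>;
 *	};
 */
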
static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err)
			return err;
	}

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	/*
	 * iommu clocks should be present for all new devices and devicetrees,
	 * but there are older devicetrees without clocks out in the wild.
	 * So treat the clocks as optional for the time being.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

	err = iommu_device_register(&iommu->iommu);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for the domain to use with
	 * the DMA API, since a domain might not physically correspond to a
	 * single IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);

IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");