1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include "v3d_drv.h"
22#include "v3d_regs.h"
23
/* The V3D MMU maps memory in 4KB pages. */
#define V3D_MMU_PAGE_SHIFT 12

/*
 * Page table entry flag bits.  The low bits of a PTE hold the physical
 * page frame number (physical address >> V3D_MMU_PAGE_SHIFT); the PFN
 * field is at most 24 bits wide (see the BIT(24) bound enforced in
 * v3d_mmu_insert_ptes()).
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)
32
/*
 * Invalidate the MMU TLB and flush the MMU cache (MMUC).
 *
 * Returns 0 on success, or the nonzero wait_for() result on timeout.
 * A timeout on the initial pre-wait is only logged, not returned, so
 * the clear/flush is still attempted in that case.
 */
static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
	int ret;

	/* Make sure a TLB invalidate kicked off earlier (e.g. by another
	 * caller) has finished before we start a new one.
	 */
	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret)
		dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");

	/* Kick off the TLB invalidate; read-modify-write so the other
	 * control bits are preserved.
	 */
	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
		  V3D_MMU_CTL_TLB_CLEAR);

	/* Start the MMU cache flush, keeping the MMUC enabled while it
	 * runs.
	 */
	V3D_WRITE(V3D_MMUC_CONTROL,
		  V3D_MMUC_CONTROL_FLUSH |
		  V3D_MMUC_CONTROL_ENABLE);

	/* Wait for the TLB invalidate to complete. */
	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret) {
		dev_err(v3d->dev, "TLB clear wait idle failed\n");
		return ret;
	}

	/* Wait for the MMUC flush to complete. */
	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
			 V3D_MMUC_CONTROL_FLUSHING), 100);
	if (ret)
		dev_err(v3d->dev, "MMUC flush wait idle failed\n");

	return ret;
}
66
/*
 * Program the MMU with the location of the page table and enable
 * address translation, then flush the TLB/MMUC so stale translations
 * are dropped.
 *
 * Returns 0 on success, or the nonzero result of v3d_mmu_flush_all()
 * on flush timeout.
 */
int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
	/* Page table base address is programmed in units of 4KB pages. */
	V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
	/* Enable translation, and both abort the access and raise an
	 * interrupt on invalid-PTE, write-violation, and cap-exceeded
	 * faults.
	 */
	V3D_WRITE(V3D_MMU_CTL,
		  V3D_MMU_CTL_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ABORT |
		  V3D_MMU_CTL_PT_INVALID_INT |
		  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
		  V3D_MMU_CTL_WRITE_VIOLATION_INT |
		  V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
		  V3D_MMU_CTL_CAP_EXCEEDED_INT);
	/* Point illegal accesses at the scratch page so they don't hit
	 * arbitrary memory.
	 */
	V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
		  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
		  V3D_MMU_ILLEGAL_ADDR_ENABLE);
	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

	return v3d_mmu_flush_all(v3d);
}
86
87void v3d_mmu_insert_ptes(struct v3d_bo *bo)
88{
89 struct drm_gem_shmem_object *shmem_obj = &bo->base;
90 struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
91 u32 page = bo->node.start;
92 u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
93 unsigned int count;
94 struct scatterlist *sgl;
95
96 for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
97 u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
98 u32 pte = page_prot | page_address;
99 u32 i;
100
101 BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >=
102 BIT(24));
103
104 for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++)
105 v3d->pt[page++] = pte + i;
106 }
107
108 WARN_ON_ONCE(page - bo->node.start !=
109 shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
110
111 if (v3d_mmu_flush_all(v3d))
112 dev_err(v3d->dev, "MMU flush timeout\n");
113}
114
115void v3d_mmu_remove_ptes(struct v3d_bo *bo)
116{
117 struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
118 u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
119 u32 page;
120
121 for (page = bo->node.start; page < bo->node.start + npages; page++)
122 v3d->pt[page] = 0;
123
124 if (v3d_mmu_flush_all(v3d))
125 dev_err(v3d->dev, "MMU flush timeout\n");
126}
127