// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2018 Etnaviv Project
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

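/*
 * Two-level page table: a single 4K master TLB (MTLB) with 1024 entries,
 * each present entry pointing to a 4K slave TLB (STLB) of 1024 page table
 * entries, so one MTLB entry covers 4MB of GPU virtual address space.
 * The PTA (page table array) is only used in the secure configuration,
 * where its first slot holds the MTLB address.
 */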
struct etnaviv_iommuv2_domain {
	struct etnaviv_iommu_domain base;

	u64 *pta_cpu;
	dma_addr_t pta_dma;

	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;

	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, base);
}

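/*
 * Lazily allocate the second level page table for the given MTLB slot and
 * hook it up as a present MTLB entry. Freshly allocated STLBs are filled
 * with exception PTEs, so unmapped accesses fault instead of going through.
 */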
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
			    int stlb)
{
	if (etnaviv_domain->stlb_cpu[stlb])
		return 0;

	etnaviv_domain->stlb_cpu[stlb] =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!etnaviv_domain->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
					 MMUv2_PTE_PRESENT;
	return 0;
}

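/*
 * Map a single 4K page: build the PTE (physical addresses above 32 bit are
 * supported by folding paddr bits 32-39 into PTE bits 4-11), make sure the
 * STLB for the covering MTLB slot exists, then write the entry.
 */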
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
	if (ret)
		return ret;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

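/*
 * Unmap a single 4K page by writing the exception PTE back into the STLB
 * slot; the STLB itself stays allocated for later reuse.
 */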
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

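/*
 * Allocate the static parts of the page table hierarchy: the scratch (bad)
 * page that faulting accesses get redirected to, the PTA and the MTLB.
 * STLBs are allocated on demand in etnaviv_iommuv2_ensure_stlb().
 */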
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i;

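	/* allocate scratch page, filled with a recognizable poison pattern */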
	etnaviv_domain->base.bad_page_cpu =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->base.bad_page_dma,
				     GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}
	p = etnaviv_domain->base.bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
					       SZ_4K, &etnaviv_domain->pta_dma,
					       GFP_KERNEL);
	if (!etnaviv_domain->pta_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
						SZ_4K, &etnaviv_domain->mtlb_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	return 0;

fail_mem:
	if (etnaviv_domain->base.bad_page_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->base.bad_page_cpu,
			    etnaviv_domain->base.bad_page_dma);

	if (etnaviv_domain->pta_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	return ret;
}

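/* Free the bad page, PTA, MTLB and any STLBs that were allocated on demand. */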
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->base.bad_page_cpu,
		    etnaviv_domain->base.bad_page_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
				    etnaviv_domain->stlb_cpu[i],
				    etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}

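/* Size of a page table dump: the MTLB plus one page per present STLB. */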
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

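/*
 * Copy the MTLB followed by all present STLBs into the provided buffer,
 * which is laid out and sized by etnaviv_iommuv2_dump_size().
 */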
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	/* only advance the buffer for present STLBs, matching dump_size() */
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}

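/*
 * Program the MMU in the non-secure configuration: the FE loads the MTLB
 * and safe (bad page) address through a command stream built by
 * etnaviv_buffer_config_mmuv2(), then the MMU is enabled via MMIO.
 */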
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

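	/* If the MMU is already enabled the state is still there. */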
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->base.bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

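/*
 * Program the MMU in the secure (ETNA_SEC_KERNEL) configuration: point the
 * hardware at the PTA and the safe addresses via MMIO, let the FE trigger
 * a PTA load and finally enable the MMU through the SEC_CONTROL register.
 */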
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

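	/* If the MMU is already enabled the state is still there. */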
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

	etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

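	/* trigger a PTA load through the FE */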
	prefetch = etnaviv_buffer_config_pta(gpu);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

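/* Restore the MMU setup matching the security mode the GPU operates in. */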
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_domain_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};

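/*
 * Allocate an MMUv2 domain covering the full 4GB GPU virtual address space
 * and set up the static parts of its page table hierarchy.
 */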
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = 0;
	domain->size = (u64)SZ_1G * 4;
	domain->ops = &etnaviv_iommuv2_ops;

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}