/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent
 * this fact Nouveau will not create a RAM device for it. Therefore its
 * instmem implementation must be done directly on top of system memory,
 * while providing coherent read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed using PRAMIN (i.e. using
 * the GPU path) to ensure these operations are coherent for the GPU. This
 * also allows us to use more "relaxed" allocation parameters when using the
 * DMA API, since we never need a kernel mapping.
 */
#include <subdev/fb.h>
#include <core/mm.h>
#include <core/device.h>

#ifdef __KERNEL__
#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <nouveau_platform.h>
#endif

#include "priv.h"

struct gk20a_instobj_priv {
	struct nvkm_instobj base;
	/* Must be second member here - see nouveau_gpuobj_map_vm() */
	struct nvkm_mem *mem;
	/* Pointed to by mem */
	struct nvkm_mem _mem;
};

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
	struct gk20a_instobj_priv base;

	void *cpuaddr;
	dma_addr_t handle;
	struct nvkm_mm_node r;
};

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
	struct gk20a_instobj_priv base;

	/* array of base.mem->size pages */
	struct page *pages[];
};

struct gk20a_instmem_priv {
	struct nvkm_instmem base;
	spinlock_t lock;
	u64 addr;

	/* Only used if IOMMU is present */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;

	/* Only used by DMA API */
	struct dma_attrs attrs;
};

/*
 * Use PRAMIN to read/write data and avoid coherency issues.
 * PRAMIN uses the GPU path and ensures data will always be coherent.
 *
 * A dynamic mapping based solution would be desirable in the future, but
 * the issue remains of how to maintain coherency efficiently. On ARM it is
 * not easy (if possible at all?) to create uncached temporary mappings.
 */
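
/*
 * Window mechanics, as used by the accessors below: register 0x001700 takes
 * the target base in 64 KiB units and slides the 1 MiB aperture at 0x700000
 * over instance memory. The last programmed base is cached in priv->addr so
 * the window is only moved when an access falls outside of it.
 */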
static u32
gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
{
	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
	struct gk20a_instobj_priv *node = (void *)object;
	unsigned long flags;
	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
	u32 data;

	spin_lock_irqsave(&priv->lock, flags);
	if (unlikely(priv->addr != base)) {
		nv_wr32(priv, 0x001700, base >> 16);
		priv->addr = base;
	}
	data = nv_rd32(priv, 0x700000 + addr);
	spin_unlock_irqrestore(&priv->lock, flags);
	return data;
}

static void
gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
{
	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
	struct gk20a_instobj_priv *node = (void *)object;
	unsigned long flags;
	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;

	spin_lock_irqsave(&priv->lock, flags);
	if (unlikely(priv->addr != base)) {
		nv_wr32(priv, 0x001700, base >> 16);
		priv->addr = base;
	}
	nv_wr32(priv, 0x700000 + addr, data);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void
gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
{
	struct gk20a_instobj_dma *node = (void *)_node;
	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
	struct device *dev = nv_device_base(nv_device(priv));

	if (unlikely(!node->cpuaddr))
		return;

	dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
		       node->handle, &priv->attrs);
}

static void
gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
{
	struct gk20a_instobj_iommu *node = (void *)_node;
	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
	struct nvkm_mm_node *r;
	int i;

	if (unlikely(list_empty(&_node->mem->regions)))
		return;

	r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
			     rl_entry);

	/* clear IOMMU bit to unmap pages */
	r->offset &= ~BIT(34 - priv->iommu_pgshift);

	/* Unmap pages from GPU address space and free them */
	for (i = 0; i < _node->mem->size; i++) {
		iommu_unmap(priv->domain,
			    (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
		__free_page(node->pages[i]);
	}

	/* Release area from GPU address space */
	mutex_lock(priv->mm_mutex);
	nvkm_mm_free(priv->mm, &r);
	mutex_unlock(priv->mm_mutex);
}

static void
gk20a_instobj_dtor(struct nvkm_object *object)
{
	struct gk20a_instobj_priv *node = (void *)object;
	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);

	if (priv->domain)
		gk20a_instobj_dtor_iommu(node);
	else
		gk20a_instobj_dtor_dma(node);

	nvkm_instobj_destroy(&node->base);
}

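/*
 * DMA path: allocate a physically contiguous buffer and describe it with a
 * single hand-rolled nvkm_mm_node so the rest of nvkm can treat it like any
 * other memory region.
 */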
static int
gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, u32 npages, u32 align,
		       struct gk20a_instobj_priv **_node)
{
	struct gk20a_instobj_dma *node;
	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
	struct device *dev = nv_device_base(nv_device(parent));
	int ret;

	ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
				   (void **)&node);
	*_node = &node->base;
	if (ret)
		return ret;

	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					&node->handle, GFP_KERNEL,
					&priv->attrs);
	if (!node->cpuaddr) {
		nv_error(priv, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
			&node->handle, align);

	/* present memory for being mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base._mem.offset = node->handle;

	INIT_LIST_HEAD(&node->base._mem.regions);
	list_add_tail(&node->r.rl_entry, &node->base._mem.regions);

	return 0;
}

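/*
 * IOMMU path: allocate individual pages and make them contiguous in the
 * GPU's address space using the IOMMU, carving the virtual range out of the
 * nvkm_mm shared with the platform code.
 */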
static int
gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
			 struct nvkm_oclass *oclass, u32 npages, u32 align,
			 struct gk20a_instobj_priv **_node)
{
	struct gk20a_instobj_iommu *node;
	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
	struct nvkm_mm_node *r;
	int ret;
	int i;

	ret = nvkm_instobj_create_(parent, engine, oclass,
				   sizeof(*node) + sizeof(node->pages[0]) * npages,
				   (void **)&node);
	*_node = &node->base;
	if (ret)
		return ret;

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);

		if (p == NULL) {
			ret = -ENOMEM;
			goto free_pages;
		}
		node->pages[i] = p;
	}

	mutex_lock(priv->mm_mutex);
	/* Reserve area from GPU address space */
	ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
			   align >> priv->iommu_pgshift, &r);
	mutex_unlock(priv->mm_mutex);
	if (ret) {
		nv_error(priv, "virtual space is full!\n");
		goto free_pages;
	}

	/* Map into GPU address space */
	for (i = 0; i < npages; i++) {
		struct page *p = node->pages[i];
		u32 offset = (r->offset + i) << priv->iommu_pgshift;

		ret = iommu_map(priv->domain, offset, page_to_phys(p),
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0) {
			nv_error(priv, "IOMMU mapping failure: %d\n", ret);

			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(priv->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* Bit 34 tells that an address is to be resolved through the IOMMU */
	r->offset |= BIT(34 - priv->iommu_pgshift);

	node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;

	INIT_LIST_HEAD(&node->base._mem.regions);
	list_add_tail(&r->rl_entry, &node->base._mem.regions);

	return 0;

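	/* Error paths: undo the steps above in reverse order */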
release_area:
	mutex_lock(priv->mm_mutex);
	nvkm_mm_free(priv->mm, &r);
	mutex_unlock(priv->mm_mutex);

free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++)
		__free_page(node->pages[i]);

	return ret;
}

static int
gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		   struct nvkm_oclass *oclass, void *data, u32 _size,
		   struct nvkm_object **pobject)
{
	struct nvkm_instobj_args *args = data;
	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
	struct gk20a_instobj_priv *node;
	u32 size, align;
	int ret;

	nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
		 priv->domain ? "IOMMU" : "DMA", args->size, args->align);

	/* Round size and align up to page bounds */
	size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);

	if (priv->domain)
		ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
					       size >> PAGE_SHIFT, align, &node);
	else
		ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
					     size >> PAGE_SHIFT, align, &node);
	*pobject = nv_object(node);
	if (ret)
		return ret;

	node->mem = &node->_mem;

	/* present memory for being mapped using small pages */
	node->mem->size = size >> 12;
	node->mem->memtype = 0;
	node->mem->page_shift = 12;

	node->base.addr = node->mem->offset;
	node->base.size = size;

	nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		 size, align, node->mem->offset);

	return 0;
}

static struct nvkm_instobj_impl
gk20a_instobj_oclass = {
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk20a_instobj_ctor,
		.dtor = gk20a_instobj_dtor,
		.init = _nvkm_instobj_init,
		.fini = _nvkm_instobj_fini,
		.rd32 = gk20a_instobj_rd32,
		.wr32 = gk20a_instobj_wr32,
	},
};

static int
gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
{
	struct gk20a_instmem_priv *priv = (void *)object;
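	/* Force the PRAMIN window to be reprogrammed on the next access */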
	priv->addr = ~0ULL;
	return nvkm_instmem_fini(&priv->base, suspend);
}

static int
gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		   struct nvkm_oclass *oclass, void *data, u32 size,
		   struct nvkm_object **pobject)
{
	struct gk20a_instmem_priv *priv;
	struct nouveau_platform_device *plat;
	int ret;

	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	spin_lock_init(&priv->lock);

	plat = nv_device_to_platform(nv_device(parent));
	if (plat->gpu->iommu.domain) {
		priv->domain = plat->gpu->iommu.domain;
		priv->mm = plat->gpu->iommu.mm;
		priv->iommu_pgshift = plat->gpu->iommu.pgshift;
		priv->mm_mutex = &plat->gpu->iommu.mutex;

		nv_info(priv, "using IOMMU\n");
	} else {
		init_dma_attrs(&priv->attrs);
		/*
		 * We will access instmem through PRAMIN and thus do not need
		 * a consistent CPU pointer or kernel mapping
		 */
		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);

		nv_info(priv, "using DMA API\n");
	}

	return 0;
}

struct nvkm_oclass *
gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
	.base.handle = NV_SUBDEV(INSTMEM, 0xea),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk20a_instmem_ctor,
		.dtor = _nvkm_instmem_dtor,
		.init = _nvkm_instmem_init,
		.fini = gk20a_instmem_fini,
	},
	.instobj = &gk20a_instobj_oclass.base,
}.base;