#include "nouveau_drv.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include <drm/drm_legacy.h>

#include <core/tegra.h>

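/*
 * TTM memory-type manager for VRAM, backed by the nvkm_ram allocator;
 * init/takedown only stash and clear the fb pointer in man->priv.
 */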
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	man->priv = fb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

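/*
 * Unmap and release any GPU virtual address ranges still attached to a
 * memory node before the node is returned to its allocator.
 */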
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
	nvkm_mem_node_cleanup(reg->mm_node);
	ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
}

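/*
 * Allocate VRAM for a buffer object.  A non-contiguous allocation is
 * permitted when the GEM tile flags request it; -ENOSPC from the
 * allocator is reported as success with a NULL node so that TTM can
 * attempt eviction instead.
 */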
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->client.device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
			     reg->page_alignment << PAGE_SHIFT, size_nc,
			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		reg->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	reg->mm_node = node;
	reg->start = node->offset >> PAGE_SHIFT;
	return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
};

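/*
 * GART manager for chipsets where the GPU VM handles placement: the
 * node only records the page shift and the memory type derived from
 * the tile flags; an address is assigned later when the node is mapped,
 * so reg->start stays zero here.
 */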
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *reg)
{
	nvkm_mem_node_cleanup(reg->mm_node);
	kfree(reg->mm_node);
	reg->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->client.device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->client.device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
	case NV_DEVICE_INFO_V0_PASCAL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->client.device.info.family);
		break;
	}

	reg->mm_node = node;
	reg->start = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man,
			   struct drm_printer *printer)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_gart_manager_init,
	.takedown = nouveau_gart_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_gart_manager_del,
	.debug = nouveau_gart_manager_debug
};

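/*
 * GART manager for the nv04-family MMU: the aperture is a single VM,
 * so a fixed virtual range is reserved for each node up front and
 * released again when the node is freed.
 */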
#include <subdev/mmu/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
	struct nvkm_mem *node = reg->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(reg->mm_node);
	reg->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *reg)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	reg->mm_node = node;
	reg->start = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man,
			struct drm_printer *printer)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nv04_gart_manager_init,
	.takedown = nv04_gart_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nv04_gart_manager_del,
	.debug = nv04_gart_manager_debug
};

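/*
 * Offsets below DRM_FILE_PAGE_OFFSET belong to the legacy map space;
 * everything at or above it is a TTM buffer object.
 */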
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

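/*
 * Take references on the global TTM memory-accounting and BO state,
 * creating them on first use; nouveau_ttm_global_release() drops them
 * again.
 */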
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

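/*
 * Bring up TTM for the device: pick a DMA mask (capped at 32 bits for
 * AGP, and by the IOMMU bit on Tegra), initialise the BO driver, and
 * create the VRAM and GART memory pools.
 */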
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u8 bits;
	int ret;

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->client.device)->dma_bits;
	if (nvxx_device(&drm->client.device)->func->pci) {
		if (drm->agp.bridge)
			bits = 32;
	} else if (device->func->tegra) {
		struct nvkm_device_tegra *tegra = device->func->tegra(device);

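		/*
		 * If the platform can use an IOMMU, the addressable DMA
		 * space is constrained by the IOMMU bit.
		 */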
		if (tegra->func->iommu_bit)
			bits = min(bits, tegra->func->iommu_bit);
	}

	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret && bits != 32) {
		bits = 32;
		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	}
	if (ret)
		return ret;

	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret)
		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

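	/* VRAM init */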
	drm->gem.vram_available = drm->client.device.info.ram_user;

	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
				   device->func->resource_size(device, 1));

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

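	/* GART init */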
	if (!drm->agp.bridge) {
		drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

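/*
 * Tear down everything nouveau_ttm_init() set up, in reverse order.
 */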
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));
}