1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "nouveau_drv.h"
26#include "nouveau_gem.h"
27#include "nouveau_mem.h"
28#include "nouveau_ttm.h"
29
30#include <drm/drm_legacy.h>
31
32#include <core/tegra.h>
33
/* TTM memory-type-manager init hook: nouveau keeps no per-manager state,
 * but TTM requires the callback, so simply report success.
 */
static int
nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}
39
/* TTM memory-type-manager takedown hook: nothing was allocated in
 * nouveau_manager_init(), so there is nothing to release.
 */
static int
nouveau_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}
45
/* Shared put_node hook: release the nouveau_mem backing @reg that was
 * created by the corresponding get_node callback.
 */
static void
nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
	nouveau_mem_del(reg);
}
51
/* Shared debug hook: nouveau exposes no manager-specific debug state. */
static void
nouveau_manager_debug(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer)
{
}
57
58static int
59nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
60 struct ttm_buffer_object *bo,
61 const struct ttm_place *place,
62 struct ttm_mem_reg *reg)
63{
64 struct nouveau_bo *nvbo = nouveau_bo(bo);
65 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
66 struct nouveau_mem *mem;
67 int ret;
68
69 if (drm->client.device.info.ram_size == 0)
70 return -ENOMEM;
71
72 ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
73 mem = nouveau_mem(reg);
74 if (ret)
75 return ret;
76
77 ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
78 if (ret) {
79 nouveau_mem_del(reg);
80 if (ret == -ENOSPC) {
81 reg->mm_node = NULL;
82 return 0;
83 }
84 return ret;
85 }
86
87 return 0;
88}
89
/* Manager function table for the TTM_PL_VRAM memory type. */
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug,
};
97
98static int
99nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
100 struct ttm_buffer_object *bo,
101 const struct ttm_place *place,
102 struct ttm_mem_reg *reg)
103{
104 struct nouveau_bo *nvbo = nouveau_bo(bo);
105 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
106 struct nouveau_mem *mem;
107 int ret;
108
109 ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
110 mem = nouveau_mem(reg);
111 if (ret)
112 return ret;
113
114 reg->start = 0;
115 return 0;
116}
117
/* Manager function table for the TTM_PL_TT (GART) memory type. */
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};
125
126static int
127nv04_gart_manager_new(struct ttm_mem_type_manager *man,
128 struct ttm_buffer_object *bo,
129 const struct ttm_place *place,
130 struct ttm_mem_reg *reg)
131{
132 struct nouveau_bo *nvbo = nouveau_bo(bo);
133 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
134 struct nouveau_mem *mem;
135 int ret;
136
137 ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
138 mem = nouveau_mem(reg);
139 if (ret)
140 return ret;
141
142 ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
143 reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
144 if (ret) {
145 nouveau_mem_del(reg);
146 if (ret == -ENOSPC) {
147 reg->mm_node = NULL;
148 return 0;
149 }
150 return ret;
151 }
152
153 reg->start = mem->vma[0].addr >> PAGE_SHIFT;
154 return 0;
155}
156
/* Manager function table for the NV04-style GART memory type. */
const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};
164
165int
166nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
167{
168 struct drm_file *file_priv = filp->private_data;
169 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
170
171 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
172 return drm_legacy_mmap(filp, vma);
173
174 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
175}
176
177static int
178nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
179{
180 struct nvif_mmu *mmu = &drm->client.mmu;
181 int typei;
182
183 typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
184 kind | NVIF_MEM_COHERENT);
185 if (typei < 0)
186 return -ENOSYS;
187
188 drm->ttm.type_host[!!kind] = typei;
189
190 typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
191 if (typei < 0)
192 return -ENOSYS;
193
194 drm->ttm.type_ncoh[!!kind] = typei;
195 return 0;
196}
197
198int
199nouveau_ttm_init(struct nouveau_drm *drm)
200{
201 struct nvkm_device *device = nvxx_device(&drm->client.device);
202 struct nvkm_pci *pci = device->pci;
203 struct nvif_mmu *mmu = &drm->client.mmu;
204 struct drm_device *dev = drm->dev;
205 int typei, ret;
206
207 ret = nouveau_ttm_init_host(drm, 0);
208 if (ret)
209 return ret;
210
211 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
212 drm->client.device.info.chipset != 0x50) {
213 ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
214 if (ret)
215 return ret;
216 }
217
218 if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
219 drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
220 typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
221 NVIF_MEM_KIND |
222 NVIF_MEM_COMP |
223 NVIF_MEM_DISP);
224 if (typei < 0)
225 return -ENOSYS;
226
227 drm->ttm.type_vram = typei;
228 } else {
229 drm->ttm.type_vram = -1;
230 }
231
232 if (pci && pci->agp.bridge) {
233 drm->agp.bridge = pci->agp.bridge;
234 drm->agp.base = pci->agp.base;
235 drm->agp.size = pci->agp.size;
236 drm->agp.cma = pci->agp.cma;
237 }
238
239 ret = ttm_bo_device_init(&drm->ttm.bdev,
240 &nouveau_bo_driver,
241 dev->anon_inode->i_mapping,
242 DRM_FILE_PAGE_OFFSET,
243 drm->client.mmu.dmabits <= 32 ? true : false);
244 if (ret) {
245 NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
246 return ret;
247 }
248
249
250 drm->gem.vram_available = drm->client.device.info.ram_user;
251
252 arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
253 device->func->resource_size(device, 1));
254
255 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
256 drm->gem.vram_available >> PAGE_SHIFT);
257 if (ret) {
258 NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
259 return ret;
260 }
261
262 drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
263 device->func->resource_size(device, 1));
264
265
266 if (!drm->agp.bridge) {
267 drm->gem.gart_available = drm->client.vmm.vmm.limit;
268 } else {
269 drm->gem.gart_available = drm->agp.size;
270 }
271
272 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
273 drm->gem.gart_available >> PAGE_SHIFT);
274 if (ret) {
275 NV_ERROR(drm, "GART mm init failed, %d\n", ret);
276 return ret;
277 }
278
279 NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
280 NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
281 return 0;
282}
283
/* Undo nouveau_ttm_init(): drain both memory-type managers, release the
 * TTM BO device, then drop the write-combine mapping of resource 1.
 * The ordering mirrors the reverse of initialisation and matters.
 */
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	/* Managers must be cleaned before the device is released. */
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	/* Drop the WC MTRR and the io-memtype reservation taken at init. */
	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));

}
300