#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/qxl_drm.h>
#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/delay.h>

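/* Recover the qxl_device that embeds the given TTM device via its mman. */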
static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
{
	struct qxl_mman *mman;
	struct qxl_device *qdev;

	mman = container_of(bdev, struct qxl_mman, bdev);
	qdev = container_of(mman, struct qxl_device, mman);
	return qdev;
}

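/*
 * Page-fault handling: the TTM-provided vm_operations are copied on the
 * first mmap() and only the .fault hook is overridden.  qxl_ttm_fault()
 * returns VM_FAULT_NOPAGE when the VMA has no buffer object attached and
 * otherwise simply forwards the fault to TTM.
 */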
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	vm_fault_t ret;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;
	ret = ttm_vm_ops->fault(vmf);
	return ret;
}

int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r;
	struct drm_file *file_priv = filp->private_data;
	struct qxl_device *qdev = file_priv->minor->dev->dev_private;

	if (qdev == NULL) {
		DRM_ERROR(
			"filp->private_data->minor->dev->dev_private == NULL\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
			 filp->private_data, vma->vm_pgoff);

	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
	if (unlikely(r != 0))
		return r;
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		qxl_ttm_vm_ops = *ttm_vm_ops;
		qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
	}
	vma->vm_ops = &qxl_ttm_vm_ops;
	return 0;
}

static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

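/*
 * Memory-type initialisation.  VRAM and surface RAM are reached through
 * QXL memory slots; each slot's base is encoded in the high bits of the
 * GPU offset, hence the shift below derived from the slot_gen_bits and
 * slot_id_bits advertised in the ROM.
 */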
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	struct qxl_device *qdev = qxl_get_qdev(bdev);
	unsigned int gpu_offset_shift =
		64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits + 8);
	struct qxl_memslot *slot;

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
	case TTM_PL_PRIV:
		/* "On-card" video ram */
		slot = (type == TTM_PL_VRAM) ?
			&qdev->main_slot : &qdev->surfaces_slot;
		slot->gpu_offset = (uint64_t)type << gpu_offset_shift;
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = slot->gpu_offset;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
		return -EINVAL;
	}
	return 0;
}

static void qxl_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	struct qxl_bo *qbo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	qbo = to_qxl_bo(bo);
	qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
	*placement = qbo->placement;
}

static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct qxl_bo *qbo = to_qxl_bo(bo);

	return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
					  filp->private_data);
}

static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct qxl_device *qdev = qxl_get_qdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.is_iomem = true;
		mem->bus.base = qdev->vram_base;
		mem->bus.offset = mem->start << PAGE_SHIFT;
		break;
	case TTM_PL_PRIV:
		mem->bus.is_iomem = true;
		mem->bus.base = qdev->surfaceram_base;
		mem->bus.offset = mem->start << PAGE_SHIFT;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct qxl_ttm_tt {
	struct ttm_tt ttm;
	struct qxl_device *qdev;
	u64 offset;
};

static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
				struct ttm_mem_reg *bo_mem)
{
	struct qxl_ttm_tt *gtt = (void *)ttm;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	/* Not implemented */
	return -1;
}

static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return -1;
}

static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct qxl_ttm_tt *gtt = (void *)ttm;

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func qxl_backend_func = {
	.bind = &qxl_ttm_backend_bind,
	.unbind = &qxl_ttm_backend_unbind,
	.destroy = &qxl_ttm_backend_destroy,
};

static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct qxl_device *qdev;
	struct qxl_ttm_tt *gtt;

	qdev = qxl_get_qdev(bo->bdev);
	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.func = &qxl_backend_func;
	gtt->qdev = qdev;
	if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

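/*
 * "Null" move: nothing to copy, just adopt the new placement for the BO.
 */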
static void qxl_move_null(struct ttm_buffer_object *bo,
			  struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

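/*
 * Wait for the BO to go idle, take the null-move shortcut when the old
 * placement has no backing pages, otherwise fall back to TTM's memcpy move.
 */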
static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		qxl_move_null(bo, new_mem);
		return 0;
	}
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}

static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
			       bool evict,
			       struct ttm_mem_reg *new_mem)
{
	struct qxl_bo *qbo;
	struct qxl_device *qdev;

	if (!qxl_ttm_bo_is_qxl_bo(bo))
		return;
	qbo = to_qxl_bo(bo);
	qdev = qbo->gem_base.dev->dev_private;

	if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}

static struct ttm_bo_driver qxl_bo_driver = {
	.ttm_tt_create = &qxl_ttm_tt_create,
	.invalidate_caches = &qxl_invalidate_caches,
	.init_mem_type = &qxl_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &qxl_evict_flags,
	.move = &qxl_bo_move,
	.verify_access = &qxl_verify_access,
	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
	.io_mem_free = &qxl_ttm_io_mem_free,
	.move_notify = &qxl_bo_move_notify,
};

int qxl_ttm_init(struct qxl_device *qdev)
{
	int r;
	int num_io_pages;

	r = ttm_bo_device_init(&qdev->mman.bdev,
			       &qxl_bo_driver,
			       qdev->ddev.anon_inode->i_mapping,
			       false);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	/* NOTE: this includes the framebuffer (aka surface 0) */
	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
			   num_io_pages);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
			   qdev->surfaceram_size / PAGE_SIZE);
	if (r) {
		DRM_ERROR("Failed initializing Surfaces heap.\n");
		return r;
	}
	DRM_INFO("qxl: %uM of VRAM memory size\n",
		 (unsigned int)qdev->vram_size / (1024 * 1024));
	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
		 ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
	DRM_INFO("qxl: %uM of Surface memory size\n",
		 (unsigned int)qdev->surfaceram_size / (1024 * 1024));
	return 0;
}

void qxl_ttm_fini(struct qxl_device *qdev)
{
	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
	ttm_bo_device_release(&qdev->mman.bdev);
	DRM_INFO("qxl: ttm finalized\n");
}

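/*
 * debugfs: expose one drm_mm dump per TTM heap (VRAM and surface RAM).
 */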
#define QXL_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct qxl_device *rdev = dev->dev_private;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
	struct drm_printer p = drm_seq_file_printer(m);

	spin_lock(&glob->lru_lock);
	drm_mm_print(mm, &p);
	spin_unlock(&glob->lru_lock);
	return 0;
}
#endif

int qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
	unsigned int i;

	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
		else
			sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
		qxl_mem_types_list[i].driver_features = 0;
		if (i == 0)
			qxl_mem_types_list[i].data =
				qdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			qxl_mem_types_list[i].data =
				qdev->mman.bdev.man[TTM_PL_PRIV].priv;
	}
	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#else
	return 0;
#endif
}