26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29#include <linux/io-mapping.h>
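/*
 * TTM destroy callback: evict any backing surface, unlink the BO from the
 * device's GEM object list, then release the GEM object and free the BO.
 */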
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

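/* A TTM BO belongs to qxl iff it uses our destroy callback. */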
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

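/*
 * Translate a qxl GEM domain into a TTM placement: VRAM, surface memory
 * (TTM_PL_PRIV) or system RAM, marked non-evictable when pinned.  An
 * unknown domain falls back to evictable system memory.
 */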
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
	unsigned i;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

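/*
 * Allocate and initialize a qxl BO: set up the GEM object, choose a
 * placement for the requested domain and hand the object to TTM.  On
 * ttm_bo_init() failure TTM invokes the destroy callback, which frees
 * the BO, so it must not be freed again here.
 */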
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

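/*
 * Map the whole BO into the kernel address space.  The mapping is cached
 * in bo->kptr, so repeated calls are cheap.
 */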
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

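/*
 * Atomically map a single page of a BO.  VRAM and surface BOs are mapped
 * through the device's io_mapping; everything else falls back to a full
 * (non-atomic) kernel mapping.
 */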
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

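/* Undo qxl_bo_kmap(). */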
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

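/*
 * Undo qxl_bo_kmap_atomic_page(): tear down the atomic io mapping and
 * release the io reservation, or fall back to qxl_bo_kunmap() for BOs
 * that were mapped through the kernel map.
 */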
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

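/*
 * Drop a reference and clear the caller's pointer.  The BO itself is
 * freed through the TTM destroy callback once the last reference is gone.
 */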
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
	*bo = NULL;
}

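/* Take an additional reference on the underlying GEM object. */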
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_reference(&bo->gem_base);
	return bo;
}

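/*
 * Pin a BO into @domain.  The BO must already be reserved; pinning an
 * already-pinned BO just bumps the pin count.
 */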
static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct drm_device *ddev = bo->gem_base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, domain, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

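/*
 * Drop one pin reference.  Once the count reaches zero the BO is made
 * evictable again and revalidated.  The BO must already be reserved.
 */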
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct drm_device *ddev = bo->gem_base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

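/*
 * Reserve the BO before pinning the object.  If the BO was reserved
 * beforehand, use the internal version directly (__qxl_bo_pin).
 */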
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_pin(bo, domain, gpu_addr);
	qxl_bo_unreserve(bo);
	return r;
}

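/*
 * Reserve the BO before unpinning the object.  If the BO was reserved
 * beforehand, use the internal version directly (__qxl_bo_unpin).
 */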
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

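/*
 * Teardown helper: any GEM objects still on the device list at this point
 * were leaked by userspace, so complain loudly and free them forcibly.
 */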
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);

		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

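/*
 * Ensure a surface BO has a hardware surface id, allocating the id and
 * creating the HW surface on first use.
 */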
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
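		/* no surface id yet: allocate one and create the HW surface */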
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

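/* Evict all BOs from surface memory / VRAM (e.g. around suspend). */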
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}