#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

struct ring {
        struct qxl_ring_header header;
        uint8_t elements[];
};

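/*
 * Driver-side wrapper around a ring living in memory shared with the
 * device: the shared header plus the element geometry, the I/O port used
 * to notify the device of new work, a waitqueue woken when the device
 * makes progress (presumably from the driver's IRQ path), and a lock
 * serializing ring accesses.
 */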
struct qxl_ring {
        struct ring *ring;
        int element_size;
        int n_elements;
        int prod_notify;
        wait_queue_head_t *push_event;
        spinlock_t lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
        kfree(ring);
}

void qxl_ring_init_hdr(struct qxl_ring *ring)
{
        ring->ring->header.notify_on_prod = ring->n_elements;
}

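/*
 * Wrap a ring header that already exists in device-shared memory.  Only
 * the driver-side bookkeeping is allocated here; when set_prod_notify is
 * true the header's notify_on_prod field is initialized as well.
 */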
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
                int element_size,
                int n_elements,
                int prod_notify,
                bool set_prod_notify,
                wait_queue_head_t *push_event)
{
        struct qxl_ring *ring;

        ring = kmalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ring = (struct ring *)header;
        ring->element_size = element_size;
        ring->n_elements = n_elements;
        ring->prod_notify = prod_notify;
        ring->push_event = push_event;
        if (set_prod_notify)
                qxl_ring_init_hdr(ring);
        spin_lock_init(&ring->lock);
        return ring;
}

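/*
 * Return nonzero when there is room in the ring.  If the ring is full,
 * arm notify_on_cons so the device raises an interrupt once it has
 * consumed another element.
 */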
static int qxl_check_header(struct qxl_ring *ring)
{
        int ret;
        struct qxl_ring_header *header = &(ring->ring->header);
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        ret = header->prod - header->cons < header->num_items;
        if (ret == 0)
                header->notify_on_cons = header->cons + 1;
        spin_unlock_irqrestore(&ring->lock, flags);
        return ret;
}

int qxl_check_idle(struct qxl_ring *ring)
{
        int ret;
        struct qxl_ring_header *header = &(ring->ring->header);
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        ret = header->prod == header->cons;
        spin_unlock_irqrestore(&ring->lock, flags);
        return ret;
}

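/*
 * Push one element onto the ring.  If the ring is full, wait for the
 * device to consume an element first: busy-wait when sleeping is not
 * allowed, otherwise sleep on push_event until qxl_check_header() sees
 * room.  After copying the element and advancing prod, kick the device
 * through the prod_notify I/O port if it asked to be notified.
 */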
int qxl_ring_push(struct qxl_ring *ring,
                  const void *new_elt, bool interruptible)
{
        struct qxl_ring_header *header = &(ring->ring->header);
        uint8_t *elt;
        int idx, ret;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        if (header->prod - header->cons == header->num_items) {
                header->notify_on_cons = header->cons + 1;
                mb();
                spin_unlock_irqrestore(&ring->lock, flags);
                if (!drm_can_sleep()) {
                        while (!qxl_check_header(ring))
                                udelay(1);
                } else {
                        if (interruptible) {
                                ret = wait_event_interruptible(*ring->push_event,
                                                               qxl_check_header(ring));
                                if (ret)
                                        return ret;
                        } else {
                                wait_event(*ring->push_event,
                                           qxl_check_header(ring));
                        }
                }
                spin_lock_irqsave(&ring->lock, flags);
        }

        idx = header->prod & (ring->n_elements - 1);
        elt = ring->ring->elements + idx * ring->element_size;

        memcpy((void *)elt, new_elt, ring->element_size);

        header->prod++;

        mb();

        if (header->prod == header->notify_on_prod)
                outb(0, ring->prod_notify);

        spin_unlock_irqrestore(&ring->lock, flags);
        return 0;
}

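/*
 * Pop one element from the ring into *element.  Returns false if the
 * ring is empty, after arming notify_on_prod so the device interrupts
 * us when it produces the next element.
 */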
static bool qxl_ring_pop(struct qxl_ring *ring,
                         void *element)
{
        volatile struct qxl_ring_header *header = &(ring->ring->header);
        volatile uint8_t *ring_elt;
        int idx;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        if (header->cons == header->prod) {
                header->notify_on_prod = header->cons + 1;
                spin_unlock_irqrestore(&ring->lock, flags);
                return false;
        }

        idx = header->cons & (ring->n_elements - 1);
        ring_elt = ring->ring->elements + idx * ring->element_size;

        memcpy(element, (void *)ring_elt, ring->element_size);

        header->cons++;

        spin_unlock_irqrestore(&ring->lock, flags);
        return true;
}

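/*
 * Build a qxl_command pointing at a release's command buffer and push
 * it onto the command ring (or, below, the cursor ring).
 */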
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                              uint32_t type, bool interruptible)
{
        struct qxl_command cmd;

        cmd.type = type;
        cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

        return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                             uint32_t type, bool interruptible)
{
        struct qxl_command cmd;

        cmd.type = type;
        cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

        return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

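/*
 * Schedule the garbage-collect work if the release ring has entries to
 * process, optionally flushing it synchronously.  Returns true if work
 * was queued.
 */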
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
        if (!qxl_check_idle(qdev->release_ring)) {
                schedule_work(&qdev->gc_work);
                if (flush)
                        flush_work(&qdev->gc_work);
                return true;
        }
        return false;
}

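/*
 * Drain the release ring.  Each entry is a release id; releases can be
 * chained through info->next, so walk the chain and free every release
 * the device has finished with.  Returns the number freed.
 */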
int qxl_garbage_collect(struct qxl_device *qdev)
{
        struct qxl_release *release;
        uint64_t id, next_id;
        int i = 0;
        union qxl_release_info *info;

        while (qxl_ring_pop(qdev->release_ring, &id)) {
                DRM_DEBUG_DRIVER("popped %lld\n", id);
                while (id) {
                        release = qxl_release_from_id_locked(qdev, id);
                        if (release == NULL)
                                break;

                        info = qxl_release_map(qdev, release);
                        next_id = info->next;
                        qxl_release_unmap(qdev, release, info);

                        DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
                                         next_id);

                        switch (release->type) {
                        case QXL_RELEASE_DRAWABLE:
                        case QXL_RELEASE_SURFACE_CMD:
                        case QXL_RELEASE_CURSOR_CMD:
                                break;
                        default:
                                DRM_ERROR("unexpected release type\n");
                                break;
                        }
                        id = next_id;

                        qxl_release_free(qdev, release);
                        ++i;
                }
        }

        DRM_DEBUG_DRIVER("%d\n", i);

        return i;
}

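/*
 * Allocate a VRAM buffer object and track it on the release's
 * reservation list; on failure the BO reference is dropped before
 * returning.
 */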
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
                          struct qxl_release *release,
                          unsigned long size,
                          struct qxl_bo **_bo)
{
        struct qxl_bo *bo;
        int ret;

        ret = qxl_bo_create(qdev, size, false,
                            false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
        if (ret) {
                DRM_ERROR("failed to allocate VRAM BO\n");
                return ret;
        }
        ret = qxl_release_list_add(release, bo);
        if (ret)
                goto out_unref;

        *_bo = bo;
        return 0;
out_unref:
        qxl_bo_unref(&bo);
        return ret;
}

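/*
 * Issue an asynchronous I/O command: write val to the port, then sleep
 * on io_cmd_event until irq_received_io_cmd advances (incremented from
 * the IRQ path), with a 5 second timeout.  async_io_mutex serializes
 * commands, and any command still in flight is waited for first.
 */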
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
        int irq_num;
        long addr = qdev->io_base + port;
        int ret;

        mutex_lock(&qdev->async_io_mutex);
        irq_num = atomic_read(&qdev->irq_received_io_cmd);
        if (qdev->last_sent_io_cmd > irq_num) {
                if (intr)
                        ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
                                                               atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
                else
                        ret = wait_event_timeout(qdev->io_cmd_event,
                                                 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);

                if (ret <= 0)
                        goto out;
                irq_num = atomic_read(&qdev->irq_received_io_cmd);
        }
        outb(val, addr);
        qdev->last_sent_io_cmd = irq_num + 1;
        if (intr)
                ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
                                                       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
        else
                ret = wait_event_timeout(qdev->io_cmd_event,
                                         atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
        if (ret > 0)
                ret = 0;
        mutex_unlock(&qdev->async_io_mutex);
        return ret;
}

static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
        int ret;

restart:
        ret = wait_for_io_cmd_user(qdev, val, port, false);
        if (ret == -ERESTARTSYS)
                goto restart;
}

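/*
 * Ask the device to update a rectangle of a surface.  The area is
 * validated against the surface dimensions, stashed in the shared ram
 * header under update_area_mutex, and the async update-area command is
 * issued.
 */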
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
                       const struct qxl_rect *area)
{
        int surface_id;
        uint32_t surface_width, surface_height;
        int ret;

        if (!surf->hw_surf_alloc)
                DRM_ERROR("got io update area with no hw surface\n");

        if (surf->is_primary)
                surface_id = 0;
        else
                surface_id = surf->surface_id;
        surface_width = surf->surf.width;
        surface_height = surf->surf.height;

        if (area->left < 0 || area->top < 0 ||
            area->right > surface_width || area->bottom > surface_height) {
                qxl_io_log(qdev, "%s: not doing area update for "
                           "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
                           area->top, area->right, area->bottom, surface_width, surface_height);
                return -EINVAL;
        }
        mutex_lock(&qdev->update_area_mutex);
        qdev->ram_header->update_area = *area;
        qdev->ram_header->update_surface = surface_id;
        ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
        mutex_unlock(&qdev->update_area_mutex);
        return ret;
}

void qxl_io_notify_oom(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
        wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
        wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}

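/*
 * Point the device at the primary surface: fill in the create_surface
 * block of the shared ram header from the BO (using the shadow BO's
 * address when one exists) and issue the async create-primary command.
 */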
void qxl_io_create_primary(struct qxl_device *qdev,
                           unsigned offset, struct qxl_bo *bo)
{
        struct qxl_surface_create *create;

        DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
        create = &qdev->ram_header->create_surface;
        create->format = bo->surf.format;
        create->width = bo->surf.width;
        create->height = bo->surf.height;
        create->stride = bo->surf.stride;
        if (bo->shadow) {
                create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset);
        } else {
                create->mem = qxl_bo_physical_address(qdev, bo, offset);
        }

        DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

        create->flags = QXL_SURF_FLAG_KEEP_DATA;
        create->type = QXL_SURF_TYPE_PRIMARY;

        wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
        DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
        wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
        va_end(args);
        /*
         * The device picks the message up from log_buf when the log port
         * is written, so the outb must follow the vsnprintf.
         */
        outb(0, qdev->io_base + QXL_IO_LOG);
}

void qxl_io_reset(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
        qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
                   qdev->monitors_config ?
                   qdev->monitors_config->count : -1,
                   qdev->monitors_config && qdev->monitors_config->count ?
                   qdev->monitors_config->heads[0].width : -1,
                   qdev->monitors_config && qdev->monitors_config->count ?
                   qdev->monitors_config->heads[0].height : -1,
                   qdev->monitors_config && qdev->monitors_config->count ?
                   qdev->monitors_config->heads[0].x : -1,
                   qdev->monitors_config && qdev->monitors_config->count ?
                   qdev->monitors_config->heads[0].y : -1);

        wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

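/*
 * Allocate a surface id from the idr.  The device only supports
 * rom->n_surfaces surfaces; if the idr hands back an id beyond that,
 * drop it, reap a couple of surfaces from the hardware and retry.
 */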
int qxl_surface_id_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf)
{
        uint32_t handle;
        int idr_ret;
        int count = 0;
again:
        idr_preload(GFP_ATOMIC);
        spin_lock(&qdev->surf_id_idr_lock);
        idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&qdev->surf_id_idr_lock);
        idr_preload_end();
        if (idr_ret < 0)
                return idr_ret;
        handle = idr_ret;

        if (handle >= qdev->rom->n_surfaces) {
                count++;
                spin_lock(&qdev->surf_id_idr_lock);
                idr_remove(&qdev->surf_id_idr, handle);
                spin_unlock(&qdev->surf_id_idr_lock);
                qxl_reap_surface_id(qdev, 2);
                goto again;
        }
        surf->surface_id = handle;

        spin_lock(&qdev->surf_id_idr_lock);
        qdev->last_alloced_surf_id = handle;
        spin_unlock(&qdev->surf_id_idr_lock);
        return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
                            uint32_t surface_id)
{
        spin_lock(&qdev->surf_id_idr_lock);
        idr_remove(&qdev->surf_id_idr, surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
}

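/*
 * Send a surface-create command to the device for this BO.  If new_mem
 * is given, the surface address is computed from the pending TTM
 * placement rather than the BO's current one.  Once the command is
 * pushed, the BO is installed in the idr under its surface id.
 */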
int qxl_hw_surface_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf,
                         struct ttm_mem_reg *new_mem)
{
        struct qxl_surface_cmd *cmd;
        struct qxl_release *release;
        int ret;

        if (surf->hw_surf_alloc)
                return 0;

        ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
                                                 NULL,
                                                 &release);
        if (ret)
                return ret;

        ret = qxl_release_reserve_list(release, true);
        if (ret)
                return ret;

        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_CREATE;
        cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
        cmd->u.surface_create.format = surf->surf.format;
        cmd->u.surface_create.width = surf->surf.width;
        cmd->u.surface_create.height = surf->surf.height;
        cmd->u.surface_create.stride = surf->surf.stride;
        if (new_mem) {
                int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
                struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

                /* pick the memslot matching the new placement and encode
                 * the slot in the high bits of the GPU address */
                cmd->u.surface_create.data = slot->high_bits;
                cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
        } else {
                cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
        }
        cmd->surface_id = surf->surface_id;
        qxl_release_unmap(qdev, release, &cmd->release_info);

        surf->surf_create = release;

        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
        qxl_release_fence_buffer_objects(release);

        surf->hw_surf_alloc = true;
        spin_lock(&qdev->surf_id_idr_lock);
        idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
        return 0;
}

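/*
 * Send a surface-destroy command for this BO.  The surface is pulled
 * out of the idr and its id cleared before the command is pushed; the
 * id is stashed in the release so it can be recycled once the device
 * acknowledges the destroy.
 */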
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
                           struct qxl_bo *surf)
{
        struct qxl_surface_cmd *cmd;
        struct qxl_release *release;
        int ret;
        int id;

        if (!surf->hw_surf_alloc)
                return 0;

        ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
                                                 surf->surf_create,
                                                 &release);
        if (ret)
                return ret;

        surf->surf_create = NULL;

        spin_lock(&qdev->surf_id_idr_lock);
        idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
        surf->hw_surf_alloc = false;

        id = surf->surface_id;
        surf->surface_id = 0;

        release->surface_release_id = id;
        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_DESTROY;
        cmd->surface_id = id;
        qxl_release_unmap(qdev, release, &cmd->release_info);

        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

        qxl_release_fence_buffer_objects(release);

        return 0;
}

static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
        struct qxl_rect rect;
        int ret;

        /* update the whole surface so its contents are current before
         * it is evicted */
        rect.left = 0;
        rect.right = surf->surf.width;
        rect.top = 0;
        rect.bottom = surf->surf.height;
retry:
        ret = qxl_io_update_area(qdev, surf, &rect);
        if (ret == -ERESTARTSYS)
                goto retry;
        return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
        /* an update is only needed when the surface contents must
         * survive the eviction */
        if (do_update_area)
                qxl_update_surface(qdev, surf);

        /* then drop the surface on the hardware side */
        qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
        mutex_lock(&qdev->surf_evict_mutex);
        qxl_surface_evict_locked(qdev, surf, do_update_area);
        mutex_unlock(&qdev->surf_evict_mutex);
}

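/*
 * Try to evict a single surface.  In the stalling case the global evict
 * mutex is dropped around the (possibly long) ttm_bo_wait so other
 * evictions are not blocked behind it.
 */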
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
        int ret;

        ret = qxl_bo_reserve(surf, false);
        if (ret)
                return ret;

        if (stall)
                mutex_unlock(&qdev->surf_evict_mutex);

        ret = ttm_bo_wait(&surf->tbo, true, !stall);

        if (stall)
                mutex_lock(&qdev->surf_evict_mutex);
        if (ret) {
                qxl_bo_unreserve(surf);
                return ret;
        }

        qxl_surface_evict_locked(qdev, surf, true);
        qxl_bo_unreserve(surf);
        return 0;
}

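/*
 * Reclaim up to max_to_reap hardware surfaces, scanning the idr starting
 * just past the most recently allocated id.  A non-blocking pass is
 * tried first; if nothing could be reaped, a second pass stalls on the
 * BOs.  Afterwards, give the device a moment and run garbage collection
 * so the freed ids become reusable.
 */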
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
        int num_reaped = 0;
        int i, ret;
        bool stall = false;
        int start = 0;

        mutex_lock(&qdev->surf_evict_mutex);
again:
        spin_lock(&qdev->surf_id_idr_lock);
        start = qdev->last_alloced_surf_id + 1;
        spin_unlock(&qdev->surf_id_idr_lock);

        for (i = start; i < start + qdev->rom->n_surfaces; i++) {
                void *objptr;
                int surfid = i % qdev->rom->n_surfaces;

                /* look the object up under the idr lock so the lookup is
                 * atomic with respect to a concurrent eviction */
                spin_lock(&qdev->surf_id_idr_lock);
                objptr = idr_find(&qdev->surf_id_idr, surfid);
                spin_unlock(&qdev->surf_id_idr_lock);

                if (!objptr)
                        continue;

                ret = qxl_reap_surf(qdev, objptr, stall);
                if (ret == 0)
                        num_reaped++;
                if (num_reaped >= max_to_reap)
                        break;
        }
        if (num_reaped == 0 && stall == false) {
                stall = true;
                goto again;
        }

        mutex_unlock(&qdev->surf_evict_mutex);
        if (num_reaped) {
                usleep_range(500, 1000);
                qxl_queue_garbage_collect(qdev, true);
        }

        return 0;
}