// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Command buffer management for the vmwgfx driver.
 *
 * Implements submission of SVGA command buffers to the device, including
 * a DMA pool for small inline buffers, a managed main pool for larger
 * buffers, device error handling and context preemption.
 */

#include <linux/dmapool.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
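
/*
 * Size sketch (an assumption for illustration, not taken from the SVGA
 * headers): SVGACBHeader is laid out to occupy 64 bytes, so
 * ALIGN(sizeof(SVGACBHeader), 64) == 64 and VMW_CMDBUF_INLINE_SIZE
 * evaluates to 1024 - 64 = 960 bytes of command payload per inline
 * buffer. Should the header ever grow past 64 bytes, the payload would
 * automatically shrink by a further 64-byte alignment step.
 */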

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block submission of new buffers to this
 * context while the error handler reruns failed ones.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned int num_hw_submitted;
	bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex serializing the work-queue error handling.
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manages space for
 * command buffer headers allocated from device memory. Protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle.
 * Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers per context.
 * Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with
 * trailing space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned int max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
	u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head used for attaching to the manager lists:
 * &vmw_cmdbuf_context.submitted, &vmw_cmdbuf_context.hw_submitted,
 * &vmw_cmdbuf_context.preempted or &vmw_cmdbuf_man.error.
 * @node: The range manager node, if this buffer was allocated from the
 * main pool.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_move_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has submitted buffers pending.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			WARN_ONCE(true, "Command buffer error.\n");
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context.
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command
 * buffer manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t fence_seqno;
	int notempty = 0;	/* Required by vmw_cmdbuf_ctx_process(); unused. */
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx)
		INIT_LIST_HEAD(&restart_head[i]);

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			VMW_DEBUG_USER("Unknown command causing device error.\n");
			VMW_DEBUG_USER("Command buffer offset is %lu\n",
				       (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
			       cmd_name);
		VMW_DEBUG_USER("Command buffer offset is %lu\n",
			       (unsigned long) cb_hdr->errorOffset);
		VMW_DEBUG_USER("Command size is %lu\n",
			       (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->offset = 0;

		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally add all command buffers first in the submitted
		 * queue, to rerun them.
		 */
		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_cmd_send_fence(man->dev_priv, &fence_seqno);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command
 * buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new
 * one is automatically allocated when needed. Call with @man->cur_mutex
 * held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptible when sleeping.
 *
 * Flushes the current command buffer without allocating a new one. A new
 * one is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and the allocated mm
 * node on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if
 * succeeded.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available ATM, it turns on IRQ handling and sleeps waiting for
 * it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PFN_UP(size);
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command
 * buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and
 * vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
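
/*
 * Example (illustrative sketch, not called from this file): a typical
 * round trip for a caller that owns a dedicated header. @man, cmd_size
 * and the command contents are caller-supplied placeholders.
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *buf = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
 *	void *cmd;
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID, true,
 *				 header);
 *	... write cmd_size bytes of commands at cmd ...
 *	vmw_cmdbuf_commit(man, cmd_size, header, false);
 *
 * On an error path before commit, release with
 * vmw_cmdbuf_header_free(header) instead.
 */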

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
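
/*
 * Example (illustrative sketch): small kernel command submission without a
 * private header uses the manager's current buffer. @man, @size and the
 * command contents are caller-supplied placeholders; passing flush == false
 * leaves submission to a later flush or to the buffer filling up.
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true,
 *				       NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... write @size bytes of commands at cmd ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */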

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to pass command through.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start/stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 *
 * Set the size for the main space pool. This function is called only once
 * during bootstrap. On success, sets the pool size and enables large
 * command buffer submissions. On failure, the manager keeps working, but
 * restricted to the inline buffer size.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
		    !dev_priv->has_mob)
			return -ENOMEM;

		ret = vmw_bo_create_kernel(dev_priv, size,
					   &vmw_mob_placement,
					   &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submission to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	drm_info(&dev_priv->drm,
		 "Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob) {
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	}

	return ret;
}

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		2 : 1;
	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       dev_priv->drm.dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					dev_priv->drm.dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, 0, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer contexts\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}
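
/*
 * Lifecycle sketch (mirrors how the driver core is expected to use the
 * manager; error handling elided, and pool_size is a caller-chosen
 * placeholder):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);
 *	if (!IS_ERR(man))
 *		ret = vmw_cmdbuf_set_pool_size(man, pool_size);
 *	...
 *	vmw_cmdbuf_remove_pool(man);
 *	vmw_cmdbuf_man_destroy(man);
 */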

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function is called to make the manager stop managing
 * the main buffer space pool. The device can still be used for inline
 * command buffer submissions after this call.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	} else {
		dma_free_coherent(man->dev_priv->drm.dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	if (vmw_cmdbuf_startstop(man, 0, false))
		DRM_ERROR("Failed stopping command buffer contexts.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}