1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/slab.h>
29#include "vmwgfx_validation.h"
30#include "vmwgfx_drv.h"
31
32
33
34
35
36
37
38
39
40
41
/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation- and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @as_mob: Validate with MOB placement (see vmw_validation_bo_validate_single()).
 * @cpu_blit: Validate with non-fixed placement for cpu blit access
 * (see vmw_validation_bo_validate()).
 *
 * Bit fields are used since these nodes are allocated in large numbers
 * from the context's page allocator and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69struct vmw_validation_res_node {
70 struct list_head head;
71 struct drm_hash_item hash;
72 struct vmw_resource *res;
73 struct vmw_buffer_object *new_backup;
74 unsigned long new_backup_offset;
75 u32 no_buffer_needed : 1;
76 u32 switching_backup : 1;
77 u32 first_usage : 1;
78 u32 reserved : 1;
79 unsigned long private[0];
80};
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator.
 * @ctx: The validation context.
 * @size: The number of bytes to allocate.
 *
 * Sub-allocates (bump-allocates) from zeroed whole pages that are linked
 * onto @ctx->page_list and reclaimed in one go by vmw_validation_mem_free();
 * individual allocations are never freed separately. If a memory accounting
 * object (@ctx->vm) is attached, memory is reserved from it in granules of
 * @ctx->vm->gran before a new page is taken.
 *
 * Return: Pointer to @size bytes of zeroed memory, or NULL if @size exceeds
 * PAGE_SIZE after alignment, or reservation / page allocation fails.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	/* Bump the request up to the allocator's alignment granule. */
	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		/*
		 * Top up the accounting reservation first so that the new
		 * page is covered by it.
		 */
		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

			if (ret)
				return NULL;

			ctx->vm_size_left += ctx->vm->gran;
			ctx->total_mem += ctx->vm->gran;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	/* Carve the allocation off the unused tail of the current page. */
	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
136
137
138
139
140
141
142
143
144
145static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
146{
147 struct page *entry, *next;
148
149 list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
150 list_del_init(&entry->lru);
151 __free_page(entry);
152 }
153
154 ctx->mem_size_left = 0;
155 if (ctx->vm && ctx->total_mem) {
156 ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
157 ctx->total_mem = 0;
158 ctx->vm_size_left = 0;
159 }
160}
161
162
163
164
165
166
167
168
169
170
171static struct vmw_validation_bo_node *
172vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
173 struct vmw_buffer_object *vbo)
174{
175 struct vmw_validation_bo_node *bo_node = NULL;
176
177 if (!ctx->merge_dups)
178 return NULL;
179
180 if (ctx->ht) {
181 struct drm_hash_item *hash;
182
183 if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
184 bo_node = container_of(hash, typeof(*bo_node), hash);
185 } else {
186 struct vmw_validation_bo_node *entry;
187
188 list_for_each_entry(entry, &ctx->bo_list, base.head) {
189 if (entry->base.bo == &vbo->base) {
190 bo_node = entry;
191 break;
192 }
193 }
194 }
195
196 return bo_node;
197}
198
199
200
201
202
203
204
205
206
207
208static struct vmw_validation_res_node *
209vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
210 struct vmw_resource *res)
211{
212 struct vmw_validation_res_node *res_node = NULL;
213
214 if (!ctx->merge_dups)
215 return NULL;
216
217 if (ctx->ht) {
218 struct drm_hash_item *hash;
219
220 if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
221 res_node = container_of(hash, typeof(*res_node), hash);
222 } else {
223 struct vmw_validation_res_node *entry;
224
225 list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
226 if (entry->res == res) {
227 res_node = entry;
228 goto out;
229 }
230 }
231
232 list_for_each_entry(entry, &ctx->resource_list, head) {
233 if (entry->res == res) {
234 res_node = entry;
235 break;
236 }
237 }
238
239 }
240out:
241 return res_node;
242}
243
244
245
246
247
248
249
250
251
252
253int vmw_validation_add_bo(struct vmw_validation_context *ctx,
254 struct vmw_buffer_object *vbo,
255 bool as_mob,
256 bool cpu_blit)
257{
258 struct vmw_validation_bo_node *bo_node;
259
260 bo_node = vmw_validation_find_bo_dup(ctx, vbo);
261 if (bo_node) {
262 if (bo_node->as_mob != as_mob ||
263 bo_node->cpu_blit != cpu_blit) {
264 DRM_ERROR("Inconsistent buffer usage.\n");
265 return -EINVAL;
266 }
267 } else {
268 struct ttm_validate_buffer *val_buf;
269 int ret;
270
271 bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
272 if (!bo_node)
273 return -ENOMEM;
274
275 if (ctx->ht) {
276 bo_node->hash.key = (unsigned long) vbo;
277 ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
278 if (ret) {
279 DRM_ERROR("Failed to initialize a buffer "
280 "validation entry.\n");
281 return ret;
282 }
283 }
284 val_buf = &bo_node->base;
285 val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
286 if (!val_buf->bo)
287 return -ESRCH;
288 val_buf->num_shared = 0;
289 list_add_tail(&val_buf->head, &ctx->bo_list);
290 bo_node->as_mob = as_mob;
291 bo_node->cpu_blit = cpu_blit;
292 }
293
294 return 0;
295}
296
297
298
299
300
301
302
303
304
305
306
307int vmw_validation_add_resource(struct vmw_validation_context *ctx,
308 struct vmw_resource *res,
309 size_t priv_size,
310 void **p_node,
311 bool *first_usage)
312{
313 struct vmw_validation_res_node *node;
314 int ret;
315
316 node = vmw_validation_find_res_dup(ctx, res);
317 if (node) {
318 node->first_usage = 0;
319 goto out_fill;
320 }
321
322 node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
323 if (!node) {
324 DRM_ERROR("Failed to allocate a resource validation "
325 "entry.\n");
326 return -ENOMEM;
327 }
328
329 if (ctx->ht) {
330 node->hash.key = (unsigned long) res;
331 ret = drm_ht_insert_item(ctx->ht, &node->hash);
332 if (ret) {
333 DRM_ERROR("Failed to initialize a resource validation "
334 "entry.\n");
335 return ret;
336 }
337 }
338 node->res = vmw_resource_reference_unless_doomed(res);
339 if (!node->res)
340 return -ESRCH;
341
342 node->first_usage = 1;
343 if (!res->dev_priv->has_mob) {
344 list_add_tail(&node->head, &ctx->resource_list);
345 } else {
346 switch (vmw_res_type(res)) {
347 case vmw_res_context:
348 case vmw_res_dx_context:
349 list_add(&node->head, &ctx->resource_ctx_list);
350 break;
351 case vmw_res_cotable:
352 list_add_tail(&node->head, &ctx->resource_ctx_list);
353 break;
354 default:
355 list_add_tail(&node->head, &ctx->resource_list);
356 break;
357 }
358 }
359
360out_fill:
361 if (first_usage)
362 *first_usage = node->first_usage;
363 if (p_node)
364 *p_node = &node->private;
365
366 return 0;
367}
368
369
370
371
372
373
374
375
376
377
378
379
380void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
381 void *val_private,
382 struct vmw_buffer_object *vbo,
383 unsigned long backup_offset)
384{
385 struct vmw_validation_res_node *val;
386
387 val = container_of(val_private, typeof(*val), private);
388
389 val->switching_backup = 1;
390 if (val->first_usage)
391 val->no_buffer_needed = 1;
392
393 val->new_backup = vbo;
394 val->new_backup_offset = backup_offset;
395}
396
397
398
399
400
401
402
403
404
405
/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when waiting for reservations.
 *
 * Also registers each reserved resource's backup buffer object with the
 * context for validation. On error, reservations already made are backed
 * off with vmw_validation_res_unreserve().
 *
 * Return: Zero on success, negative error code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	/* Merge the context resources into the main list before walking it. */
	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			/* Register the backup buffer for validation. */
			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}
439
440
441
442
443
444
445
446
447void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
448 bool backoff)
449{
450 struct vmw_validation_res_node *val;
451
452 list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
453
454 list_for_each_entry(val, &ctx->resource_list, head) {
455 if (val->reserved)
456 vmw_resource_unreserve(val->res,
457 !backoff &&
458 val->switching_backup,
459 val->new_backup,
460 val->new_backup_offset);
461 }
462}
463
464
465
466
467
468
469
470
471
472
/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	/* Pinned buffers are left where they are. */
	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * First try the combined VRAM / GMR placement. NOTE(review):
	 * presumably this places the buffer in VRAM if there is space and
	 * as a GMR otherwise — confirm against the placement definitions
	 * in the driver.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed (and we weren't interrupted), fall back to a
	 * VRAM-only placement.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}
510
511
512
513
514
515
516
517
518
519
520int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
521{
522 struct vmw_validation_bo_node *entry;
523 int ret;
524
525 list_for_each_entry(entry, &ctx->bo_list, base.head) {
526 if (entry->cpu_blit) {
527 struct ttm_operation_ctx ctx = {
528 .interruptible = intr,
529 .no_wait_gpu = false
530 };
531
532 ret = ttm_bo_validate(entry->base.bo,
533 &vmw_nonfixed_placement, &ctx);
534 } else {
535 ret = vmw_validation_bo_validate_single
536 (entry->base.bo, intr, entry->as_mob);
537 }
538 if (ret)
539 return ret;
540 }
541 return 0;
542}
543
544
545
546
547
548
549
550
551
552
553
554
555
/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Called by vmw_validation_prepare() after vmw_validation_bo_validate(),
 * i.e. after the registered buffer objects have been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/*
		 * If validation switched the resource's backup buffer,
		 * register the new backup with the context as well.
		 */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}
585
586
587
588
589
590
591
592
593
594
595
596
597
598void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
599{
600 struct vmw_validation_bo_node *entry;
601 struct vmw_validation_res_node *val;
602
603 if (!ctx->ht)
604 return;
605
606 list_for_each_entry(entry, &ctx->bo_list, base.head)
607 (void) drm_ht_remove_item(ctx->ht, &entry->hash);
608
609 list_for_each_entry(val, &ctx->resource_list, head)
610 (void) drm_ht_remove_item(ctx->ht, &val->hash);
611
612 list_for_each_entry(val, &ctx->resource_ctx_list, head)
613 (void) drm_ht_remove_item(ctx->ht, &val->hash);
614
615 ctx->ht = NULL;
616}
617
618
619
620
621
622
623
624
625
/**
 * vmw_validation_unref_lists - Unregister all buffer objects and resources
 * previously registered with this validation context.
 * @ctx: The validation context.
 *
 * Drops the references taken at registration time and releases all node
 * memory allocated from the context's page allocator.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		ttm_bo_unref(&entry->base.bo);

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry: the node memory is freed
	 * wholesale by vmw_validation_mem_free() below. Just re-init the
	 * lists so the stale nodes become unreachable.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex protecting resource reservation, or NULL if none is
 * needed.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Takes @mutex (if any), then reserves and validates all resources and
 * buffer objects registered with the context, in that order. On failure,
 * everything already done is backed off in reverse order and @mutex is
 * released. @mutex is remembered in @ctx->res_mutex and released by
 * vmw_validation_revert() or vmw_validation_done().
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
707
708
709
710
711
712
713
714
715
/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 * @ctx: The validation context.
 *
 * Backs off all buffer object and resource reservations, releases the
 * reservation mutex taken by vmw_validation_prepare() (if any), and drops
 * all registration references.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}
724
725
726
727
728
729
730
731
732
733
/**
 * vmw_validation_done - Commit validation actions after successful command
 * submission.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * Fences the buffer objects, unreserves resources (committing any backup
 * switches), releases the reservation mutex taken by
 * vmw_validation_prepare() (if any), and drops all registration references.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}
743
744
745
746
747
748
749
750
751
752
753
754
/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * coming call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Allocates a buffer-object node and immediately returns the space to the
 * allocator by re-adding @size to @ctx->mem_size_left, so a subsequent
 * vmw_validation_add_bo() can obtain its node without grabbing a new page.
 * NOTE(review): the size added back is not passed through
 * vmw_validation_align() — this relies on the node size already being
 * allocator-aligned; confirm against vmw_validation_align().
 *
 * Return: Zero on success, -ENOMEM on allocation failure.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
765
766
767
768
769
770
771
772
773
774
775
776
777
/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * coming call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the private (per-resource) metadata that will be requested.
 *
 * Reserves enough aligned space for one resource node (including @size bytes
 * of private data) plus one buffer-object node, then returns that space to
 * the allocator by re-adding it to @ctx->mem_size_left. A subsequent
 * vmw_validation_add_resource() with the same priv_size — and the
 * buffer-object registration it may trigger — is thereby guaranteed to get
 * node memory without grabbing a new page.
 *
 * Return: Zero on success, -ENOMEM on allocation failure.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
790