// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright VMware, Inc., Palo Alto, CA., USA
 */

#include <linux/highmem.h>	/* kmap_atomic() / kunmap_atomic() */

#include "vmwgfx_drv.h"

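/*
 * If we set up the screen target otable, screen objects stop working.
 */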
#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif

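/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * guest Memory OBject.
 *
 * @pt_bo:           Buffer object holding the page table.
 * @num_pages:       Number of pages that make up the page table.
 * @pt_level:        The indirection level of the page table. 0-2.
 * @pt_root_page:    DMA address of the level 0 page of the page table.
 * @id:              Device id of the mob.
 */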
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

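/*
 * The otable arrays below describe the object tables the device expects:
 * one entry per object type, giving {size in bytes, page-table mob
 * (initially NULL), enabled}.
 */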
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

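/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable_bo:      Buffer object backing all object tables of the batch
 * @offset:         Start of the table's pages within @otable_bo
 * @otable:         Pointer to otable metadata
 *
 * Returns -ENOMEM if it fails to reserve fifo space, and may block
 * waiting for fifo space.
 */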
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

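	/*
	 * The device doesn't support this page-table depth for otables;
	 * the size checks above must have picked a shallower format.
	 */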
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

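/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable:         Pointer to otable metadata
 *
 * Waits for the device to finish with the page-table buffer before
 * destroying it.
 */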
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, NULL);
		BUG_ON(ret != 0);

		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

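/*
 * vmw_otable_batch_setup - Set up a batch of otables.
 *
 * @dev_priv:       Pointer to a device private structure
 * @batch:          Pointer to the batch of otables to set up
 *
 * Allocates a single buffer object backing all enabled otables in the
 * batch, populates and DMA-maps it, and issues one base-setup command
 * per enabled table.
 */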
static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables = batch->otables;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	SVGAOTableType i;
	int ret;

	bo_size = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &batch->otable_bo);

	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(batch->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(batch->otable_bo);

	offset = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	return 0;

out_unreserve:
	ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
out_no_bo:
	return ret;
}

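/*
 * vmw_otables_setup - Set up guest backed memory object tables.
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface initialization, by
 * setting up the guest backed memory object tables. Returns 0 on
 * success and various error codes on failure. A successful return means
 * the object tables can be taken down using vmw_otables_takedown().
 */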
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (dev_priv->has_dx) {
		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
				   GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

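/*
 * vmw_otable_batch_takedown - Take down a batch of otables and release
 * the backing buffer object.
 *
 * @dev_priv:       Pointer to a device private structure
 * @batch:          Pointer to the batch of otables to take down
 */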
static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
				      struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
}

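/*
 * vmw_otables_takedown - Take down guest backed memory object tables.
 *
 * @dev_priv:       Pointer to a device private structure
 */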
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}

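/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:     Number of data pages in the memory object buffer
 *
 * Each loop iteration adds one level of indirection: the entries needed
 * to address the previous level, VMW_PPN_SIZE bytes apiece, rounded up
 * to whole page-table pages.
 */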
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}

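/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:     Number of data pages of the underlying buffer object
 */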
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(!mob))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

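/*
 * vmw_mob_pt_populate - Populate the mob page table.
 *
 * @dev_priv:       Pointer to a device private structure
 * @mob:            Pointer to the mob whose page table we want to
 *                  populate
 *
 * Allocates a page-table buffer object, populates it with backing pages
 * and sets up its DMA mappings.
 */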
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);

	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_put(mob->pt_bo);
	mob->pt_bo = NULL;

	return ret;
}

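/*
 * vmw_mob_assign_ppn - Assign a value to a page table entry.
 *
 * @addr:           Pointer to pointer to page table entry
 * @val:            The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and
 * increments *@addr according to the page table entry size.
 */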
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif

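/*
 * vmw_mob_build_pt - Build a page table from DMA addresses.
 *
 * @data_iter:      Page iterator over the underlying buffer object's
 *                  data pages
 * @num_data_pages: Number of buffer object data pages
 * @pt_iter:        Page iterator over the page-table pages to fill
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages when necessary.
 */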
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

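/*
 * vmw_mob_pt_setup - Set up a multilevel mob page table.
 *
 * @mob:            Pointer to a mob whose page table needs setting up
 * @data_iter:      Page iterator over the buffer object's data pages
 * @num_data_pages: Number of buffer object data pages
 *
 * Iteratively builds each page-table level on top of the previous one
 * until a single root page remains, which becomes @mob->pt_root_page.
 */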
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

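/*
 * vmw_mob_destroy - Destroy a mob, releasing its page table first if
 * one was allocated.
 *
 * @mob:            Pointer to a mob to destroy
 */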
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo) {
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}
	kfree(mob);
}

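/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private
 * @mob:            Pointer to the mob we're making invisible to the
 *                  device
 */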
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, NULL);
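		/*
		 * No one else should be using this buffer at this point.
		 */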
		BUG_ON(ret != 0);
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (cmd) {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}

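/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private
 * @mob:            Pointer to the mob we're making visible
 * @vsgt:           Pointer to a struct vmw_sg_table describing the
 *                  data store
 * @num_data_pages: Number of data pages of the underlying buffer object
 * @mob_id:         Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */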
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	vmw_fifo_resource_inc(dev_priv);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_no_cmd_space;

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up) {
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}

	return -ENOMEM;
}