1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#ifndef _VMWGFX_DRV_H_
29#define _VMWGFX_DRV_H_
30
31#include "vmwgfx_validation.h"
32#include "vmwgfx_reg.h"
33#include <drm/drmP.h>
34#include <drm/vmwgfx_drm.h>
35#include <drm/drm_hashtab.h>
36#include <drm/drm_auth.h>
37#include <linux/suspend.h>
38#include <drm/ttm/ttm_bo_driver.h>
39#include <drm/ttm/ttm_execbuf_util.h>
40#include <drm/ttm/ttm_module.h>
41#include "vmwgfx_fence.h"
42#include "ttm_object.h"
43#include "ttm_lock.h"
44#include <linux/sync_file.h>
45
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20180704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 15
#define VMWGFX_DRIVER_PATCHLEVEL 0
/* Size of the bounce buffer used when the FIFO is accessed statically. */
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Maximum number of guest-backed objects of each kind, used to size the
 * MOB (memory object) id space below.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

/* Driver-private TTM placements for GMR and MOB memory. */
#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)

/* Mapping of vmwgfx resource kinds onto TTM driver object types. */
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
82
/*
 * struct vmw_fpriv - per open-file private data, see vmw_fpriv().
 */
struct vmw_fpriv {
	struct drm_master *locked_master; /* master held while this client has it locked */
	struct ttm_object_file *tfile;    /* TTM object-file for handle lookups */
	bool gb_aware;                    /* user-space knows about guest-backed objects */
};
88
/*
 * struct vmw_buffer_object - driver buffer object, embedding a TTM bo.
 */
struct vmw_buffer_object {
	struct ttm_buffer_object base; /* the embedded TTM buffer object */
	struct list_head res_list;     /* resources using this bo as backup (mob_head links) */
	s32 pin_count;                 /* pin refcount, see vmw_bo_pin_reserved() */

	/* Context this bo serves as a DX query mob for, or NULL. */
	struct vmw_resource *dx_query_ctx;

	/* Cached kernel mapping set up by vmw_bo_map_and_cache(). */
	struct ttm_bo_kmap_obj map;
};
98
99
100
101
102
103
104
105
106
107
/*
 * struct vmw_validate_buffer - buffer entry on a validation list,
 * hashable so duplicates can be found quickly.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base; /* TTM validation list entry */
	struct drm_hash_item hash;       /* hash entry for duplicate detection */
	bool validate_as_mob;            /* validate into MOB memory rather than GMR/VRAM */
};
113
114struct vmw_res_func;
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
/*
 * struct vmw_resource - base class for hardware resources
 * (contexts, surfaces, shaders, ...). Refcounted; may be backed by a
 * vmw_buffer_object holding its guest-backed contents.
 */
struct vmw_resource {
	struct kref kref;                  /* reference count */
	struct vmw_private *dev_priv;      /* owning device */
	int id;                            /* device resource id, or -1 if none assigned */
	unsigned long backup_size;         /* required size of the backup buffer */
	bool res_dirty;                    /* hw copy newer than backup buffer */
	bool backup_dirty;                 /* backup buffer newer than hw copy */
	struct vmw_buffer_object *backup;  /* backup buffer, may be NULL */
	unsigned long backup_offset;       /* offset of this resource in the backup bo */
	unsigned long pin_count;           /* pins, see vmw_resource_pin() */
	const struct vmw_res_func *func;   /* per-type resource ops */
	struct list_head lru_head;         /* entry on the device eviction LRU */
	struct list_head mob_head;         /* entry on the backup bo's res_list */
	struct list_head binding_head;     /* entry on a context binding list */
	void (*res_free) (struct vmw_resource *res);    /* final free callback */
	void (*hw_destroy) (struct vmw_resource *res);  /* destroy the hw resource */
};
161
162
163
164
165
/*
 * enum vmw_res_type - kinds of resources tracked per-device
 * (indexes res_idr[] and res_lru[] in struct vmw_private).
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max	/* number of resource types, not a real type */
};
176
177
178
179
/*
 * enum vmw_cmdbuf_res_type - resource kinds managed by a
 * struct vmw_cmdbuf_res_manager.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};
184
185struct vmw_cmdbuf_res_manager;
186
/*
 * struct vmw_cursor_snooper - snooped cursor image state,
 * filled in by vmw_kms_cursor_snoop().
 */
struct vmw_cursor_snooper {
	size_t age;       /* generation counter of the snooped image */
	uint32_t *image;  /* snooped cursor image data, may be NULL */
};
191
192struct vmw_framebuffer;
193struct vmw_surface_offset;
194
/*
 * struct vmw_surface - a surface resource; extends struct vmw_resource.
 */
struct vmw_surface {
	struct vmw_resource res;                        /* base resource */
	SVGA3dSurfaceAllFlags flags;                    /* SVGA3D surface flags */
	uint32_t format;                                /* SVGA3D surface format */
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; /* mip count per face */
	struct drm_vmw_size base_size;                  /* size of the base mip level */
	struct drm_vmw_size *sizes;                     /* per-mip sizes (legacy surfaces) */
	uint32_t num_sizes;                             /* number of entries in @sizes */
	bool scanout;                                   /* usable as a scanout buffer */
	uint32_t array_size;                            /* texture array size */

	/* TODO(review): sorted by eye appearance, per original layout */
	struct vmw_cursor_snooper snooper;              /* cursor snooping state */
	struct vmw_surface_offset *offsets;             /* per-mip byte offsets */
	SVGA3dTextureFilter autogen_filter;             /* mipmap autogen filter */
	uint32_t multisample_count;                     /* MSAA sample count */
	struct list_head view_list;                     /* views created on this surface */
	SVGA3dMSPattern multisample_pattern;            /* MSAA sample pattern */
	SVGA3dMSQualityLevel quality_level;             /* MSAA quality level */
};
214
/*
 * struct vmw_marker_queue - queue of submitted fifo markers used to
 * estimate command-stream lag, see vmw_marker_push()/vmw_marker_pull().
 */
struct vmw_marker_queue {
	struct list_head head; /* list of outstanding markers */
	u64 lag;               /* current estimated lag */
	u64 lag_time;          /* timestamp of the last lag update */
	spinlock_t lock;       /* protects all members */
};
221
/*
 * struct vmw_fifo_state - state of the device command FIFO.
 */
struct vmw_fifo_state {
	unsigned long reserved_size;      /* bytes currently reserved in the fifo */
	u32 *dynamic_buffer;              /* dynamically allocated bounce buffer */
	u32 *static_buffer;               /* preallocated bounce buffer */
	unsigned long static_buffer_size; /* size of @static_buffer */
	bool using_bounce_buffer;         /* current reservation uses a bounce buffer */
	uint32_t capabilities;            /* fifo capability flags read from the device */
	struct mutex fifo_mutex;          /* serializes fifo reservations */
	struct rw_semaphore rwsem;        /* protects fifo state across commits */
	struct vmw_marker_queue marker_queue; /* lag-tracking marker queue */
	bool dx;                          /* fifo was initialized with DX support */
};
234
235
236
237
238
239
240
241
242
243
244
245
246
/*
 * struct vmw_res_cache_entry - last-lookup cache per resource type,
 * used during command-stream validation (see vmw_sw_context::res_cache).
 */
struct vmw_res_cache_entry {
	uint32_t handle;            /* user-space handle of the cached resource */
	struct vmw_resource *res;   /* the cached resource pointer */
	void *private;              /* validation-private data for @res */
	/* NOTE(review): presumably @valid_handle marks @handle usable and
	 * @valid marks @res usable — confirm against the validation code. */
	unsigned short valid_handle;
	unsigned short valid;
};
254
255
256
257
/*
 * enum vmw_dma_map_mode - how pages are made visible to the device.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* use physical page addresses directly */
	vmw_dma_alloc_coherent, /* use dma_alloc_coherent buffers */
	vmw_dma_map_populate,   /* dma-map pages when populating the TT */
	vmw_dma_map_bind,       /* dma-map pages at bind time */
	vmw_dma_map_max         /* number of modes, not a real mode */
};
265
266
267
268
269
270
271
272
/*
 * struct vmw_sg_table - mode-independent description of a set of pages
 * to be mapped to the device; which members are valid depends on @mode.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode; /* selects which of the members below are used */
	struct page **pages;        /* page array (phys mode) */
	const dma_addr_t *addrs;    /* dma address array (coherent mode) */
	struct sg_table *sgt;       /* scatter-gather table (map_populate/map_bind) */
	unsigned long num_regions;  /* number of contiguous dma regions */
	unsigned long num_pages;    /* total number of pages described */
};
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
/*
 * struct vmw_piter - page iterator over a struct vmw_sg_table,
 * initialized by vmw_piter_start(). The function pointers are selected
 * according to the sg table's map mode.
 */
struct vmw_piter {
	struct page **pages;            /* page array, if iterating pages */
	const dma_addr_t *addrs;        /* dma address array, if iterating addresses */
	struct sg_dma_page_iter iter;   /* sg iterator, if iterating an sg table */
	unsigned long i;                /* current index */
	unsigned long num_pages;        /* total pages to iterate */
	bool (*next)(struct vmw_piter *);            /* advance, false when exhausted */
	dma_addr_t (*dma_address)(struct vmw_piter *); /* dma address of current page */
	struct page *(*page)(struct vmw_piter *);      /* current struct page */
};
306
307
308
309
/*
 * enum vmw_display_unit_type - which KMS display-unit backend is active.
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,        /* legacy display unit (LDU) */
	vmw_du_screen_object, /* screen-object display unit (SOU) */
	vmw_du_screen_target  /* screen-target display unit (STDU) */
};
316
317struct vmw_validation_context;
318struct vmw_ctx_validation_info;
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
/*
 * struct vmw_sw_context - per-device software state used while
 * validating and submitting a command stream (see vmw_execbuf_process()).
 * Protected by the device cmdbuf_mutex.
 */
struct vmw_sw_context{
	struct drm_open_hash res_ht;          /* hash of resources seen this submission */
	bool res_ht_initialized;              /* @res_ht has been set up */
	bool kernel;                          /* command stream originates in the kernel */
	struct vmw_fpriv *fp;                 /* submitting client, if any */
	uint32_t *cmd_bounce;                 /* bounce buffer for user-space commands */
	uint32_t cmd_bounce_size;             /* current size of @cmd_bounce */
	struct vmw_buffer_object *cur_query_bo; /* current occlusion-query buffer */
	struct list_head bo_relocations;      /* pending buffer-object relocations */
	struct list_head res_relocations;     /* pending resource-id relocations */
	uint32_t *buf_start;                  /* start of the command buffer being parsed */
	struct vmw_res_cache_entry res_cache[vmw_res_max]; /* last-lookup cache per type */
	struct vmw_resource *last_query_ctx;  /* context of the last query command */
	bool needs_post_query_barrier;        /* emit a barrier after query commands */
	struct vmw_ctx_binding_state *staged_bindings; /* scratch binding state */
	bool staged_bindings_inuse;           /* @staged_bindings currently borrowed */
	struct list_head staged_cmd_res;      /* staged command-buffer managed resources */
	struct list_head ctx_list;            /* contexts referenced by this submission */
	struct vmw_ctx_validation_info *dx_ctx_node; /* validation node of the DX context */
	struct vmw_buffer_object *dx_query_mob;      /* DX query MOB, if any */
	struct vmw_resource *dx_query_ctx;           /* context owning @dx_query_mob */
	struct vmw_cmdbuf_res_manager *man;   /* command-buffer resource manager in use */
	struct vmw_validation_context *ctx;   /* validation context for this submission */
};
375
376struct vmw_legacy_display;
377struct vmw_overlay;
378
/*
 * struct vmw_master - driver-private DRM master state, see vmw_master().
 */
struct vmw_master {
	struct ttm_lock lock; /* TTM lock held by this master */
};
382
/*
 * struct vmw_vga_topology_state - saved per-display VGA topology,
 * used by vmw_kms_save_vga()/vmw_kms_restore_vga().
 */
struct vmw_vga_topology_state {
	uint32_t width;   /* display width */
	uint32_t height;  /* display height */
	uint32_t primary; /* whether this is the primary display */
	uint32_t pos_x;   /* x position in the topology */
	uint32_t pos_y;   /* y position in the topology */
};
390
391
392
393
394
395
396
397
/*
 * struct vmw_otable - one guest-backed object table.
 */
struct vmw_otable {
	unsigned long size;          /* size of the table in bytes */
	struct vmw_mob *page_table;  /* MOB holding the table pages */
	bool enabled;                /* table has been enabled on the device */
};
403
/*
 * struct vmw_otable_batch - a set of object tables sharing one backing bo,
 * set up by vmw_otables_setup().
 */
struct vmw_otable_batch {
	unsigned num_otables;               /* number of entries in @otables */
	struct vmw_otable *otables;         /* array of object tables */
	struct vmw_resource *context;       /* context used to set up the tables */
	struct ttm_buffer_object *otable_bo; /* buffer object backing all tables */
};
410
/* Bits of vmw_private::irqthread_pending - work deferred to the irq thread. */
enum {
	VMW_IRQTHREAD_FENCE,  /* fence signaling work pending */
	VMW_IRQTHREAD_CMDBUF, /* command-buffer completion work pending */
	VMW_IRQTHREAD_MAX     /* number of bits */
};
416
/*
 * struct vmw_private - per-device driver state; one instance per bound
 * vmwgfx device, reachable via vmw_priv(dev).
 */
struct vmw_private {
	struct ttm_bo_device bdev;       /* TTM buffer-object device */

	struct vmw_fifo_state fifo;      /* command FIFO state */

	struct drm_device *dev;          /* owning DRM device */
	unsigned long vmw_chipset;       /* detected chipset/device id */
	unsigned int io_start;           /* base of the SVGA I/O port range */
	uint32_t vram_start;             /* VRAM aperture start */
	uint32_t vram_size;              /* VRAM aperture size */
	uint32_t prim_bb_mem;            /* max memory for primary backing buffers */
	uint32_t mmio_start;             /* MMIO region start */
	uint32_t mmio_size;              /* MMIO region size */
	uint32_t fb_max_width;           /* device framebuffer width limit */
	uint32_t fb_max_height;          /* device framebuffer height limit */
	uint32_t texture_max_width;      /* device texture width limit */
	uint32_t texture_max_height;     /* device texture height limit */
	uint32_t stdu_max_width;         /* screen-target width limit */
	uint32_t stdu_max_height;        /* screen-target height limit */
	uint32_t initial_width;          /* initial mode width */
	uint32_t initial_height;         /* initial mode height */
	u32 *mmio_virt;                  /* kernel mapping of the MMIO region */
	uint32_t capabilities;           /* SVGA capability bits */
	uint32_t capabilities2;          /* second set of SVGA capability bits */
	uint32_t max_gmr_ids;            /* max number of GMR ids */
	uint32_t max_gmr_pages;          /* max pages per/of GMRs */
	uint32_t max_mob_pages;          /* max pages usable by MOBs */
	uint32_t max_mob_size;           /* max size of a single MOB */
	uint32_t memory_size;            /* device memory size */
	bool has_gmr;                    /* device supports GMRs */
	bool has_mob;                    /* device supports MOBs */
	spinlock_t hw_lock;              /* protects index/value port register access */
	spinlock_t cap_lock;             /* protects capability register access */
	bool has_dx;                     /* device supports DX contexts */
	bool assume_16bpp;               /* assume 16 bpp for display modes */
	bool has_sm4_1;                  /* device supports SM4.1 */

	/*
	 * VGA registers saved around KMS takeover.
	 */
	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer / KMS state.
	 */
	void *fb_info;                   /* fbdev emulation state */
	enum vmw_display_unit_type active_display_unit; /* active DU backend */
	struct vmw_legacy_display *ldu_priv;  /* legacy display unit state */
	struct vmw_overlay *overlay_priv;     /* overlay state */
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	struct mutex global_kms_state_mutex;  /* protects global KMS state */
	spinlock_t cursor_lock;               /* protects cursor state */
	struct drm_atomic_state *suspend_state; /* atomic state saved for resume */

	/*
	 * Context and surface management.
	 */
	spinlock_t resource_lock;        /* protects res_idr[] */
	struct idr res_idr[vmw_res_max]; /* id -> resource, per resource type */

	/*
	 * Serializes device init/teardown phases.
	 */
	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and objects.
	 */
	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */
	atomic_t marker_seq;             /* last emitted marker sequence number */
	wait_queue_head_t fence_queue;   /* waiters for fence seqnos */
	wait_queue_head_t fifo_queue;    /* waiters for fifo progress */
	spinlock_t waiter_lock;          /* protects the waiter counts below */
	int fence_queue_waiters;
	int goal_queue_waiters;
	int cmdbuf_waiters;
	int error_waiters;
	int fifo_queue_waiters;
	uint32_t last_read_seqno;        /* last seqno read back from the device */
	struct vmw_fence_manager *fman;  /* fence manager */
	uint32_t irq_mask;               /* currently enabled device irqs */

	/*
	 * Device state saved/restored around master switches and suspend.
	 */
	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/*
	 * Execbuf: protected by the cmdbuf mutex.
	 */
	struct vmw_sw_context ctx;       /* command submission software context */
	struct mutex cmdbuf_mutex;       /* serializes command submission */
	struct mutex binding_mutex;      /* serializes context binding changes */

	/*
	 * Operating mode.
	 */
	bool stealth;                    /* running without the I/O resource */
	bool enable_fb;                  /* fbdev emulation enabled */
	spinlock_t svga_lock;            /* protects SVGA enable/disable */

	/*
	 * Master management.
	 */
	struct vmw_master *active_master; /* currently active master */
	struct vmw_master fbdev_master;   /* master used by fbdev emulation */
	struct notifier_block pm_nb;      /* PM (hibernation) notifier */
	bool refuse_hibernation;          /* veto hibernation requests */
	bool suspend_locked;              /* suspend has taken the TTM lock */

	struct mutex release_mutex;       /* serializes fifo resource release */
	atomic_t num_fifo_resources;      /* resources requiring the fifo to stay up */

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible().
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members are protected by the cmdbuf mutex.
	 */
	struct vmw_buffer_object *dummy_query_bo; /* bo for dummy queries */
	struct vmw_buffer_object *pinned_bo;      /* currently pinned query bo */
	uint32_t query_cid;                       /* context id of the pinned query */
	uint32_t query_cid_valid;                 /* @query_cid is valid */
	bool dummy_query_bo_pinned;               /* @dummy_query_bo is pinned */

	/*
	 * Surface swapping. The "surface_lru" list is guarded by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */
	struct list_head res_lru[vmw_res_max]; /* evictable resources, per type */
	uint32_t used_memory_size;             /* memory used by validated resources */

	/*
	 * DMA mapping state.
	 */
	enum vmw_dma_map_mode map_mode;  /* how pages are mapped to the device */

	/*
	 * Guest Backed stuff.
	 */
	struct vmw_otable_batch otable_batch; /* the device object tables */

	struct vmw_cmdbuf_man *cman;     /* command buffer manager, may be NULL */
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); /* deferred irq work */

	/* Validation memory reservation accounting. */
	struct vmw_validation_mem vvm;
};
599
/* Return the vmw_surface embedding @res (res must be a surface resource). */
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}
604
605static inline struct vmw_private *vmw_priv(struct drm_device *dev)
606{
607 return (struct vmw_private *)dev->dev_private;
608}
609
610static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
611{
612 return (struct vmw_fpriv *)file_priv->driver_priv;
613}
614
615static inline struct vmw_master *vmw_master(struct drm_master *master)
616{
617 return (struct vmw_master *) master->driver_priv;
618}
619
620
621
622
623
624
625
626
/*
 * vmw_write - write a device register through the index/value port pair.
 * The index and value writes must not be interleaved with other register
 * accesses, hence the hw_lock.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	spin_lock(&dev_priv->hw_lock);
	/* Select the register, then write its value. */
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);
}
635
/*
 * vmw_read - read a device register through the index/value port pair,
 * under the same hw_lock as vmw_write(). Returns the register value.
 */
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	spin_lock(&dev_priv->hw_lock);
	/* Select the register, then read its value. */
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);

	return val;
}
648
649extern void vmw_svga_enable(struct vmw_private *dev_priv);
650extern void vmw_svga_disable(struct vmw_private *dev_priv);
651
652
653
654
655
656
657extern int vmw_gmr_bind(struct vmw_private *dev_priv,
658 const struct vmw_sg_table *vsgt,
659 unsigned long num_pages,
660 int gmr_id);
661extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
662
663
664
665
666struct vmw_user_resource_conv;
667
668extern void vmw_resource_unreference(struct vmw_resource **p_res);
669extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
670extern struct vmw_resource *
671vmw_resource_reference_unless_doomed(struct vmw_resource *res);
672extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
673extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
674 bool no_backup);
675extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
676extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
677 struct ttm_object_file *tfile,
678 uint32_t handle,
679 struct vmw_surface **out_surf,
680 struct vmw_buffer_object **out_buf);
681extern int vmw_user_resource_lookup_handle(
682 struct vmw_private *dev_priv,
683 struct ttm_object_file *tfile,
684 uint32_t handle,
685 const struct vmw_user_resource_conv *converter,
686 struct vmw_resource **p_res);
687extern struct vmw_resource *
688vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
689 struct ttm_object_file *tfile,
690 uint32_t handle,
691 const struct vmw_user_resource_conv *
692 converter);
693extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
694 struct drm_file *file_priv);
695extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
696 struct drm_file *file_priv);
697extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
698 struct ttm_object_file *tfile,
699 uint32_t *inout_id,
700 struct vmw_resource **out);
701extern void vmw_resource_unreserve(struct vmw_resource *res,
702 bool dirty_set,
703 bool dirty,
704 bool switch_backup,
705 struct vmw_buffer_object *new_backup,
706 unsigned long new_backup_offset);
707extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
708 struct ttm_mem_reg *mem);
709extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
710extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
711extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
712
713
714
715
716
/*
 * End the critical section opened by vmw_user_resource_noref_lookup_handle(),
 * allowing the looked-up resource to be released again.
 */
static inline void vmw_user_resource_noref_release(void)
{
	ttm_base_object_noref_release();
}
721
722
723
724
725extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
726 struct vmw_buffer_object *bo,
727 struct ttm_placement *placement,
728 bool interruptible);
729extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
730 struct vmw_buffer_object *buf,
731 bool interruptible);
732extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
733 struct vmw_buffer_object *buf,
734 bool interruptible);
735extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
736 struct vmw_buffer_object *bo,
737 bool interruptible);
738extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
739 struct vmw_buffer_object *bo,
740 bool interruptible);
741extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
742 SVGAGuestPtr *ptr);
743extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
744extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
745extern int vmw_bo_init(struct vmw_private *dev_priv,
746 struct vmw_buffer_object *vmw_bo,
747 size_t size, struct ttm_placement *placement,
748 bool interuptable,
749 void (*bo_free)(struct ttm_buffer_object *bo));
750extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
751 struct ttm_object_file *tfile);
752extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
753 struct ttm_object_file *tfile,
754 uint32_t size,
755 bool shareable,
756 uint32_t *handle,
757 struct vmw_buffer_object **p_dma_buf,
758 struct ttm_base_object **p_base);
759extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
760 struct vmw_buffer_object *dma_buf,
761 uint32_t *handle);
762extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
763 struct drm_file *file_priv);
764extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
765 struct drm_file *file_priv);
766extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
767 struct drm_file *file_priv);
768extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
769 uint32_t id, struct vmw_buffer_object **out,
770 struct ttm_base_object **base);
771extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
772 struct vmw_fence_obj *fence);
773extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
774extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
775extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
776 struct ttm_mem_reg *mem);
777extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
778extern struct vmw_buffer_object *
779vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
780
781
782
783
784
/*
 * End the critical section opened by vmw_user_bo_noref_lookup(),
 * allowing the looked-up buffer object to be released again.
 */
static inline void vmw_user_bo_noref_release(void)
{
	ttm_base_object_noref_release();
}
789
790
791
792
793
794
795extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
796 struct drm_file *file_priv);
797extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
798 struct drm_file *file_priv);
799extern int vmw_present_ioctl(struct drm_device *dev, void *data,
800 struct drm_file *file_priv);
801extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
802 struct drm_file *file_priv);
803extern __poll_t vmw_fops_poll(struct file *filp,
804 struct poll_table_struct *wait);
805extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
806 size_t count, loff_t *offset);
807
808
809
810
811
812extern int vmw_fifo_init(struct vmw_private *dev_priv,
813 struct vmw_fifo_state *fifo);
814extern void vmw_fifo_release(struct vmw_private *dev_priv,
815 struct vmw_fifo_state *fifo);
816extern void *
817vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
818extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
819extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
820extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
821 uint32_t *seqno);
822extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
823extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
824extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
825extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
826extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
827 uint32_t cid);
828extern int vmw_fifo_flush(struct vmw_private *dev_priv,
829 bool interruptible);
830
/*
 * VMW_FIFO_RESERVE_DX - reserve @__bytes of fifo space for context
 * @__ctx_id, logging an error and evaluating to NULL on failure.
 * Uses a GNU statement expression; the ?: form returns the non-NULL
 * reservation unchanged.
 */
#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id)                        \
({                                                                            \
	vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({                 \
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",         \
			  __func__, (unsigned int) __bytes);                  \
		NULL;                                                         \
	});                                                                   \
})
839
/* Context-less variant of VMW_FIFO_RESERVE_DX. */
#define VMW_FIFO_RESERVE(__priv, __bytes)                                     \
	VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
842
843
844
845
846
847extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
848
849extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
850 size_t gran);
851
852
853
854
855extern const size_t vmw_tt_size;
856extern struct ttm_placement vmw_vram_placement;
857extern struct ttm_placement vmw_vram_ne_placement;
858extern struct ttm_placement vmw_vram_sys_placement;
859extern struct ttm_placement vmw_vram_gmr_placement;
860extern struct ttm_placement vmw_vram_gmr_ne_placement;
861extern struct ttm_placement vmw_sys_placement;
862extern struct ttm_placement vmw_sys_ne_placement;
863extern struct ttm_placement vmw_evictable_placement;
864extern struct ttm_placement vmw_srf_placement;
865extern struct ttm_placement vmw_mob_placement;
866extern struct ttm_placement vmw_mob_ne_placement;
867extern struct ttm_placement vmw_nonfixed_placement;
868extern struct ttm_bo_driver vmw_bo_driver;
869extern int vmw_dma_quiescent(struct drm_device *dev);
870extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
871extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
872extern const struct vmw_sg_table *
873vmw_bo_sg_table(struct ttm_buffer_object *bo);
874extern void vmw_piter_start(struct vmw_piter *viter,
875 const struct vmw_sg_table *vsgt,
876 unsigned long p_offs);
877
878
879
880
881
882
883
884
/*
 * Advance @viter to the next page. Returns false when the iterator is
 * exhausted; dispatches to the mode-specific implementation.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}
889
890
891
892
893
894
895
896
/*
 * Return the DMA address of the page @viter currently points at;
 * dispatches to the mode-specific implementation.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}
901
902
903
904
905
906
907
908
/*
 * Return the struct page @viter currently points at;
 * dispatches to the mode-specific implementation.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
913
914
915
916
917
918extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
919 struct drm_file *file_priv, size_t size);
920extern int vmw_execbuf_process(struct drm_file *file_priv,
921 struct vmw_private *dev_priv,
922 void __user *user_commands,
923 void *kernel_commands,
924 uint32_t command_size,
925 uint64_t throttle_us,
926 uint32_t dx_context_handle,
927 struct drm_vmw_fence_rep __user
928 *user_fence_rep,
929 struct vmw_fence_obj **out_fence,
930 uint32_t flags);
931extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
932 struct vmw_fence_obj *fence);
933extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
934
935extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
936 struct vmw_private *dev_priv,
937 struct vmw_fence_obj **p_fence,
938 uint32_t *p_handle);
939extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
940 struct vmw_fpriv *vmw_fp,
941 int ret,
942 struct drm_vmw_fence_rep __user
943 *user_fence_rep,
944 struct vmw_fence_obj *fence,
945 uint32_t fence_handle,
946 int32_t out_fence_fd,
947 struct sync_file *sync_file);
948bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
949
950
951
952
953
954extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
955 uint32_t seqno, bool interruptible,
956 unsigned long timeout);
957extern int vmw_irq_install(struct drm_device *dev, int irq);
958extern void vmw_irq_uninstall(struct drm_device *dev);
959extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
960 uint32_t seqno);
961extern int vmw_fallback_wait(struct vmw_private *dev_priv,
962 bool lazy,
963 bool fifo_idle,
964 uint32_t seqno,
965 bool interruptible,
966 unsigned long timeout);
967extern void vmw_update_seqno(struct vmw_private *dev_priv,
968 struct vmw_fifo_state *fifo_state);
969extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
970extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
971extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
972extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
973extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
974 int *waiter_count);
975extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
976 u32 flag, int *waiter_count);
977
978
979
980
981
982
983extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
984extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
985extern int vmw_marker_push(struct vmw_marker_queue *queue,
986 uint32_t seqno);
987extern int vmw_marker_pull(struct vmw_marker_queue *queue,
988 uint32_t signaled_seqno);
989extern int vmw_wait_lag(struct vmw_private *dev_priv,
990 struct vmw_marker_queue *queue, uint32_t us);
991
992
993
994
995
996int vmw_fb_init(struct vmw_private *vmw_priv);
997int vmw_fb_close(struct vmw_private *dev_priv);
998int vmw_fb_off(struct vmw_private *vmw_priv);
999int vmw_fb_on(struct vmw_private *vmw_priv);
1000
1001
1002
1003
1004
1005int vmw_kms_init(struct vmw_private *dev_priv);
1006int vmw_kms_close(struct vmw_private *dev_priv);
1007int vmw_kms_save_vga(struct vmw_private *vmw_priv);
1008int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
1009int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1010 struct drm_file *file_priv);
1011void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
1012void vmw_kms_cursor_snoop(struct vmw_surface *srf,
1013 struct ttm_object_file *tfile,
1014 struct ttm_buffer_object *bo,
1015 SVGA3dCmdHeader *header);
1016int vmw_kms_write_svga(struct vmw_private *vmw_priv,
1017 unsigned width, unsigned height, unsigned pitch,
1018 unsigned bpp, unsigned depth);
1019void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
1020bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1021 uint32_t pitch,
1022 uint32_t height);
1023u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
1024int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
1025void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
1026int vmw_kms_present(struct vmw_private *dev_priv,
1027 struct drm_file *file_priv,
1028 struct vmw_framebuffer *vfb,
1029 struct vmw_surface *surface,
1030 uint32_t sid, int32_t destX, int32_t destY,
1031 struct drm_vmw_rect *clips,
1032 uint32_t num_clips);
1033int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1034 struct drm_file *file_priv);
1035void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
1036int vmw_kms_suspend(struct drm_device *dev);
1037int vmw_kms_resume(struct drm_device *dev);
1038void vmw_kms_lost_device(struct drm_device *dev);
1039
1040int vmw_dumb_create(struct drm_file *file_priv,
1041 struct drm_device *dev,
1042 struct drm_mode_create_dumb *args);
1043
1044int vmw_dumb_map_offset(struct drm_file *file_priv,
1045 struct drm_device *dev, uint32_t handle,
1046 uint64_t *offset);
1047int vmw_dumb_destroy(struct drm_file *file_priv,
1048 struct drm_device *dev,
1049 uint32_t handle);
1050extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
1051extern void vmw_resource_unpin(struct vmw_resource *res);
1052extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
1053
1054
1055
1056
1057
1058int vmw_overlay_init(struct vmw_private *dev_priv);
1059int vmw_overlay_close(struct vmw_private *dev_priv);
1060int vmw_overlay_ioctl(struct drm_device *dev, void *data,
1061 struct drm_file *file_priv);
1062int vmw_overlay_stop_all(struct vmw_private *dev_priv);
1063int vmw_overlay_resume_all(struct vmw_private *dev_priv);
1064int vmw_overlay_pause_all(struct vmw_private *dev_priv);
1065int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
1066int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
1067int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
1068int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
1069
1070
1071
1072
1073
1074extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
1075
1076
1077
1078
1079
1080extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
1081extern int vmw_prime_fd_to_handle(struct drm_device *dev,
1082 struct drm_file *file_priv,
1083 int fd, u32 *handle);
1084extern int vmw_prime_handle_to_fd(struct drm_device *dev,
1085 struct drm_file *file_priv,
1086 uint32_t handle, uint32_t flags,
1087 int *prime_fd);
1088
1089
1090
1091
1092struct vmw_mob;
1093extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
1094 const struct vmw_sg_table *vsgt,
1095 unsigned long num_data_pages, int32_t mob_id);
1096extern void vmw_mob_unbind(struct vmw_private *dev_priv,
1097 struct vmw_mob *mob);
1098extern void vmw_mob_destroy(struct vmw_mob *mob);
1099extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
1100extern int vmw_otables_setup(struct vmw_private *dev_priv);
1101extern void vmw_otables_takedown(struct vmw_private *dev_priv);
1102
1103
1104
1105
1106
1107extern const struct vmw_user_resource_conv *user_context_converter;
1108
1109extern int vmw_context_check(struct vmw_private *dev_priv,
1110 struct ttm_object_file *tfile,
1111 int id,
1112 struct vmw_resource **p_res);
1113extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
1114 struct drm_file *file_priv);
1115extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
1116 struct drm_file *file_priv);
1117extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
1118 struct drm_file *file_priv);
1119extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
1120extern struct vmw_cmdbuf_res_manager *
1121vmw_context_res_man(struct vmw_resource *ctx);
1122extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
1123 SVGACOTableType cotable_type);
1124extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
1125struct vmw_ctx_binding_state;
1126extern struct vmw_ctx_binding_state *
1127vmw_context_binding_state(struct vmw_resource *ctx);
1128extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
1129 bool readback);
1130extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
1131 struct vmw_buffer_object *mob);
1132extern struct vmw_buffer_object *
1133vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
1134
1135
1136
1137
1138
1139
1140extern const struct vmw_user_resource_conv *user_surface_converter;
1141
1142extern void vmw_surface_res_free(struct vmw_resource *res);
1143extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1144 struct drm_file *file_priv);
1145extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1146 struct drm_file *file_priv);
1147extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1148 struct drm_file *file_priv);
1149extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1150 struct drm_file *file_priv);
1151extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1152 struct drm_file *file_priv);
1153extern int vmw_surface_check(struct vmw_private *dev_priv,
1154 struct ttm_object_file *tfile,
1155 uint32_t handle, int *id);
1156extern int vmw_surface_validate(struct vmw_private *dev_priv,
1157 struct vmw_surface *srf);
1158int vmw_surface_gb_priv_define(struct drm_device *dev,
1159 uint32_t user_accounting_size,
1160 SVGA3dSurfaceAllFlags svga3d_flags,
1161 SVGA3dSurfaceFormat format,
1162 bool for_scanout,
1163 uint32_t num_mip_levels,
1164 uint32_t multisample_count,
1165 uint32_t array_size,
1166 struct drm_vmw_size size,
1167 SVGA3dMSPattern multisample_pattern,
1168 SVGA3dMSQualityLevel quality_level,
1169 struct vmw_surface **srf_out);
1170extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
1171 void *data,
1172 struct drm_file *file_priv);
1173extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
1174 void *data,
1175 struct drm_file *file_priv);
1176
1177
1178
1179
1180
1181extern const struct vmw_user_resource_conv *user_shader_converter;
1182
1183extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1184 struct drm_file *file_priv);
1185extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
1186 struct drm_file *file_priv);
1187extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
1188 struct vmw_cmdbuf_res_manager *man,
1189 u32 user_key, const void *bytecode,
1190 SVGA3dShaderType shader_type,
1191 size_t size,
1192 struct list_head *list);
1193extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
1194 u32 user_key, SVGA3dShaderType shader_type,
1195 struct list_head *list);
1196extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
1197 struct vmw_resource *ctx,
1198 u32 user_key,
1199 SVGA3dShaderType shader_type,
1200 struct list_head *list);
1201extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
1202 struct list_head *list,
1203 bool readback);
1204
1205extern struct vmw_resource *
1206vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1207 u32 user_key, SVGA3dShaderType shader_type);
1208
1209
1210
1211
1212
/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 *
 * Additions/removals are staged on a caller-provided list and take
 * effect only after vmw_cmdbuf_res_commit(); vmw_cmdbuf_res_revert()
 * undoes the staged operations.
 */
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);
1233
1234
1235
1236
/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);
1245
1246
1247
1248
/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
/* Reserve space in the current command buffer; commit with vmw_cmdbuf_commit. */
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: Bounding box of the region actually modified by the blit.
 * @line: Current line number of the blit (presumably in destination
 * coordinates — confirm against vmw_diff_memcpy in vmwgfx_blit.c).
 * @line_offset: Byte offset of the current line segment.
 * @cpp: Bytes per pixel of the blit.
 * @do_cpy: Copy callback performing either a plain copy (vmw_memcpy)
 * or a difference-tracking copy (vmw_diff_memcpy).
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};
1293
/* Initializer for a plain (non-diff-tracking) CPU blit. */
#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}
1297
/*
 * Initializer for a difference-tracking CPU blit of @_cpp bytes per pixel.
 * The bounding rect starts out "inverted" (x1/y1 at a large positive value,
 * x2/y2 at a large negative value) so that the first min/max update by the
 * copy callback yields a valid box.
 */
#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	  \
	.line = 0,				  \
	.line_offset = 0,			  \
	.rect = { .x1 = INT_MAX/2,		  \
		  .y1 = INT_MAX/2,		  \
		  .x2 = INT_MIN/2,		  \
		  .y2 = INT_MIN/2		  \
	},					  \
	.cpp = _cpp,				  \
	.do_cpy = vmw_diff_memcpy,		  \
}
1309
/* Copy callbacks for struct vmw_diff_cpy::do_cpy - vmwgfx_blit.c */
void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

/* CPU blit between two (possibly the same) buffer objects. */
int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);

/*
 * Host messaging - vmwgfx_msg.c
 */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
int vmw_host_log(const char *log);
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
/**
 * VMW_DEBUG_USER - Debug output intended for user-space failures
 * (e.g. malformed ioctl parameters); routed to DRM driver debug.
 *
 * @fmt: printf() like format string.
 */
#define VMW_DEBUG_USER(fmt, ...)                                              \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
1340
1341
1342
1343
1344
1345static inline void vmw_surface_unreference(struct vmw_surface **srf)
1346{
1347 struct vmw_surface *tmp_srf = *srf;
1348 struct vmw_resource *res = &tmp_srf->res;
1349 *srf = NULL;
1350
1351 vmw_resource_unreference(&res);
1352}
1353
1354static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
1355{
1356 (void) vmw_resource_reference(&srf->res);
1357 return srf;
1358}
1359
1360static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
1361{
1362 struct vmw_buffer_object *tmp_buf = *buf;
1363
1364 *buf = NULL;
1365 if (tmp_buf != NULL) {
1366 ttm_bo_put(&tmp_buf->base);
1367 }
1368}
1369
/*
 * vmw_bo_reference - Take a TTM reference on a buffer object and return
 * it for call chaining. @buf must be non-NULL.
 */
static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
	ttm_bo_get(&buf->base);
	return buf;
}
1376
/*
 * vmw_mem_glob - Return the global TTM memory accounting object.
 * The @dev_priv argument is unused; it is kept for interface symmetry
 * with the other accessors.
 */
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return &ttm_mem_glob;
}
1381
/* Atomically bump the count of resources that require a live FIFO. */
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}
1386
/* Atomically drop the count of resources that require a live FIFO. */
static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}
1391
1392
1393
1394
1395
1396
1397
1398
1399
/**
 * vmw_mmio_read - Perform a MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * READ_ONCE() prevents the compiler from caching or tearing the access;
 * no byte-swapping is performed.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}
1404
1405
1406
1407
1408
1409
1410
1411
1412
/**
 * vmw_mmio_write - Perform a MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * WRITE_ONCE() prevents the compiler from eliding or tearing the store;
 * no byte-swapping is performed.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
1417#endif
1418