1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#ifndef _VMWGFX_DRV_H_
29#define _VMWGFX_DRV_H_
30
31#include "vmwgfx_reg.h"
32#include "drmP.h"
33#include "vmwgfx_drm.h"
34#include "drm_hashtab.h"
35#include "linux/suspend.h"
36#include "ttm/ttm_bo_driver.h"
37#include "ttm/ttm_object.h"
38#include "ttm/ttm_lock.h"
39#include "ttm/ttm_execbuf_util.h"
40#include "ttm/ttm_module.h"
41
/* Driver version and release date reported to userspace. */
#define VMWGFX_DRIVER_DATE "20100927"
#define VMWGFX_DRIVER_MAJOR 1
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
/* Base of the mmap offset space for buffer objects (units presumably pages,
 * per TTM convention — confirm). */
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
/* Size of the static FIFO bounce buffer (1 MiB), see struct vmw_fifo_state. */
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
/* Capacities of the per-submission arrays in struct vmw_sw_context. */
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048
#define VMWGFX_MAX_DISPLAYS 16

/* GMR memory is implemented on top of TTM's first private placement. */
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
54
/*
 * Per-open-file driver private data, hung off drm_file::driver_priv
 * (see vmw_fpriv() below).
 */
struct vmw_fpriv {
	struct drm_master *locked_master;	/* master this file holds locked, if any — confirm */
	struct ttm_object_file *tfile;		/* TTM per-file object table used for handle lookups */
};
59
/*
 * A TTM buffer object plus driver bookkeeping used during the
 * validation pass of command submission.
 */
struct vmw_dma_buffer {
	struct ttm_buffer_object base;	/* must stay first so the object can be cast from its TTM base */
	struct list_head validate_list;
	bool gmr_bound;			/* currently bound to a GMR? — confirm */
	uint32_t cur_validate_node;	/* index returned by vmw_dmabuf_validate_node() */
	bool on_validate_list;		/* guards against duplicate validation-list entries — confirm */
};
67
68struct vmw_resource {
69 struct kref kref;
70 struct vmw_private *dev_priv;
71 struct idr *idr;
72 int id;
73 enum ttm_object_type res_type;
74 bool avail;
75 void (*hw_destroy) (struct vmw_resource *res);
76 void (*res_free) (struct vmw_resource *res);
77
78
79#if 0
80 void (*snoop)(struct vmw_resource *res,
81 struct ttm_object_file *tfile,
82 SVGA3dCmdHeader *header);
83 void *snoop_priv;
84#endif
85};
86
/*
 * State for snooping cursor-image updates out of the command stream
 * (see vmw_kms_cursor_snoop()).
 */
struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;		/* presumably bumped on each snooped update — confirm */
	uint32_t *image;	/* copy of the snooped cursor image */
};
92
/*
 * A device surface resource.
 */
struct vmw_surface {
	struct vmw_resource res;	/* base resource; must stay first */
	uint32_t flags;
	uint32_t format;		/* presumably an SVGA3d surface format — confirm */
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];	/* mip count per face */
	struct drm_vmw_size *sizes;	/* array of num_sizes mip-level sizes */
	uint32_t num_sizes;

	bool scanout;			/* surface may be used for scanout */

	/* Cursor-image snooping state for this surface. */
	struct vmw_cursor_snooper snooper;
};
106
/*
 * Queue of emitted fences used for lag tracking (see vmw_fence_push(),
 * vmw_fence_pull() and vmw_wait_lag()).
 */
struct vmw_fence_queue {
	struct list_head head;		/* outstanding fence entries */
	struct timespec lag;		/* accumulated fence lag */
	struct timespec lag_time;	/* presumably time of the last lag update — confirm */
	spinlock_t lock;		/* protects the fields above */
};
113
/*
 * State of the device command FIFO, used by vmw_fifo_reserve() /
 * vmw_fifo_commit().  The static/dynamic buffer fields apparently
 * implement a bounce buffer for reservations that wrap or do not fit
 * in the mapped FIFO — confirm against the .c implementation.
 */
struct vmw_fifo_state {
	unsigned long reserved_size;	/* bytes currently reserved */
	__le32 *dynamic_buffer;		/* heap-allocated bounce buffer */
	__le32 *static_buffer;		/* preallocated bounce buffer */
	__le32 *last_buffer;
	uint32_t last_data_size;
	uint32_t last_buffer_size;
	bool last_buffer_add;
	unsigned long static_buffer_size;	/* size of static_buffer (presumably VMWGFX_FIFO_STATIC_SIZE) */
	bool using_bounce_buffer;	/* current reservation goes through a bounce buffer */
	uint32_t capabilities;		/* FIFO capability bits */
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_fence_queue fence_queue;	/* fences emitted through this FIFO */
};
129
/*
 * A guest-pointer patch site in a submitted command buffer: @location
 * is presumably rewritten using validation-list entry @index — confirm
 * against the execbuf implementation.
 */
struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};
134
/*
 * Per-device software state used while validating and patching a
 * command stream (see vmw_execbuf_ioctl()).  Presumably protected by
 * vmw_private::cmdbuf_mutex — confirm.
 */
struct vmw_sw_context{
	struct ida bo_list;
	uint32_t last_cid;		/* context id most recently seen */
	bool cid_valid;			/* last_cid holds a valid id */
	uint32_t last_sid;		/* surface id most recently seen */
	uint32_t sid_translation;	/* device id last_sid translates to — confirm */
	bool sid_valid;			/* last_sid/sid_translation are valid */
	struct ttm_object_file *tfile;	/* file the submission originated from */
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;		/* number of relocs in use */
	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
	uint32_t cur_val_buf;		/* number of val_bufs in use */
};
149
150struct vmw_legacy_display;
151struct vmw_overlay;
152
/*
 * Per-DRM-master state, hung off drm_master::driver_priv (see
 * vmw_master() below).
 */
struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;	/* protects fb_surf */
	struct list_head fb_surf;	/* presumably framebuffer surfaces owned by this master — confirm */
};
158
/*
 * Per-display VGA topology entry, captured in vmw_private::vga_save[]
 * across VGA save/restore (see vmw_kms_save_vga()/vmw_kms_restore_vga()).
 */
struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;	/* presumably nonzero for the primary display — confirm */
	uint32_t pos_x;		/* display position within the topology */
	uint32_t pos_y;
};
166
/*
 * Main per-device private structure, reachable through
 * drm_device::dev_private (see vmw_priv() below).
 */
struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;		/* base of the index/value I/O port pair (see vmw_read/vmw_write) */
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	__le32 __iomem *mmio_virt;	/* kernel mapping of the MMIO area */
	int mmio_mtrr;
	uint32_t capabilities;		/* device capability bits */
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	bool has_gmr;
	struct mutex hw_mutex;		/* presumably serializes hardware register access — confirm */

	/*
	 * VGA state saved/restored around modesetting.
	 */
	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_depth;
	uint32_t vga_bpp;
	uint32_t vga_pseudo;
	uint32_t vga_red_mask;
	uint32_t vga_green_mask;
	uint32_t vga_blue_mask;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer and display-unit state.
	 */
	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Resource id managers; presumably protected by resource_lock —
	 * confirm.
	 */
	rwlock_t resource_lock;
	struct idr context_idr;
	struct idr surface_idr;
	struct idr stream_idr;

	/*
	 * Purpose unclear from this file — presumably taken during
	 * device initialization; confirm against the .c files.
	 */
	struct mutex init_mutex;

	/*
	 * TTM object/handle device used with the per-file tfiles.
	 */
	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQ handling.
	 */
	atomic_t fence_seq;		/* presumably the last emitted fence sequence — confirm */
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	atomic_t fence_queue_waiters;
	atomic_t fifo_queue_waiters;
	uint32_t last_read_sequence;
	spinlock_t irq_lock;

	/*
	 * Saved device register state.
	 */
	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/*
	 * Command submission state (see also cmdbuf_mutex, which
	 * presumably protects ctx — confirm).
	 */
	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;

	/*
	 * Operating mode flags.
	 */
	bool stealth;
	bool is_opened;
	bool enable_fb;

	/*
	 * Master management and power management.
	 */
	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;	/* suspend/hibernate notifier (linux/suspend.h) */
	bool suspended;

	struct mutex release_mutex;	/* presumably protects num_3d_resources — confirm */
	uint32_t num_3d_resources;	/* see vmw_3d_resource_inc()/vmw_3d_resource_dec() */
};
289
290static inline struct vmw_private *vmw_priv(struct drm_device *dev)
291{
292 return (struct vmw_private *)dev->dev_private;
293}
294
295static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
296{
297 return (struct vmw_fpriv *)file_priv->driver_priv;
298}
299
300static inline struct vmw_master *vmw_master(struct drm_master *master)
301{
302 return (struct vmw_master *) master->driver_priv;
303}
304
305static inline void vmw_write(struct vmw_private *dev_priv,
306 unsigned int offset, uint32_t value)
307{
308 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
309 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
310}
311
312static inline uint32_t vmw_read(struct vmw_private *dev_priv,
313 unsigned int offset)
314{
315 uint32_t val;
316
317 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
318 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
319 return val;
320}
321
/*
 * Tracking of active 3D resources (see vmw_private::num_3d_resources).
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv);
void vmw_3d_resource_dec(struct vmw_private *dev_priv);

/*
 * Guest Memory Region (GMR) binding.
 */
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			struct page *pages[],
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
334
335
336
337
338
339extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
340extern void vmw_resource_unreference(struct vmw_resource **p_res);
341extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
342extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
343 struct drm_file *file_priv);
344extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
345 struct drm_file *file_priv);
346extern int vmw_context_check(struct vmw_private *dev_priv,
347 struct ttm_object_file *tfile,
348 int id);
349extern void vmw_surface_res_free(struct vmw_resource *res);
350extern int vmw_surface_init(struct vmw_private *dev_priv,
351 struct vmw_surface *srf,
352 void (*res_free) (struct vmw_resource *res));
353extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
354 struct ttm_object_file *tfile,
355 uint32_t handle,
356 struct vmw_surface **out);
357extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
358 struct drm_file *file_priv);
359extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
360 struct drm_file *file_priv);
361extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
362 struct drm_file *file_priv);
363extern int vmw_surface_check(struct vmw_private *dev_priv,
364 struct ttm_object_file *tfile,
365 uint32_t handle, int *id);
366extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
367extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
368 struct vmw_dma_buffer *vmw_bo,
369 size_t size, struct ttm_placement *placement,
370 bool interuptable,
371 void (*bo_free) (struct ttm_buffer_object *bo));
372extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
373 struct drm_file *file_priv);
374extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
375 struct drm_file *file_priv);
376extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
377 uint32_t cur_validate_node);
378extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
379extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
380 uint32_t id, struct vmw_dma_buffer **out);
381extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
382 struct vmw_dma_buffer *bo);
383extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
384 struct vmw_dma_buffer *bo);
385extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
386 struct drm_file *file_priv);
387extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
388 struct drm_file *file_priv);
389extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
390 struct ttm_object_file *tfile,
391 uint32_t *inout_id,
392 struct vmw_resource **out);
393
394
395
396
397
398
/*
 * Misc ioctls.
 */
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

/*
 * Command FIFO handling (see struct vmw_fifo_state).
 */
extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *sequence);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
420
421
422
423
424
/*
 * TTM glue.
 */
extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/*
 * TTM buffer-object driver and placement definitions.
 */
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);

/*
 * Command submission.
 */
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
447
448
449
450
451
/*
 * IRQ handling and fence waiting.
 */
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
			  uint32_t sequence, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
			       uint32_t sequence);
extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t sequence,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_sequence(struct vmw_private *dev_priv,
				struct vmw_fifo_state *fifo_state);

/*
 * Fence queue / lag tracking (see struct vmw_fence_queue).
 */
extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
extern int vmw_fence_push(struct vmw_fence_queue *queue,
			  uint32_t sequence);
extern int vmw_fence_pull(struct vmw_fence_queue *queue,
			  uint32_t signaled_sequence);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_fence_queue *queue, uint32_t us);
486
487
488
489
490
/*
 * Kernel framebuffer (fbdev) support.
 */
int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
495
496
497
498
499
500int vmw_kms_init(struct vmw_private *dev_priv);
501int vmw_kms_close(struct vmw_private *dev_priv);
502int vmw_kms_save_vga(struct vmw_private *vmw_priv);
503int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
504int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
505 struct drm_file *file_priv);
506void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
507void vmw_kms_cursor_snoop(struct vmw_surface *srf,
508 struct ttm_object_file *tfile,
509 struct ttm_buffer_object *bo,
510 SVGA3dCmdHeader *header);
511void vmw_kms_write_svga(struct vmw_private *vmw_priv,
512 unsigned width, unsigned height, unsigned pitch,
513 unsigned bbp, unsigned depth);
514int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
515 struct drm_file *file_priv);
516void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
517bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
518 uint32_t pitch,
519 uint32_t height);
520u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
521
522
523
524
525
/*
 * Video overlay support.
 */
int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/*
 * GMR id memory-type manager for TTM (see VMW_PL_GMR).
 */
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
543
544
545
546
547
548static inline void vmw_surface_unreference(struct vmw_surface **srf)
549{
550 struct vmw_surface *tmp_srf = *srf;
551 struct vmw_resource *res = &tmp_srf->res;
552 *srf = NULL;
553
554 vmw_resource_unreference(&res);
555}
556
557static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
558{
559 (void) vmw_resource_reference(&srf->res);
560 return srf;
561}
562
563static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
564{
565 struct vmw_dma_buffer *tmp_buf = *buf;
566 struct ttm_buffer_object *bo = &tmp_buf->base;
567 *buf = NULL;
568
569 ttm_bo_unref(&bo);
570}
571
572static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
573{
574 if (ttm_bo_reference(&buf->base))
575 return buf;
576 return NULL;
577}
578
579#endif
580