#ifndef _GVT_H_
#define _GVT_H_

#include <uapi/linux/pci_regs.h>

#include "i915_drv.h"

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"

#define GVT_MAX_VGPU 8

struct intel_gvt_host {
	struct device *dev;
	bool initialized;
	int hypervisor_type;
	const struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describes per-platform device limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM (graphics memory) resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fence registers owned by a vGPU */
struct intel_vgpu_fence {
	struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
	u32 pmcsr_off;
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
	enum port port_num;
};

struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct intel_context *shadow[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	union {
		u64 i915_context_pml4;
		u64 i915_context_pdps[GEN8_3LVL_PDPES];
	};
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
	struct {
		u32 lrca;
		bool valid;
		u64 ring_context_gpa;
	} last_ctx[I915_NUM_ENGINES];
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	struct mutex vgpu_lock;
	int id;
	unsigned long handle; /* vGPU handle used by the hypervisor (MPT) layer */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;

	/*
	 * Both sched_data and sched_ctl can be seen as part of the global
	 * GVT scheduler structure, so they are protected by sched_lock,
	 * not vgpu_lock.
	 */
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];
	/* Set on PCI D3 entry, cleared by a device-model-level reset */
	bool d3_entered;

	struct dentry *debugfs;

	/* Hypervisor-specific per-vGPU device state */
	void *vdev;

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;
	struct intel_vgpu_vblank_timer vblank_timer;

	u32 scan_nonprivbb;
};

static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
{
	return vgpu->vdev;
}

/* Return values that indicate a fatally ill guest VM */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
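
/*
 * Illustrative use of vgpu_is_vm_unhealthy() (a sketch, not code from this
 * header): emulation paths check their return value and push the vGPU into
 * failsafe mode when the guest looks fatally ill. Both helpers used below,
 * intel_gvt_scan_and_shadow_workload() and enter_failsafe_mode(), are
 * declared later in this header.
 *
 *	int ret = intel_gvt_scan_and_shadow_workload(workload);
 *
 *	if (vgpu_is_vm_unhealthy(ret))
 *		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 */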

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks not tracked as individual registers. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u16 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg requires save & restore during host PM suspend/resume */
#define F_PM_SAVE	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)
/*
 * This reg is in GVT's mmio save-restore list and also in the hardware
 * logical context image.
 */
#define F_SR_IN_CTX	(1 << 7)
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH	(1 << 8)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	/*
	 * GVT device-level lock: protects GVT itself and any resource not
	 * yet covered by a finer-grained lock (vgpu_lock, sched_lock).
	 */
	struct mutex lock;
	/* Scheduler-scope lock: protects gvt/vgpu scheduling data. */
	struct mutex sched_lock;

	struct intel_gt *gt;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;

	/*
	 * service_request is always manipulated with atomic bit ops, so no
	 * extra locking is needed around it.
	 */
	unsigned long service_request;

	struct {
		struct engine_mmio *mmio;
		int ctx_mmio_count[I915_NUM_ENGINES];
		u32 *tlb_mmio_offset_list;
		u32 tlb_mmio_offset_list_cnt;
		u32 *mocs_mmio_offset_list;
		u32 mocs_mmio_offset_list_cnt;
	} engine_mmio_list;
	bool is_reg_whitelist_updated;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	/* Scheduling trigger by timer */
	INTEL_GVT_REQUEST_SCHED = 0,

	/* Scheduling trigger by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 1,

	/* Per-vGPU vblank emulation request */
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 2,
	INTEL_GVT_REQUEST_EMULATE_VBLANK_MAX = INTEL_GVT_REQUEST_EMULATE_VBLANK
		+ GVT_MAX_VGPU,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
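
/*
 * Illustrative only (a sketch, not code from this header): callers raise a
 * request bit to kick the service thread; per-vGPU vblank emulation
 * requests are indexed by vgpu->id on top of
 * INTEL_GVT_REQUEST_EMULATE_VBLANK.
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_SCHED);
 *	intel_gvt_request_service(gvt,
 *				  INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id);
 */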

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Convert between megabytes and bytes */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)
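
/*
 * Added note (not from the original header): MB_TO_BYTES(128) is
 * 128 << 20 == 0x8000000 bytes, so HOST_LOW_GM_SIZE below keeps 128MB of
 * low GM for host use rather than handing it out to vGPUs.
 */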

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

#define gvt_to_ggtt(gvt)	((gvt)->gt->ggtt)

/* Aperture/GM space definitions for the whole GVT device */
#define gvt_aperture_sz(gvt)	  gvt_to_ggtt(gvt)->mappable_end
#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start

#define gvt_ggtt_gm_sz(gvt)	gvt_to_ggtt(gvt)->vm.total
#define gvt_ggtt_sz(gvt)	(gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
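
/*
 * Added note (not from the original header): the global graphics memory
 * space is split into the CPU-mappable "aperture" (low GM), starting at
 * graphics memory address 0, and the CPU-invisible "hidden" (high GM)
 * region stacked directly above it:
 *
 *	[0, gvt_aperture_gmadr_end]			aperture / low GM
 *	[gvt_hidden_gmadr_base, gvt_hidden_gmadr_end]	hidden / high GM
 */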

/* Aperture/GM space definitions for a vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

/* Ring context size, i.e. the first 0x50 dwords of the ring context. */
#define RING_CTX_SIZE 320

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual registers */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
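
/*
 * Illustrative only (not from the original header): the _t variants take a
 * typed i915_reg_t, the plain variants take a raw byte offset into the
 * virtual register page. Both expand to lvalues, so they can be read or
 * written. The register and offset below are placeholders.
 *
 *	vgpu_vreg_t(vgpu, RING_TAIL(RENDER_RING_BASE)) = 0;
 *	val = vgpu_vreg(vgpu, 0x70008);
 */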

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
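
/*
 * Illustrative only (a sketch, not code from this header): iterate every
 * vGPU still marked active, with gvt->lock held to keep the IDR stable.
 *
 *	struct intel_vgpu *vgpu;
 *	int id, active_count = 0;
 *
 *	mutex_lock(&gvt->lock);
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		active_count++;
 *	mutex_unlock(&gvt->lock);
 */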

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32 bits aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * Only update bit 31 - bit 4; the low bits (memory type
		 * and related flags) are left unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}
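
/*
 * Illustrative only (not from the original header): a 64-bit memory BAR
 * spans two consecutive dwords of config space, so emulation writes the
 * low and high halves separately. PCI_BASE_ADDRESS_0 comes from
 * <uapi/linux/pci_regs.h>, included above.
 *
 *	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_0, lo, true);
 *	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_0 + 4, hi, false);
 */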

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* Validating GM address ranges */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
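
/*
 * Illustrative only (not from the original header): validate a guest
 * graphics memory address before translating it with the g2h helper
 * declared below.
 *
 *	u64 h_addr;
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, g_addr))
 *		return -EINVAL;
 *	if (intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr))
 *		return -EACCES;
 */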

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* We are a 64 bit BAR. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}
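
/*
 * Illustrative only (not from the original header): fetch the guest
 * physical base address the guest programmed into BAR0.
 *
 *	u64 gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
 */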

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

/* Device-model operations exported to the hypervisor (MPT) modules. */
struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				 unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				 unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				  unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
	void (*vgpu_release)(struct intel_vgpu *vgpu);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};

/* Reasons for a vGPU to enter failsafe mode. */
enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
	intel_runtime_pm_get(gt->uncore->rpm);
}

static inline void mmio_hw_access_post(struct intel_gt *gt)
{
	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}
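
/*
 * Illustrative only (not from the original header): physical MMIO accesses
 * must be bracketed by these helpers so the device cannot runtime-suspend
 * in the middle of the access. The register below is a placeholder.
 *
 *	mmio_hw_access_pre(gvt->gt);
 *	val = intel_uncore_read(gvt->gt->uncore, reg);
 *	mmio_hw_access_post(gvt->gt);
 */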

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_accessible - if an MMIO could be accessed by command
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO is able to be accessed by GPU commands.
 */
static inline bool intel_gvt_mmio_is_cmd_accessible(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_set_cmd_accessible - mark an MMIO as accessible by command
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessible(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - if an MMIO may be accessed unaligned
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_has_mode_mask - if an MMIO has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has a mode mask in its higher 16 bits.
 */
static inline bool intel_gvt_mmio_has_mode_mask(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}

/**
 * intel_gvt_mmio_is_sr_in_ctx - check if an MMIO has the F_SR_IN_CTX
 *				 attribute
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has the F_SR_IN_CTX attribute, false otherwise.
 */
static inline bool intel_gvt_mmio_is_sr_in_ctx(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}

/**
 * intel_gvt_mmio_set_sr_in_ctx - mark an MMIO as being in GVT's
 *				  save-restore list and in the hardware
 *				  logical context image
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_sr_in_ctx(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}

void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);

/**
 * intel_gvt_mmio_set_cmd_write_patch - mark an MMIO whose command write
 *					needs to be patched
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_write_patch(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
}

/**
 * intel_gvt_mmio_is_cmd_write_patch - check if an MMIO's command write
 *				       needs to be patched
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if a GPU command write to this MMIO should be patched.
 */
static inline bool intel_gvt_mmio_is_cmd_write_patch(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}

void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);

int intel_gvt_pm_resume(struct intel_gvt *gvt);

#include "trace.h"
#include "mpt.h"

#endif