#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

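/* Static hardware parameters of the GVT device, used to size vGPU resources. */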
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;	/* virtual register state seen by the guest */
	void *sreg;	/* shadow register state */
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
};

struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

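/*
 * Per-submission-interface operations (execlist or GuC); each callback takes
 * a mask of the engines it should operate on.
 */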
struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
};

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	struct i915_gem_context *shadow_ctx;
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
};

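/* State of a single virtual GPU instance. */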
struct intel_vgpu {
	struct intel_gvt *gvt;
	struct mutex vgpu_lock;
	int id;
	unsigned long handle; /* vGPU handle used by the hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;

	/*
	 * Both sched_data and sched_ctl can be seen as part of the global
	 * GVT scheduler structure; they are protected by gvt->sched_lock,
	 * not by vgpu_lock.
	 */
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];

	struct dentry *debugfs;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;

		/*
		 * Two caches are used to avoid mapping duplicated pages
		 * (e.g. scratch pages). This helps to reduce DMA setup
		 * overhead.
		 */
		struct rb_root gfn_cache;
		struct rb_root dma_addr_cache;
		unsigned long nr_cache_entries;
		struct mutex cache_lock;

		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
		struct vfio_device *vfio_device;
	} vdev;
#endif

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;

	struct completion vblank_done;

	u32 scan_nonprivbb;
};

/* A guest is considered unhealthy when emulation fails with these errors. */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

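/*
 * A contiguous MMIO range handled by a single pair of read/write handlers,
 * instead of one tracked entry per register.
 */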
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)
/* This reg is saved/restored in context */
#define F_IN_CTX	(1 << 7)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

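/* Snapshot of the initial config space and MMIO state saved at init time. */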
struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

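/* Per-device GVT state, attached to struct drm_i915_private. */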
struct intel_gvt {
	/*
	 * GVT-scope lock: protects GVT itself and any resource not yet
	 * covered by a more specific lock (vgpu_lock or sched_lock).
	 */
	struct mutex lock;
	/* Scheduler-scope lock: protects gvt and vgpu scheduling data. */
	struct mutex sched_lock;

	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;

	/*
	 * service_request is always accessed with atomic bit operations,
	 * so it does not need to be protected by the big gvt lock.
	 */
	unsigned long service_request;

	struct {
		struct engine_mmio *mmio;
		int ctx_mmio_count[I915_NUM_ENGINES];
	} engine_mmio_list;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

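/* Requests handled by the GVT service thread. */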
enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling trigger by timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling trigger by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

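/* Post a service request and wake the GVT service thread to handle it. */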
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

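/* Aperture/GM space definitions for the GVT device. */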
#define gvt_aperture_sz(gvt)		(gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt)	(gvt->dev_priv->ggtt.gmadr.start)

#define gvt_ggtt_gm_sz(gvt)		(gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)		(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

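/* Aperture/GM space definitions for a vGPU. */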
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

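/* Parameters used when creating a vGPU and allocating its resources. */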
struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

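/*
 * Macros for easily accessing a vGPU's virtual/shadow register state,
 * either by typed MMIO reg or by raw offset.
 */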
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_sreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
#define vgpu_sreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.sreg + (offset)))

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)

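/* Emulate a guest write to a BAR register in the vGPU's virtual config space. */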
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32 bits aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * Only update bit 31 - bit 4,
		 * leave the bit 3 - bit 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

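/* Helpers for validating guest graphics memory addresses (GM). */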
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

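/* Return the guest physical base address programmed into a BAR. */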
static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* The tracked BARs are 64-bit wide. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

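/*
 * Callbacks exported to the hypervisor-specific module (e.g. kvmgt) so it
 * can drive the core GVT device model.
 */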
struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				 unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				 unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				  unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
	void (*vgpu_release)(struct intel_vgpu *vgpu);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
			const char *name);
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
};

/* Reasons for a vGPU to enter failsafe mode. */
enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

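/* Hold a runtime-PM reference while touching real hardware MMIO. */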
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

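/**
 * intel_gvt_mmio_set_accessed - mark a MMIO register as accessed
 * @gvt: a GVT device
 * @offset: register offset
 */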
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

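/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed
 * by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the register may be accessed by GPU commands.
 */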
static inline bool intel_gvt_mmio_is_cmd_access(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

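/**
 * intel_gvt_mmio_is_unalign - check if an MMIO register allows unaligned access
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if unaligned access to the register is allowed.
 */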
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

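/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed through
 * GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 */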
static inline void intel_gvt_mmio_set_cmd_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}

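/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * In a mode-mask register, the high 16 bits select which of the low 16 bits
 * a write actually updates.
 *
 * Returns:
 * True if the register has a mode mask.
 */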
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}

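/**
 * intel_gvt_mmio_is_in_ctx - check if an MMIO register is part of the engine
 * context image
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the register is saved/restored with the engine context.
 */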
static inline bool intel_gvt_mmio_is_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
}

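/**
 * intel_gvt_mmio_set_in_ctx - mark an MMIO register as part of the engine
 * context image
 * @gvt: a GVT device
 * @offset: register offset
 */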
static inline void intel_gvt_mmio_set_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
}

int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);

#include "trace.h"
#include "mpt.h"

#endif