#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

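/*
 * Per-device scheduler state: one kernel thread per engine pulls
 * workloads off the per-vGPU queues, while the pluggable policy behind
 * sched_ops decides which vGPU (next_vgpu) should own the engines next.
 * Setting need_reschedule asks the threads to pause submission so the
 * vGPU switch can take place.
 */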
struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;

	spinlock_t mmio_context_lock;
	/* can be NULL when the owner is host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	void *sched_data;
	struct intel_gvt_sched_policy_ops *sched_ops;
};

#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
	void *shadow_va;
	u32 size;
};
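/*
 * A minimal decoding sketch (regval is a hypothetical guest register
 * value, not a name from this file): the low six bits carry the context
 * size in cachelines, the remaining bits the cacheline-aligned address.
 *
 *	guest_gma = regval & INDIRECT_CTX_ADDR_MASK;
 *	size      = (regval & INDIRECT_CTX_SIZE_MASK) * CACHELINE_BYTES;
 */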

#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
	unsigned int valid;
};
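/*
 * Likewise a sketch for the per-context area: bit 0 of the guest value
 * is the valid flag, the rest a page-aligned graphics address.
 *
 *	guest_gma = regval & PER_CTX_ADDR_MASK;
 *	valid     = regval & 1;
 */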

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};

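/*
 * One guest submission on one engine. GVT-g shadows the guest's ring
 * buffer, batch buffers and workaround contexts into host-owned copies;
 * req is the i915 request that actually executes the shadow copy, and
 * status plus the complete() callback report the result back.
 */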
struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	const struct intel_engine_cs *engine;
	struct i915_request *req;
	/* if this workload has been dispatched to i915 */
	bool dispatched;
	bool shadow;	/* if workload has done shadow of guest request */
	int status;

	struct intel_vgpu_mm *shadow_mm;
	struct list_head lri_shadow_mm; /* For PPGTT load cmd */

	/* different submission models may need different handlers */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* OA registers */
	u32 oactxctrl;
	u32 flex_mmio[7];
};

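/*
 * One shadowed batch buffer. bb_start_cmd_va points at the
 * MI_BATCH_BUFFER_START command within the shadow ring buffer so that
 * the batch address can be patched once the shadow object is pinned.
 */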
struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned long bb_offset;
	bool ppgtt;
};

#define workload_q_head(vgpu, e) \
	(&(vgpu)->submission.workload_q_head[(e)->id])
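/*
 * A hedged usage sketch: appending a workload to the queue of its own
 * engine, which is what intel_vgpu_queue_workload() does internally.
 *
 *	list_add_tail(&workload->list,
 *		      workload_q_head(workload->vgpu, workload->engine));
 */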

void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
			   const struct intel_engine_cs *engine,
			   struct execlist_ctx_descriptor_format *desc);
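/*
 * A minimal call-flow sketch, assuming desc was captured from a guest
 * ELSP write: create the workload for that engine, then hand it to the
 * scheduler.
 *
 *	workload = intel_vgpu_create_workload(vgpu, engine, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	intel_vgpu_queue_workload(workload);
 */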

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);

#endif