#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;

	spinlock_t mmio_context_lock;
	/* can be null when owner is host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	void *sched_data;
	struct intel_gvt_sched_policy_ops *sched_ops;
};

#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
	void *shadow_va;
	uint32_t size;
};
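
/*
 * A minimal decoding sketch, assuming the mask layout above: the guest's
 * indirect-context register packs a cacheline-aligned graphics memory
 * address in the high bits and a size (in cachelines) in the low six bits.
 * The names indirect_ctx_reg and CACHELINE_BYTES are illustrative here:
 *
 *	gma  = indirect_ctx_reg & INDIRECT_CTX_ADDR_MASK;
 *	size = (indirect_ctx_reg & INDIRECT_CTX_SIZE_MASK) * CACHELINE_BYTES;
 */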

#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
	unsigned int valid;
};

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};

struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	int ring_id;
	struct i915_request *req;
	/* if this workload has been dispatched to i915? */
	bool dispatched;
	int status;

	struct intel_vgpu_mm *shadow_mm;

	/* different submission model may need different handler */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* OA registers */
	u32 oactxctrl;
	u32 flex_mmio[7];
};

struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned int clflush;
	bool accessing;
	unsigned long bb_offset;
	bool ppgtt;
};

#define workload_q_head(vgpu, ring_id) \
	(&(vgpu->submission.workload_q_head[ring_id]))
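
/*
 * A usage sketch: workload_q_head() only resolves the per-engine queue
 * head inside vgpu->submission, so the scheduler side enqueues with the
 * standard list helpers, along the lines of:
 *
 *	list_add_tail(&workload->list, workload_q_head(vgpu, ring_id));
 *
 * External callers should go through intel_vgpu_queue_workload() below
 * rather than touching the queue directly.
 */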

void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 unsigned long engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     unsigned long engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc);
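
/*
 * A sketch of the expected call flow on the execlist submission path
 * (vgpu, ring_id and desc stand in for the caller's state): a context
 * descriptor written by the guest is turned into a workload and handed
 * to the per-engine scheduler thread:
 *
 *	struct intel_vgpu_workload *workload;
 *
 *	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *
 *	intel_vgpu_queue_workload(workload);
 *
 * intel_vgpu_destroy_workload() below reclaims a workload that fails
 * before it reaches the queue.
 */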

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				unsigned long engine_mask);

#endif