#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

#include "gt/intel_engine_types.h"

#include "execlist.h"
#include "interrupt.h"

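/*
 * Global scheduler state shared across vGPUs: a kernel thread and wait
 * queue per physical engine dispatch workloads from the per-vGPU queues,
 * while sched_data/sched_ops hook in a pluggable scheduling policy (for
 * example the time-based policy in sched_policy.c).
 */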
struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;

	spinlock_t mmio_context_lock;
	/* can be null when owner is host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	void *sched_data;
	struct intel_gvt_sched_policy_ops *sched_ops;
};

#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
	void *shadow_va;
	u32 size;
};

#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
	unsigned valid;
};

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};
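
/*
 * The masks above decode the guest's INDIRECT_CTX and BB_PER_CTX_PTR
 * ring-context values: the high bits carry a graphics memory address,
 * and the low six bits of INDIRECT_CTX carry the buffer size in cache
 * lines. A sketch of how a shadowing path might fill intel_shadow_wa_ctx
 * from the guest ring context (illustrative only; field names follow
 * execlist.h's guest ring-context layout):
 *
 *	wa_ctx.indirect_ctx.guest_gma =
 *		ring_context->rcs_indirect_ctx.val & INDIRECT_CTX_ADDR_MASK;
 *	wa_ctx.indirect_ctx.size =
 *		(ring_context->rcs_indirect_ctx.val & INDIRECT_CTX_SIZE_MASK) *
 *		CACHELINE_BYTES;
 *	wa_ctx.per_ctx.guest_gma =
 *		ring_context->bb_per_ctx_ptr.val & PER_CTX_ADDR_MASK;
 */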

/*
 * One guest submission on one engine: the guest's context descriptor,
 * shadow copies of its ring buffer, page tables and batch buffers, and
 * the host i915_request that carries the work to hardware.
 */
struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	const struct intel_engine_cs *engine;
	struct i915_request *req;
	/* if this workload has been dispatched to i915? */
	bool dispatched;
	bool shadow;	/* if workload has done shadow of guest request */
	int status;

	struct intel_vgpu_mm *shadow_mm;
	struct list_head lri_shadow_mm; /* For PPGTT load cmd */

	/* different submission model may need different handler */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* OA (performance monitoring) registers */
	u32 oactxctrl;
	u32 flex_mmio[7];
};

/* a guest batch buffer shadowed into a host GEM object for scanning */
struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned long bb_offset;
	bool ppgtt;
};

#define workload_q_head(vgpu, e) \
	(&(vgpu)->submission.workload_q_head[(e)->id])
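
/*
 * workload_q_head() resolves the per-engine pending-workload list head
 * inside a vGPU's submission state. Illustrative sketch of what queuing
 * a workload amounts to (the real path, intel_vgpu_queue_workload(),
 * also kicks the scheduler and wakes the engine's worker thread):
 *
 *	list_add_tail(&workload->list,
 *		      workload_q_head(workload->vgpu, workload->engine));
 */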

void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
			   const struct intel_engine_cs *engine,
			   struct execlist_ctx_descriptor_format *desc);
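
/*
 * Typical lifecycle as driven by a submission backend (a sketch under
 * the execlist model; error handling elided):
 *
 *	workload = intel_vgpu_create_workload(vgpu, engine, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	// ... fill in backend-specific state ...
 *	intel_vgpu_queue_workload(workload);
 *
 * The per-engine scheduler thread then shadows and dispatches the
 * workload and calls intel_vgpu_destroy_workload() after its
 * complete() callback has run.
 */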

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);

#endif