#include "i915_drv.h"
#include "gvt.h"

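/* Check whether any engine of this vGPU still has a queued workload. */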
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}

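/* Per-vGPU scheduling state: run-queue linkage and timeslice accounting. */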
struct vgpu_sched_data {
	struct list_head lru_list;	/* node in the scheduler's LRU run queue */
	struct intel_vgpu *vgpu;

	ktime_t sched_in_time;		/* when this vGPU was last scheduled in */
	ktime_t sched_out_time;		/* when this vGPU was last scheduled out */
	ktime_t sched_time;		/* total time this vGPU has been scheduled */
	ktime_t left_ts;		/* timeslice left in the current stage */
	ktime_t allocated_ts;		/* timeslice allocated per stage */

	struct vgpu_sched_ctl sched_ctl;	/* scheduling knobs (weight) */
};

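/* Global time-based scheduler state, driven by a periodic hrtimer. */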
struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;		/* periodic scheduling tick */
	unsigned long period;		/* tick period in nanoseconds */
	struct list_head lru_runq_head;	/* LRU list of runnable vGPUs */
};

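/* Charge the time just consumed to the vGPU that is being scheduled out. */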
static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;

	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;

	vgpu_data->sched_time += delta_ts;
	vgpu_data->left_ts -= delta_ts;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

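/*
 * Timeslice balancing cycles through GVT_TS_BALANCE_STAGE_NUM stages, one
 * per GVT_TS_BALANCE_PERIOD_MS. Stage 0 recomputes each vGPU's fair share
 * of a 100 ms period from its weight and resets any accumulated surplus or
 * debt; the remaining stages simply top the budget up by the same
 * allocation, carrying leftovers forward.
 */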
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static uint64_t stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* Stage 0 starts a new period: reallocate every vGPU's timeslice
	 * from its weight, without carrying over previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
					 vgpu_data->sched_ctl.weight /
					 total_weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next stage adds on top of any
			 * left (or owed) slice from previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}

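/* Switch to next_vgpu, deferring while any engine still runs a workload. */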
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* No need to schedule if next_vgpu is the same as current_vgpu;
	 * let the scheduler pick next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * After this flag is set, the workload dispatch thread stops
	 * dispatching workloads for the current vGPU.
	 */
	scheduler->need_reschedule = true;

	/* Still have uncompleted workloads? Retry on a later tick. */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i])
			return;
	}

	cur_time = ktime_get();
	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		vgpu_data->sched_out_time = cur_time;
		vgpu_update_timeslice(scheduler->current_vgpu);
	}
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* Switch the current vGPU. */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* Wake up the per-engine workload dispatch threads. */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}

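/* Pick, in LRU order, a vGPU that has pending work and timeslice left. */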
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* Search for a vGPU with pending workloads. */
	list_for_each(pos, head) {
		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		/* Return the vGPU only if it still has timeslice left. */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

/* in nanoseconds */
#define GVT_DEFAULT_TIME_SLICE 1000000

static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;

		/* Move the chosen vGPU to the tail of lru_list. */
		vgpu_data = vgpu->sched_data;
		list_del_init(&vgpu_data->lru_list);
		list_add_tail(&vgpu_data->lru_list,
			      &sched_data->lru_runq_head);
	} else {
		/* No runnable vGPU: fall back to the idle vGPU. */
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}

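/* Service-thread entry point: balance timeslices and run the scheduler. */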
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	static uint64_t timer_check;

	mutex_lock(&gvt->lock);

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
			gvt_balance_timeslice(sched_data);
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->lock);
}

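/* hrtimer callback: request a scheduling pass, then re-arm the timer. */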
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;	/* 1 ms tick */
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;
}

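/* Put the vGPU on the run queue and start the tick timer if necessary. */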
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
}

/* Take the vGPU off the run queue. */
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
}

static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);

	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	int ring_id;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			/* Switch ring MMIO context from vGPU to host. */
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
}