1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include "i915_drv.h"
35#include "gvt.h"
36#include "i915_pvinfo.h"
37
/*
 * populate_pvinfo_page - fill the PVINFO page of a vGPU's virtual MMIO space
 * @vgpu: the vGPU whose PVINFO page is (re)initialized
 *
 * Writes the magic/version handshake, the capability bits and the graphics
 * memory / fence resources assigned to this vGPU into its virtual register
 * space, so a paravirtualized guest driver can discover what it owns.
 */
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	/* Handshake fields the guest driver checks first. */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	/* Capabilities advertised to the guest. */
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	/* Mappable (aperture) and non-mappable (hidden) GM ranges. */
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	/* UINT_MAX presumably marks "hotspot not set by guest yet" — confirm. */
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	/* The layout of struct vgt_if must exactly fill the PVINFO page. */
	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
75
#define VGPU_MAX_WEIGHT 16
/* Weight per instance so that vgpu_num instances share VGPU_MAX_WEIGHT. */
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

/* Static table of supported vGPU type configurations. */
static struct {
	unsigned int low_mm;	/* mappable (low) GM size, bytes */
	unsigned int high_mm;	/* non-mappable (high) GM size, bytes */
	unsigned int fence;	/* number of fence registers */

	/*
	 * The weight control of a vGPU scales with the number of instances
	 * the type allows: more instances per GPU, smaller weight each.
	 */
	unsigned int weight;
	enum intel_vgpu_edid edid;	/* virtual display resolution */
	char *name;	/* type-name suffix (nominal instances per GPU) */
} vgpu_types[] = {
	/* low_mm, high_mm, fence, weight, edid, name */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
99
100
101
102
103
104
105
106
107int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
108{
109 unsigned int num_types;
110 unsigned int i, low_avail, high_avail;
111 unsigned int min_low;
112
113
114
115
116
117
118
119
120
121
122
123
124
125 low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
126 high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
127 num_types = ARRAY_SIZE(vgpu_types);
128
129 gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
130 GFP_KERNEL);
131 if (!gvt->types)
132 return -ENOMEM;
133
134 min_low = MB_TO_BYTES(32);
135 for (i = 0; i < num_types; ++i) {
136 if (low_avail / vgpu_types[i].low_mm == 0)
137 break;
138
139 gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
140 gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
141 gvt->types[i].fence = vgpu_types[i].fence;
142
143 if (vgpu_types[i].weight < 1 ||
144 vgpu_types[i].weight > VGPU_MAX_WEIGHT)
145 return -EINVAL;
146
147 gvt->types[i].weight = vgpu_types[i].weight;
148 gvt->types[i].resolution = vgpu_types[i].edid;
149 gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
150 high_avail / vgpu_types[i].high_mm);
151
152 if (GRAPHICS_VER(gvt->gt->i915) == 8)
153 sprintf(gvt->types[i].name, "GVTg_V4_%s",
154 vgpu_types[i].name);
155 else if (GRAPHICS_VER(gvt->gt->i915) == 9)
156 sprintf(gvt->types[i].name, "GVTg_V5_%s",
157 vgpu_types[i].name);
158
159 gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
160 i, gvt->types[i].name,
161 gvt->types[i].avail_instance,
162 gvt->types[i].low_gm_size,
163 gvt->types[i].high_gm_size, gvt->types[i].fence,
164 gvt->types[i].weight,
165 vgpu_edid_str(gvt->types[i].resolution));
166 }
167
168 gvt->num_types = i;
169 return 0;
170}
171
/**
 * intel_gvt_clean_vgpu_types - free the vGPU type table
 * @gvt: GVT device
 */
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}
176
177static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
178{
179 int i;
180 unsigned int low_gm_avail, high_gm_avail, fence_avail;
181 unsigned int low_gm_min, high_gm_min, fence_min;
182
183
184
185
186 low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
187 gvt->gm.vgpu_allocated_low_gm_size;
188 high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
189 gvt->gm.vgpu_allocated_high_gm_size;
190 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
191 gvt->fence.vgpu_allocated_fence_num;
192
193 for (i = 0; i < gvt->num_types; i++) {
194 low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
195 high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
196 fence_min = fence_avail / gvt->types[i].fence;
197 gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
198 fence_min);
199
200 gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
201 i, gvt->types[i].name,
202 gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
203 gvt->types[i].high_gm_size, gvt->types[i].fence);
204 }
205}
206
207
208
209
210
211
212
213
/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * Marks the vGPU active under its own lock.
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->vgpu_lock);
}
220
221
222
223
224
225
226
227
228
/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * Marks the vGPU inactive, waits for any in-flight workloads to drain,
 * then stops the scheduler from selecting it.
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;

	/*
	 * Drop the lock while waiting for idle; presumably the workload
	 * completion path also takes vgpu_lock — confirm against scheduler.
	 */
	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}
245
246
247
248
249
250
251
252
253
254
/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * Deactivates the vGPU, then discards its runtime state: pending
 * workloads on all engines, dmabuf objects, and the D3-entered flag.
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	vgpu->d3_entered = false;
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}
265
266
267
268
269
270
271
272
/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * Removes the vGPU from the IDR, tears down all of its per-vGPU state,
 * recomputes the per-type available-instance counts, and frees the
 * structure. The vGPU must already be inactive.
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;

	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");

	/*
	 * Unpublish the vGPU first so no new lookups through the IDR can
	 * find it while teardown is in progress.
	 */
	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	mutex_unlock(&gvt->lock);

	/* Teardown order mirrors creation in __intel_gvt_create_vgpu(). */
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	/* Scrub guest GGTT entries before the GM ranges are released. */
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	/* Freed resources are available again: refresh type capacities. */
	mutex_lock(&gvt->lock);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}
308
/* IDR id 0 is reserved for the idle vGPU; real vGPUs start at 1. */
#define IDLE_VGPU_IDR 0

/**
 * intel_gvt_create_idle_vgpu - create the idle virtual GPU
 * @gvt: GVT device
 *
 * Allocates the special idle vGPU (id IDLE_VGPU_IDR). Only its lock,
 * workload queues and scheduling policy are initialized — none of the
 * full per-vGPU state a real guest needs.
 *
 * Returns: the idle vGPU on success, ERR_PTR on failure.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	/* Fixed reserved id; the idle vGPU never goes through the IDR. */
	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	/* The idle vGPU is never activated as a guest. */
	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}
349
350
351
352
353
354
355
356
/**
 * intel_gvt_destroy_idle_vgpu - destroy the idle virtual GPU
 * @vgpu: the idle vGPU returned by intel_gvt_create_idle_vgpu()
 *
 * Only the scheduling policy was set up for the idle vGPU, so only that
 * is cleaned before the structure is freed.
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}
365
/*
 * Allocate and fully initialize one vGPU from @param: cfg space, MMIO,
 * GM/fence resources, hypervisor attachment, GTT, opregion, display,
 * submission and scheduling. On failure the goto chain unwinds exactly
 * the steps already completed, in reverse order.
 *
 * Caller holds gvt->lock (see intel_gvt_create_vgpu()).
 */
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
		     param->handle, param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	/* id 0 (IDLE_VGPU_IDR) is reserved; real vGPU ids start at 1. */
	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init_base(&vgpu->object_idr, 1);
	intel_vgpu_init_cfg_space(vgpu, param->primary);
	vgpu->d3_entered = false;

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	/* Resources are assigned now; publish them to the guest. */
	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	/*
	 * NOTE(review): EDID port appears platform dependent (BDW/BXT use
	 * port B, others port D) — confirm against the display emulation.
	 */
	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
	else
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;

	/* Error unwinding: undo completed init steps in reverse order. */
out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}
469
470
471
472
473
474
475
476
477
478
479
480struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
481 struct intel_vgpu_type *type)
482{
483 struct intel_vgpu_creation_params param;
484 struct intel_vgpu *vgpu;
485
486 param.handle = 0;
487 param.primary = 1;
488 param.low_gm_sz = type->low_gm_size;
489 param.high_gm_sz = type->high_gm_size;
490 param.fence_sz = type->fence;
491 param.weight = type->weight;
492 param.resolution = type->resolution;
493
494
495 param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
496 param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
497
498 mutex_lock(&gvt->lock);
499 vgpu = __intel_gvt_create_vgpu(gvt, ¶m);
500 if (!IS_ERR(vgpu)) {
501
502 intel_gvt_update_vgpu_types(gvt);
503 intel_gvt_update_reg_whitelist(vgpu);
504 }
505 mutex_unlock(&gvt->lock);
506
507 return vgpu;
508}
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
539 intel_engine_mask_t engine_mask)
540{
541 struct intel_gvt *gvt = vgpu->gvt;
542 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
543 intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
544
545 gvt_dbg_core("------------------------------------------\n");
546 gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
547 vgpu->id, dmlr, engine_mask);
548
549 vgpu->resetting_eng = resetting_eng;
550
551 intel_vgpu_stop_schedule(vgpu);
552
553
554
555
556 if (scheduler->current_vgpu == NULL) {
557 mutex_unlock(&vgpu->vgpu_lock);
558 intel_gvt_wait_vgpu_idle(vgpu);
559 mutex_lock(&vgpu->vgpu_lock);
560 }
561
562 intel_vgpu_reset_submission(vgpu, resetting_eng);
563
564 if (engine_mask == ALL_ENGINES || dmlr) {
565 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
566 if (engine_mask == ALL_ENGINES)
567 intel_vgpu_invalidate_ppgtt(vgpu);
568
569 if (dmlr) {
570 if(!vgpu->d3_entered) {
571 intel_vgpu_invalidate_ppgtt(vgpu);
572 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
573 }
574 intel_vgpu_reset_ggtt(vgpu, true);
575 intel_vgpu_reset_resource(vgpu);
576 }
577
578 intel_vgpu_reset_mmio(vgpu, dmlr);
579 populate_pvinfo_page(vgpu);
580
581 if (dmlr) {
582 intel_vgpu_reset_display(vgpu);
583 intel_vgpu_reset_cfg_space(vgpu);
584
585 vgpu->failsafe = false;
586
587
588
589
590 if(vgpu->d3_entered)
591 vgpu->d3_entered = false;
592 else
593 vgpu->pv_notified = false;
594 }
595 }
596
597 vgpu->resetting_eng = 0;
598 gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
599 gvt_dbg_core("------------------------------------------\n");
600}
601
602
603
604
605
606
607
608
/**
 * intel_gvt_reset_vgpu - full reset of a virtual GPU
 * @vgpu: virtual GPU
 *
 * Takes vgpu_lock and performs a device-model-level reset (dmlr = true)
 * across all engines.
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}
615