#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

/* Fill the vGPU's PVINFO page, which the guest driver reads to discover its
 * graphics memory ranges, fence allocation and host capabilities.
 */
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;

	/*
	 * Scheduling weight of the type: a larger weight gets a
	 * proportionally larger share of GPU time. Legal values are
	 * 1..VGPU_MAX_WEIGHT (checked in intel_gvt_init_vgpu_types()).
	 */
	unsigned int weight;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
	/* low GM, high GM, fence, weight, resolution, name suffix */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
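
/*
 * Illustration (hypothetical numbers, not part of the driver): in
 * intel_gvt_init_vgpu_types() below, the table above is turned into per-type
 * instance limits. Assuming, for example, 448 MB of low GM and 3584 MB of
 * high GM remain after the host reservation:
 *
 *	"8" type (64 MB low, 384 MB high):
 *		avail_instance = min(448 / 64, 3584 / 384) = min(7, 9) = 7
 *
 * so at most seven such instances could be created on such a host before any
 * resources are handed out.
 */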

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Build the vGPU type list from the static vgpu_types table and the graphics
 * memory actually available on this GPU. Each type records in avail_instance
 * how many instances of it can currently be created.
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/*
	 * Type names have the form GVTg_Vx_y, where x encodes the GPU
	 * generation (V4 for Gen8, V5 for Gen9) and y is the name suffix
	 * from vgpu_types[]. How many instances of each type fit depends
	 * on the aperture and hidden GM left after the host reservation.
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
		    vgpu_types[i].weight > VGPU_MAX_WEIGHT)
			return -EINVAL;

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN(gvt->gt->i915, 8))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
				vgpu_types[i].name);
		else if (IS_GEN(gvt->gt->i915, 9))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
				vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

	/*
	 * Recompute how many instances of each type can still be created
	 * from the low/high GM and fence registers that remain after the
	 * host reservation and the vGPUs already allocated.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
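
/*
 * Illustration (hypothetical numbers, not part of the driver): continuing
 * the sizing example above, after one "8" type vGPU (64 MB low GM, 384 MB
 * high GM, 4 fences) has been created, intel_gvt_update_vgpu_types()
 * recomputes roughly
 *
 *	low_gm_avail  = 448 MB  - 64 MB  -> 384 / 64   = 6
 *	high_gm_avail = 3584 MB - 384 MB -> 3200 / 384 = 8
 *	fence_min     = (gvt_fence_sz(gvt) - HOST_FENCE - 4) / 4
 *
 * so avail_instance for that type drops to min(6, 8, fence_min).
 */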

/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * Mark a virtual GPU as active so its workloads can be scheduled.
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * Mark a virtual GPU as inactive, wait for its running workloads to finish
 * and stop scheduling it.
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;

	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * Deactivate a virtual GPU and drop its pending workloads and dmabuf
 * objects, typically when its user stops using it.
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	vgpu->d3_entered = false;
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * Tear down all per-vGPU state (scheduling policy, submission, display,
 * opregion, GTT, MMIO, resources) and free the vGPU. The vGPU must no
 * longer be active.
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;

	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");

	/*
	 * Remove the vGPU from the idr first, so the idr_is_empty() check
	 * below sees a consistent view when deciding whether interrupt
	 * support can be torn down.
	 */
	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	mutex_unlock(&gvt->lock);

	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}

#define IDLE_VGPU_IDR 0

/**
 * intel_gvt_create_idle_vgpu - create the idle virtual GPU
 * @gvt: GVT device
 *
 * The idle vGPU is a lightweight placeholder used by the scheduler; it only
 * needs workload queues and a scheduling policy, not the full MMIO, GTT and
 * display state of a regular vGPU.
 *
 * Returns:
 * Pointer to the idle vGPU on success, ERR_PTR() on failure.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_destroy_idle_vgpu - destroy the idle virtual GPU
 * @vgpu: the idle virtual GPU
 *
 * Clean up the idle vGPU's scheduling policy and free it.
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
		     param->handle, param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init_base(&vgpu->object_idr, 1);
	intel_vgpu_init_cfg_space(vgpu, param->primary);
	vgpu->d3_entered = false;

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
	else
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the new vGPU
 *
 * Allocate and initialize a virtual GPU of the given type, then refresh the
 * remaining instance counts of all types.
 *
 * Returns:
 * Pointer to the new vGPU on success, ERR_PTR() on failure.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* the creation parameters are expressed in MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	mutex_lock(&gvt->lock);
	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (!IS_ERR(vgpu))
		/* refresh the remaining instance counts of all types */
		intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	return vgpu;
}
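
/*
 * Usage sketch (illustrative only, not part of this file): a hypervisor
 * backend such as kvmgt would typically pick one of the published types and
 * drive the vGPU through its life cycle roughly like this:
 *
 *	struct intel_vgpu_type *type = &gvt->types[0];	(hypothetical choice)
 *	struct intel_vgpu *vgpu = intel_gvt_create_vgpu(gvt, type);
 *
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 *	intel_gvt_activate_vgpu(vgpu);
 *	...
 *	intel_gvt_deactivate_vgpu(vgpu);
 *	intel_gvt_destroy_vgpu(vgpu);
 */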

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: true for a device model level reset (DMLR), false for a GT reset
 * @engine_mask: engines to reset for a GT reset
 *
 * A device model level reset emulates a PCI-level reset and brings the whole
 * vGPU back to the state it had when it was created, so an instance can be
 * safely reused. A GT reset only resets the engines selected by @engine_mask,
 * mirroring the full or per-engine GT resets a guest can request through the
 * virtual GDRST register.
 *
 * The caller must hold vgpu->vgpu_lock.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);

	/*
	 * current_vgpu is set to NULL once the scheduler has been stopped
	 * by the vGPU that triggered the reset; in that case wait for its
	 * in-flight workloads to drain before resetting.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);

	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		if (engine_mask == ALL_ENGINES)
			intel_vgpu_invalidate_ppgtt(vgpu);

		if (dmlr) {
			if (!vgpu->d3_entered) {
				intel_vgpu_invalidate_ppgtt(vgpu);
				intel_vgpu_destroy_all_ppgtt_mm(vgpu);
			}
			intel_vgpu_reset_ggtt(vgpu, true);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);

		if (dmlr) {
			intel_vgpu_reset_display(vgpu);
			intel_vgpu_reset_cfg_space(vgpu);
			/* only clear the failsafe mode on a device model level reset */
			vgpu->failsafe = false;
			/*
			 * PCI_D0 is written before a DMLR, so d3_entered has
			 * served its purpose by now; clear it. Otherwise the
			 * guest has to notify its PV capabilities again.
			 */
			if (vgpu->d3_entered)
				vgpu->d3_entered = false;
			else
				vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * Perform a device model level reset of @vgpu with the vgpu lock held.
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}