#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
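
/*
 * Guest-side sketch, assuming a PV-aware i915 guest: the guest compares
 * vgtif_reg(magic) against VGT_MAGIC to detect that it runs on GVT-g, then
 * reads the avail_rs.mappable/nonmappable ranges published above and
 * balloons its GGTT usage down to exactly the slice this vGPU was assigned.
 */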

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;

	/* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
	 * with a weight of 4 on a contended host; different vGPU types have
	 * different weights set. Legal weights range from 1 to 16.
	 */
	unsigned int weight;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
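
/*
 * Illustrative arithmetic, not used by the driver: VGPU_WEIGHT(8) expands
 * to 16 / 8 == 2 and VGPU_WEIGHT(1) to 16, so an "_8" instance gets 1/8 of
 * the scheduling budget of an "_1" instance on a contended host. Assuming
 * a hypothetical SKU with 512 MB of low and 4096 MB of high GM left after
 * the host reservation, the "_8" type could offer min(512/64, 4096/384)
 * == min(8, 10) == 8 instances; that is the avail_instance computed below.
 */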

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt : GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;

	/* vGPU type name is defined as GVTg_Vx_y which contains the
	 * physical GPU generation type (e.g V4 as BDW server, V5 as
	 * SKL server).
	 *
	 * Depending on the physical SKU resource, we might see vGPU
	 * types like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different
	 * types of vGPU can be created on the same physical GPU,
	 * depending on the available resource. Each vGPU type carries
	 * an "avail_instance" count of how many vGPU instances of this
	 * type can still be created.
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
		    vgpu_types[i].weight > VGPU_MAX_WEIGHT) {
			kfree(gvt->types);
			return -EINVAL;
		}

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN(gvt->dev_priv, 8))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
				vgpu_types[i].name);
		else if (IS_GEN(gvt->dev_priv, 9))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
				vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

	/* This should depend on the maximum hw resource size, but keep
	 * it on the static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
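
/*
 * A worked example of the update above, with made-up numbers: if 384 MB of
 * low GM, 3072 MB of high GM and 24 fences remain after the existing
 * allocations, the "_8" type (64 MB / 384 MB / 4 fences) is left with
 * min(min(384/64, 3072/384), 24/4) == min(min(6, 8), 6) == 6 creatable
 * instances.
 */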

/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->gvt->lock);
}

/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;

	if (atomic_read(&vgpu->submission.running_workload_num)) {
		/* Drop the lock while waiting so in-flight workloads can
		 * make progress and retire.
		 */
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all runtime information will be
 * destroyed.
 *
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&vgpu->vgpu_lock);

	WARN(vgpu->active, "vGPU is still active!\n");

	/* Tear down in roughly the reverse order of __intel_gvt_create_vgpu(). */
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}

#define IDLE_VGPU_IDR 0

/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}
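
/*
 * Note on intent, inferred from the scheduler's use of IDLE_VGPU_IDR: the
 * idle vGPU owns only a scheduling policy and empty workload queues, and
 * stands in as the "current" vGPU when no real vGPU holds the hardware.
 */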

/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy an idle virtual GPU.
 *
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
		     param->handle, param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init(&vgpu->object_idr);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	ret = intel_gvt_debugfs_add_vgpu(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	/* TODO: add more platforms support */
	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* XXX current param based on MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	mutex_lock(&gvt->lock);
	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (!IS_ERR(vgpu))
		/* calculate left instance change for types */
		intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	return vgpu;
}
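
/*
 * Hedged usage sketch, not code in this file: a management-side flow built
 * on the helpers above would roughly be
 *
 *	vgpu = intel_gvt_create_vgpu(gvt, &gvt->types[n]);
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 *	intel_gvt_activate_vgpu(vgpu);
 *	...	guest runs and submits workloads	...
 *	intel_gvt_deactivate_vgpu(vgpu);
 *	intel_gvt_destroy_vgpu(vgpu);
 */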

/**
 * intel_gvt_reset_vgpu_locked - reset virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the vgpu lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to
 * reset the whole vGPU to the default state, as when it was created. This
 * vGPU function is required both for functionality and security concerns.
 * The ultimate goal of vGPU FLR is to reuse a vGPU instance by virtual
 * machines. When we assign a vGPU to a virtual machine we must issue such
 * a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement), as defined by the GPU Spec.
 * Unlike the FLR, a GT reset only resets the particular resources of a vGPU
 * named by the reset request. The guest driver can issue a GT reset by
 * programming the virtual GDRST register to reset specific virtual GPU
 * engines or all engines.
 *
 * The parameter @dmlr is recommended to be clear when triggering the
 * reset from the guest driver (GDRST).
 *
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);

	/*
	 * The current_vgpu will set to NULL after stopping the
	 * scheduler when the reset is triggered by current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);

	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		intel_vgpu_invalidate_ppgtt(vgpu);
		/* fence will not be reset during virtual reset */
		if (dmlr) {
			intel_vgpu_reset_gtt(vgpu);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);
		intel_vgpu_reset_display(vgpu);

		if (dmlr) {
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset the failsafe mode when dmlr reset */
			vgpu->failsafe = false;
			vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
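
/*
 * Hedged examples of the two reset flavors handled above. A guest write to
 * the virtual GDRST register asking for a single-engine reset maps to a GT
 * reset with only that engine's bit set (the exact bit name, e.g. BIT(RCS0),
 * depends on the i915 engine definitions of this kernel):
 *
 *	intel_gvt_reset_vgpu_locked(vgpu, false, engine_bit);
 *
 * whereas a device model level reset clears everything:
 *
 *	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
 *
 * as intel_gvt_reset_vgpu() below does.
 */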

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}