#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
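	/* setup the ballooning information */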
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;
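	/*
	 * A vGPU with a weight of 8 gets twice as much GPU time as a
	 * vGPU with a weight of 4 on a contended host; each type below
	 * carries its own weight. Legal weights range from 1 to
	 * VGPU_MAX_WEIGHT (16).
	 */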
	unsigned int weight;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
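	/* Fixed vGPU type table */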
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};

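/**
 * intel_gvt_init_vgpu_types - initialize the vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the GM and fence resources left
 * over after the host reservation.
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */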
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
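	/*
	 * The vGPU type name is GVTg_Vx_y: x is the physical GPU
	 * generation (V4 for Gen8, V5 for Gen9, matching the IS_GEN8()
	 * and IS_GEN9() checks below), and y is the nominal number of
	 * instances of that type the device can host.
	 *
	 * Depending on the physical SKU resources, a host may expose
	 * types such as GVTg_V5_8, GVTg_V5_4, GVTg_V5_2 and GVTg_V5_1;
	 * each type's avail_instance tells how many more vGPUs of that
	 * type can still be created.
	 */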
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
		    vgpu_types[i].weight > VGPU_MAX_WEIGHT) {
			kfree(gvt->types);
			return -EINVAL;
		}

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
				vgpu_types[i].name);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
				vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

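	/*
	 * This should depend on the maximum hw resource size, but keep
	 * the static config for now: what is available is the total
	 * minus the host reservation and what existing vGPUs already
	 * hold.
	 */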
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}

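/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to activate a virtual GPU.
 */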
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->gvt->lock);
}

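/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to deactivate a virtual GPU.
 * Scheduling of the vGPU is stopped and any in-flight workloads are
 * allowed to drain first.
 */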
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	vgpu->active = false;

	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_stop_schedule(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);

	mutex_unlock(&gvt->lock);
}

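/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU. The
 * vGPU must have been deactivated beforehand.
 */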
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	WARN(vgpu->active, "vGPU is still active!\n");

	intel_gvt_debugfs_remove_vgpu(vgpu);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);
}

#define IDLE_VGPU_IDR 0

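/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when a user wants to create an idle virtual GPU.
 *
 * Returns:
 * Pointer to the idle intel_vgpu on success, ERR_PTR() on failure.
 */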
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

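/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy an idle virtual GPU
 * created by intel_gvt_create_idle_vgpu().
 */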
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	intel_vgpu_clean_sched_policy(vgpu);
	vfree(vgpu);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
		     param->handle, param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init(&vgpu->object_idr);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	ret = intel_gvt_debugfs_add_vgpu(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_debugfs;

	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_debugfs:
	intel_gvt_debugfs_remove_vgpu(vgpu);
out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}

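/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when a user wants to create a virtual GPU of a
 * given type.
 *
 * Returns:
 * Pointer to the new intel_vgpu on success, ERR_PTR() on failure.
 *
 * A caller (a hypervisor backend, for instance) would typically pick an
 * entry from gvt->types and check the result with IS_ERR():
 *
 *	vgpu = intel_gvt_create_vgpu(gvt, &gvt->types[i]);
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 */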
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;
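	/* creation params are in MB, while the type sizes are in bytes */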
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (IS_ERR(vgpu))
		return vgpu;

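	/* recalculate how many instances of each type can still be created */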
	intel_gvt_update_vgpu_types(gvt);

	return vgpu;
}

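/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: true for vGPU Device Model Level Reset, false for GT reset
 * @engine_mask: engines to reset for a GT reset
 *
 * This function is called when a user wants to reset a virtual GPU through
 * a device model level reset or a GT reset. The caller must hold the gvt
 * lock.
 *
 * A Device Model Level Reset (DMLR) simulates a PCI-level reset and returns
 * the whole vGPU to the default state it had when it was created; this is
 * what allows a vGPU instance to be reused safely by another virtual
 * machine.
 *
 * A GT reset (full or per-engine) is a soft reset of the GPU engines
 * (Render, Blitter, Video, Video Enhancement); unlike a DMLR, it only
 * resets the state tied to the engines selected by @engine_mask.
 */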
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);

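	/*
	 * current_vgpu will be set to NULL after stopping the scheduler
	 * when the reset is triggered by the current vGPU; in that case,
	 * wait until its pending workloads have drained.
	 */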
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);

	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		intel_vgpu_invalidate_ppgtt(vgpu);

		if (dmlr) {
			intel_vgpu_reset_gtt(vgpu);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);
		intel_vgpu_reset_display(vgpu);

		if (dmlr) {
			intel_vgpu_reset_cfg_space(vgpu);
			/* only clear failsafe mode on a full DMLR */
			vgpu->failsafe = false;
			vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}

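/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Device Model Level Reset)
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants a full device model level reset
 * of a virtual GPU; it takes the gvt lock and performs a DMLR of all
 * engines.
 */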
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);
}