/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

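/*
 * Out-of-sync shadowing knobs: when enable_out_of_sync is set, guest PTE
 * pages that are written repeatedly are un-write-protected and re-synced
 * in bulk at workload submission time instead of trapping every write.
 */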
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/**
 * intel_gvt_ggtt_validate_range - validate a GGTT range
 * @vgpu: a vGPU
 * @addr: the starting guest gm address of the range
 * @size: size of the range in bytes
 *
 * Returns:
 * True if the whole range lies inside the vGPU's aperture or hidden GM
 * space, false otherwise.
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if (size == 0)
		return vgpu_gmadr_is_valid(vgpu, addr);

	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
		return true;
	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
		return true;

	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
		   addr, size);
	return false;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
		     "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		     "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the entry type used inside this level of page table
 * - the page table type of this level
 * - the page table type of the next level
 * - the entry type when the PSE bit is set
 */
struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	/* We take IPS bit as 'PSE' for PTE level. */
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct intel_gt *gt)
{
	mmio_hw_access_pre(gt);
	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(gt);
}

static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)

#define GTT_SPTE_FLAG_MASK	GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED	BIT(52)

#define GTT_64K_PTE_STRIDE 16
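/* A 64K PTE covers 16 consecutive 4K PTE slots, hence the stride of 16. */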

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
		e->val64 &= ~ADDR_64K_MASK;
		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & _PAGE_PSE);
}

static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
{
	if (gen8_gtt_test_pse(e)) {
		switch (e->type) {
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			e->val64 &= ~_PAGE_PSE;
			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
			break;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
			e->val64 &= ~_PAGE_PSE;
			break;
		default:
			WARN_ON(1);
		}
	}
}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return false;

	return !!(e->val64 & GEN8_PDE_IPS_64K);
}

static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return;

	e->val64 &= ~GEN8_PDE_IPS_64K;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit,
	 * and that still works, so root pointer entries need to be treated
	 * specially here.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

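/*
 * Generate the per-level gma-to-index helpers: each one extracts the
 * page-table index bits for its level from a graphics memory address.
 */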
#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.clear_pse = gen8_gtt_clear_pse,
	.clear_ips = gen8_gtt_clear_ips,
	.test_ips = gen8_gtt_test_ips,
	.clear_64k_splited = gen8_gtt_clear_64k_splited,
	.set_64k_splited = gen8_gtt_set_64k_splited,
	.test_64k_splited = gen8_gtt_test_64k_splited,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

/* Update entry type per pse and ips bit. */
static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
		struct intel_gvt_gtt_entry *entry, bool ips)
{
	switch (entry->type) {
	case GTT_TYPE_PPGTT_PDE_ENTRY:
	case GTT_TYPE_PPGTT_PDP_ENTRY:
		if (pte_ops->test_pse(entry))
			entry->type = get_pse_type(entry->type);
		break;
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		if (ips)
			entry->type = get_pse_type(entry->type);
		break;
	default:
		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
	}

	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
}

/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
	update_entry_type_for_real(pte_ops, entry, false);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			     spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			     spt->vgpu);
	if (ret)
		return ret;

	update_entry_type_for_real(ops, e, guest ?
				   spt->guest_page.pde_ips : false);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			      spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			      spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

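/*
 * An spt (shadow page table page) pairs one guest page-table page with
 * the host page that shadows it; the shadow page is what the hardware
 * actually walks.
 */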
static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.gfn) {
		if (spt->guest_page.oos_page)
			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
	}

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}

static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_ppgtt_spt *spt, *spn;
	struct radix_tree_iter iter;
	LIST_HEAD(all_spt);
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
		spt = radix_tree_deref_slot(slot);
		list_move(&spt->post_shadow_list, &all_spt);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
		ppgtt_free_spt(spt);
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	return ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
	struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unmap_dma;

	return spt;

err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}

/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
		unsigned long gfn, bool guest_pde_ips)
{
	struct intel_vgpu_ppgtt_spt *spt;
	int ret;

	spt = ppgtt_alloc_spt(vgpu, type);
	if (IS_ERR(spt))
		return spt;

	/*
	 * Init guest_page.
	 */
	ret = intel_vgpu_register_page_track(vgpu, gfn,
			ppgtt_write_protection_handler, spt);
	if (ret) {
		ppgtt_free_spt(spt);
		return ERR_PTR(ret);
	}

	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;
	spt->guest_page.pde_ips = guest_pde_ips;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);

	return spt;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

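/*
 * Iterators over page-table entries. When a page table is in 64K (IPS)
 * mode only every 16th slot is meaningful, so step by GTT_64K_PTE_STRIDE.
 */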
#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
		if (!ppgtt_get_shadow_entry(spt, e, i))

static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
	atomic_inc(&spt->refcount);
}

static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
	return atomic_dec_return(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	enum intel_gvt_gtt_type cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
	    && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type);

		if (!gtt_type_is_pt(cur_pt_type) ||
		    !gtt_type_is_pt(cur_pt_type + 1)) {
			drm_WARN(&i915->drm, 1,
				 "Invalid page table type, cur_pt_type is: %d\n",
				 cur_pt_type);
			return -EINVAL;
		}

		cur_pt_type += 1;

		if (ops->get_pfn(e) ==
		    vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
			     ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}

static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	/* Uninitialized spte or unshadowed spte. */
	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;

	trace_spt_change(spt->vgpu->id, "die", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	if (ppgtt_put_spt(spt) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
			/* We don't setup 64K shadow entry so far. */
			WARN(1, "suspicious 64K gtt entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			gvt_vdbg_mm("invalidate 2M entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
		     spt, e.val64, e.type);
	return ret;
}

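/*
 * 64K (IPS) paging is opt-in via the GAMW register on Gen9/Gen10 and is
 * always available from Gen11 onwards.
 */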
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

	if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
			  GAMW_ECO_ENABLE_64K_IPS_FIELD;

		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		/* 64K paging only controlled by IPS bit in PTE now. */
		return true;
	} else
		return false;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	bool ips = false;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt) {
		ppgtt_get_spt(spt);

		if (ips != spt->guest_page.pde_ips) {
			spt->guest_page.pde_ips = ips;

			gvt_dbg_mm("reshadow PDE since ips changed\n");
			clear_page(spt->shadow_page.vaddr);
			ret = ppgtt_populate_spt(spt);
			if (ret) {
				ppgtt_put_spt(spt);
				goto err;
			}
		}
	} else {
		int type = get_next_pt_type(we->type);

		if (!gtt_type_is_pt(type)) {
			ret = -EINVAL;
			goto err;
		}

		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto err;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto err_free_spt;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto err_free_spt;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;

err_free_spt:
	ppgtt_free_spt(spt);
	spt = NULL;
err:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	/* Because we always split 64KB pages, clear IPS in the shadow PDE. */
	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ops->clear_ips(se);

	ops->set_pfn(se, s->shadow_page.mfn);
}

/*
 * Check whether a 2M huge page can be shadowed directly.
 *
 * Returns 1 if 2MB huge gtt shadowing is possible, 0 if it is not,
 * or a negative error code on failure.
 */
static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
		return 0;

	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
	if (pfn == INTEL_GVT_INVALID_ADDR)
		return -EINVAL;

	return PageTransHuge(pfn_to_page(pfn));
}

static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
		struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
		struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *sub_spt;
	struct intel_gvt_gtt_entry sub_se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	unsigned long sub_index;
	int ret;

	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);

	start_gfn = ops->get_pfn(se);

	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
	if (IS_ERR(sub_spt))
		return PTR_ERR(sub_spt);

	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
		if (ret) {
			ppgtt_invalidate_spt(spt);
			return ret;
		}
		sub_se.val64 = se->val64;

		/* Copy the PAT field from the PDE. */
		sub_se.val64 &= ~_PAGE_PAT;
		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;

		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
	}

	/* Clear the dirty field. */
	se->val64 &= ~_PAGE_DIRTY;

	ops->clear_pse(se);
	ops->clear_ips(se);
	ops->set_pfn(se, sub_spt->shadow_page.mfn);
	ppgtt_set_shadow_entry(spt, se, index);
	return 0;
}

static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
		struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
		struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = *se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	int i, ret;

	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);

	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);

	start_gfn = ops->get_pfn(se);

	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
	ops->set_64k_splited(&entry);

	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + i, PAGE_SIZE, &dma_addr);
		if (ret)
			return ret;

		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(spt, &entry, index + i);
	}
	return 0;
}

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
		struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn, page_size = PAGE_SIZE;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
		gvt_vdbg_mm("shadow 64K gtt entry\n");
		/*
		 * The layout of 64K pages is special: the page size is
		 * controlled by the upper PDE. To keep it simple, we always
		 * split a 64K page into 4K pages in the shadow PT.
		 */
		return split_64KB_gtt_entry(vgpu, spt, index, &se);
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		gvt_vdbg_mm("shadow 2M gtt entry\n");
		ret = is_2MB_gtt_possible(vgpu, ge);
		if (ret == 0)
			return split_2MB_gtt_entry(vgpu, spt, index, &se);
		else if (ret < 0)
			return ret;
		page_size = I915_GTT_PAGE_SIZE_2M;
		break;
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
						      &dma_addr);
	if (ret)
		return -ENXIO;

	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, ge.val64, ge.type);
	return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_spt(s);
		if (ret)
			goto fail;
	} else {
		/* We don't setup 64K shadow entry so far. */
		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
		     "suspicious 64K entry\n");
		ppgtt_invalidate_pte(spt, se);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				 info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
		    && !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
			       spt, spt->guest_page.type,
			       new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
					struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
		ret = detach_oos_page(spt->vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
					struct intel_vgpu_oos_page, list);
	return attach_oos_page(oos_page, spt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadows for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-synced shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int i, ret;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one. That way the
	 * shadow page table stays valid in the window between the two steps.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(spt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		/* For 64KB splited entries, all of them need to be cleared. */
		if (ops->test_64k_splited(&old_se) &&
		    !(index % GTT_64K_PTE_STRIDE)) {
			gvt_vdbg_mm("remove splited 64K shadow entries\n");
			for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
				ops->clear_64k_splited(&old_se);
				ops->set_pfn(&old_se,
					vgpu->gtt.scratch_pt[type].page_mfn);
				ppgtt_set_shadow_entry(spt, &old_se, index + i);
			}
		} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
			   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
			ops->clear_pse(&old_se);
			ops->set_pfn(&old_se,
				     vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &old_se, index);
		} else {
			ops->set_pfn(&old_se,
				     vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &old_se, index);
		}
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
		     spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
	       && gtt_type_is_pte_pt(spt->guest_page.type)
	       && spt->guest_page.write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
		      &spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				   post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				 GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	/*
	 * For a page table with 64K gtt entries, only PTE#0, PTE#16,
	 * PTE#32, ... PTE#496 are used. Writes to the unused PTEs are
	 * ignored.
	 */
	if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
	    (index % GTT_64K_PTE_STRIDE)) {
		gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
			    index);
		return 0;
	}

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	spt->guest_page.write_cnt++;

	if (spt->guest_page.oos_page)
		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
			       false, 0, vgpu);

	if (can_do_out_of_sync(spt)) {
		if (!spt->guest_page.oos_page)
			ppgtt_allocate_oos_page(spt);

		ret = ppgtt_set_guest_page_oos(spt);
		if (ret < 0)
			return ret;
	}
	return 0;
}

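/* Drop all shadow root entries of a PPGTT mm and release their spts. */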
static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int index;

	if (!mm->ppgtt_mm.shadowed)
		return;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
		ppgtt_get_shadow_root_entry(mm, &se, index);

		if (!ops->test_present(&se))
			continue;

		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "destroy root pointer",
				       NULL, se.type, se.val64, index);
	}

	mm->ppgtt_mm.shadowed = false;
}

static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int index, ret;

	if (mm->ppgtt_mm.shadowed)
		return 0;

	mm->ppgtt_mm.shadowed = true;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
		ppgtt_get_guest_root_entry(mm, &ge, index);

		if (!ops->test_present(&ge))
			continue;

		trace_spt_guest_change(vgpu->id, __func__, NULL,
				       ge.type, ge.val64, index);

		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "populate root pointer",
				       NULL, se.type, se.val64, index);
	}

	return 0;
fail:
	invalidate_ppgtt_mm(mm);
	return ret;
}

static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return NULL;

	mm->vgpu = vgpu;
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);

	return mm;
}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{
	kfree(mm);
}

/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * The ppgtt mm object on success, an ERR_PTR on failure.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_PPGTT;

	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
	mm->ppgtt_mm.root_entry_type = root_entry_type;

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.link);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
	else
		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
		       sizeof(mm->ppgtt_mm.guest_pdps));

	ret = shadow_ppgtt_mm(mm);
	if (ret) {
		gvt_vgpu_err("failed to shadow ppgtt mm\n");
		vgpu_free_mm(mm);
		return ERR_PTR(ret);
	}

	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);

	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);

	return mm;
}

static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;
	unsigned long nr_entries;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_GGTT;

	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
	mm->ggtt_mm.virtual_ggtt =
		vzalloc(array_size(nr_entries,
				   vgpu->gvt->device_info.gtt_entry_size));
	if (!mm->ggtt_mm.virtual_ggtt) {
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	return mm;
}

/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: the kref embedded in the mm object
 *
 * This function is used to destroy a mm object for a vGPU.
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
		gvt_err("vgpu mm pin count bug detected\n");

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		list_del(&mm->ppgtt_mm.list);

		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
		list_del(&mm->ppgtt_mm.lru_list);
		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);

		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
	}

	vgpu_free_mm(mm);
}

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user doesn't want to use a vGPU mm object
 * anymore.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	atomic_dec_if_positive(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: target vgpu mm
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	atomic_inc(&mm->pincount);

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		ret = shadow_ppgtt_mm(mm);
		if (ret)
			return ret;

		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
		list_move_tail(&mm->ppgtt_mm.lru_list,
			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
	}

	return 0;
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	mutex_lock(&gvt->gtt.ppgtt_mm_lock);

	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);

		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->ppgtt_mm.lru_list);
		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
		invalidate_ppgtt_mm(mm);
		return 1;
	}
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}

/* Look up the next-level page table and read the indexed entry from it. */
2046static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
2047 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
2048{
2049 struct intel_vgpu *vgpu = mm->vgpu;
2050 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2051 struct intel_vgpu_ppgtt_spt *s;
2052
2053 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2054 if (!s)
2055 return -ENXIO;
2056
2057 if (!guest)
2058 ppgtt_get_shadow_entry(s, e, index);
2059 else
2060 ppgtt_get_guest_entry(s, e, index);
2061 return 0;
2062}

/*
 * intel_vgpu_gma_to_gpa - translate a GMA to a guest physical address
 * @mm: mm object, which could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a
 * specific graphics memory space into a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, levels = 0;
	int ret;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
		   mm->type != INTEL_GVT_MM_PPGTT);

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
			+ (gma & ~I915_GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
	} else {
		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e, 0);

			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
			gma_index[2] = gma_ops->gma_to_pde_index(gma);
			gma_index[3] = gma_ops->gma_to_pte_index(gma);
			levels = 4;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e,
					gma_ops->gma_to_l3_pdp_index(gma));

			gma_index[0] = gma_ops->gma_to_pde_index(gma);
			gma_index[1] = gma_ops->gma_to_pte_index(gma);
			levels = 2;
			break;
		default:
			GEM_BUG_ON(1);
		}

		/* walk the shadow page table and get gpa from guest entry */
		for (i = 0; i < levels; i++) {
			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == levels - 1));
			if (ret)
				goto err;

			if (!pte_ops->test_present(&e)) {
				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
				goto err;
			}
		}

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
			(gma & ~I915_GTT_PAGE_MASK);
		trace_gma_translate(vgpu->id, "ppgtt", 0,
				    mm->ppgtt_mm.root_entry_type, gma, gpa);
	}

	return gpa;
err:
	gvt_vgpu_err("fail to translate gma %lx (mm type %d)\n", gma, mm->type);
	return INTEL_GVT_INVALID_ADDR;
}
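
/*
 * For reference, assuming the standard gen8 4-level layout, a 48-bit GMA
 * decomposes as: bits 47..39 PML4 index, 38..30 PDP index, 29..21 PDE
 * index, 20..12 PTE index, 11..0 page offset. An illustrative sketch of
 * one of the index helpers used above:
 *
 *	pml4_index = (gma >> 39) & 0x1ff;
 *
 * e.g. a GMA of 0x7f8040003123 yields indexes 255/1/0/3 and offset 0x123.
 */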

static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = index << I915_GTT_PAGE_SHIFT;
	if (!intel_gvt_ggtt_validate_range(vgpu,
					   gma, 1 << I915_GTT_PAGE_SHIFT)) {
		gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
		memset(p_data, 0, bytes);
		return 0;
	}

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
	       bytes);
	return 0;
}

/*
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}

static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	pfn = pte_ops->get_pfn(entry);
	if (pfn != vgpu->gvt->gtt.scratch_mfn)
		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
						pfn << PAGE_SHIFT);
}

static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma, gfn;
	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
	dma_addr_t dma_addr;
	int ret;
	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
	bool partial_update = false;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

	/* the VM may configure the whole GGTT path of full initialization */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	e.type = GTT_TYPE_GGTT_PTE;
	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
	       bytes);

	/* If the ggtt entry size is 8 bytes and the update is split into
	 * two 4-byte writes, park the first write on a list and only
	 * update the shadow PTE when the second half arrives.
	 */
	if (bytes < info->gtt_entry_size) {
		bool found = false;

		list_for_each_entry_safe(pos, n,
				&ggtt_mm->ggtt_mm.partial_pte_list, list) {
			if (g_gtt_index == pos->offset >>
					info->gtt_entry_size_shift) {
				if (off != pos->offset) {
					/* the second partial part */
					int last_off = pos->offset &
						(info->gtt_entry_size - 1);

					memcpy((void *)&e.val64 + last_off,
						(void *)&pos->data + last_off,
						bytes);

					list_del(&pos->list);
					kfree(pos);
					found = true;
					break;
				}

				/* update of the first partial part */
				pos->data = e.val64;
				ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
				return 0;
			}
		}

		if (!found) {
			/* the first partial part */
			partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
			if (!partial_pte)
				return -ENOMEM;
			partial_pte->offset = off;
			partial_pte->data = e.val64;
			list_add_tail(&partial_pte->list,
				      &ggtt_mm->ggtt_mm.partial_pte_list);
			partial_update = true;
		}
	}

	if (!partial_update && (ops->test_present(&e))) {
		gfn = ops->get_pfn(&e);
		m.val64 = e.val64;
		m.type = e.type;

		/* one PTE update may be issued in multiple writes and the
		 * first write may not construct a valid gfn
		 */
		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
			goto out;
		}

		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
							      PAGE_SIZE, &dma_addr);
		if (ret) {
			gvt_vgpu_err("fail to populate guest ggtt entry\n");
			/* the guest may read/write the entry while it is only
			 * partially updated; the p2m mapping can fail then,
			 * so point the shadow entry at the scratch page
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		} else
			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
	} else {
		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		ops->clear_present(&m);
	}

out:
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);

	/* ggtt_invalidate_pte() needs the stale shadow entry to unmap it */
	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
	ggtt_invalidate_pte(vgpu, &e);

	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_invalidate(gvt->gt);
	return 0;
}
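
/*
 * Worked example of the partial-PTE path above (values assumed for
 * illustration): with an 8-byte entry size, a guest programming entry 0
 * with two 4-byte MMIO writes first writes the low dword, e.g.
 * 0x11223000 | _PAGE_PRESENT, at off 0x0. That write is parked on
 * partial_pte_list and only the guest view is updated. The later 4-byte
 * write of the high dword at off 0x4 is merged with the parked low dword
 * into one 64-bit PTE, and only then is the page pinned and the shadow
 * entry installed. A 4-byte write to a *different* entry in the meantime
 * leaves the parked half alone, since the list is keyed by g_gtt_index.
 */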

/*
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	int i;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);

	/* if the GGTT entry of the last submitted context is written,
	 * that context has probably been unpinned, so mark the last
	 * shadowed ctx as invalid.
	 */
	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!s->last_ctx[i].valid)
			continue;

		if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
			s->last_ctx[i].valid = false;
	}
	return ret;
}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type type)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	if (drm_WARN_ON(&i915->drm,
			type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			     4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
		   vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by filling the scratch pt with entries that point
	 * to the next-level scratch pt or scratch page. scratch_pt[type]
	 * is the scratch pt/scratch page used by a 'type' page table, and
	 * scratch_pt[type].page_mfn points to the scratch pt itself, while
	 * the entries within it point to scratch_pt[type - 1] (or, at the
	 * bottom, to the scratch page).
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set to the same values as in i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
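
/*
 * The resulting scratch hierarchy, sketched for the 4-level case: every
 * entry of a level's scratch table points at the scratch table one level
 * below,
 *
 *	PML4_PT -> PDP_PT -> PDE_PT -> PTE_PT (left zeroed, i.e. not present)
 *
 * so a guest walk through an unmapped range lands on harmless scratch
 * tables instead of stale or foreign mappings.
 */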

static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

/*
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory
 * virtualization components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);

	return create_scratch_page_tree(vgpu);
}

void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("shadow page tables are not fully freed\n");
		ppgtt_free_all_spt(vgpu);
	}
}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_gvt_partial_pte *pos, *next;

	list_for_each_entry_safe(pos, next,
				 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
				 list) {
		gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
			   pos->offset, pos->data);
		kfree(pos);
	}
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}

/*
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory
 * virtualization components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
	     "someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		free_page((unsigned long)oos_page->mem);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}
		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
		if (!oos_page->mem) {
			ret = -ENOMEM;
			kfree(oos_page);
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

/*
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: pdp root array
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}

/*
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object
 * @vgpu: a vGPU
 * @root_entry_type: PPGTT root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object for a guest.
 *
 * Returns:
 * pointer to mm object on success, ERR_PTR() if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}
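
/*
 * Illustrative sketch (an assumption, not driver code): a caller such as
 * a guest-notification handler would pair these get/put calls around the
 * lifetime the guest advertises for its page tables. The pdps[] contents
 * below are hypothetical.
 *
 *	u64 pdps[4] = { 0x1a2b3000 };	// hypothetical guest root pointer
 *	struct intel_vgpu_mm *mm;
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	// ... guest uses the PPGTT ...
 *	intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 */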

/*
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object for a guest and drop
 * the reference taken by intel_vgpu_get_ppgtt_mm().
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}

/*
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			     4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_init(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}

/*
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage to clean up
 * the mm components of a GVT device.
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}

/*
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * Tear down the shadow page tables of every shadowed PPGTT mm of this
 * vGPU; the mm objects themselves stay allocated and can be re-shadowed.
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			list_del_init(&mm->ppgtt_mm.lru_list);
			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}

/*
 * intel_vgpu_reset_ggtt - reset the GGTT entries of a vGPU
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries before reset
 *
 * This function is called at the vGPU create stage to reset all the
 * GGTT entries owned by this vGPU to point to the scratch page.
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(gvt->gt);
}

/*
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all GTT related
 * status, including GGTT, PPGTT and scratch pages.
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
}