#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
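
/*
 * GPU virtual memory comes in two flavours:
 *
 * The Global GTT (GGTT) is a single address space shared by all contexts
 * and by the display engine; its entries are written directly through an
 * ioremapped window (ggtt->gsm).
 *
 * Per-process GTTs (PPGTT) give each context its own address space. On
 * gen6/7 this is a single-level layout: one page directory of 512 PDEs,
 * each pointing to a page table of 512 PTEs. On gen8+ the layout mirrors
 * x86 paging: PML4 -> PDP -> PD -> PT for the full 48b (4-level) mode,
 * or a 4-entry PDP as the top level for the legacy 32b (3-level) mode.
 *
 * Unused ranges at every level point at scratch structures, so a lookup
 * of an unbound address always resolves to the scratch page.
 */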
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes to the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* Full PPGTT is not supported when running as a vGPU guest */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable PPGTT on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
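
/*
 * A gen8 PTE is the 4KiB-aligned dma address of the backing page, tagged
 * with present + read/write in the low bits and a PPAT index selecting
 * the caching mode: uncached for I915_CACHE_NONE, write-through for
 * I915_CACHE_WT (display/eLLC), write-back for everything else.
 */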
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}
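
/*
 * Page-table pages are allocated through a small per-vm cache
 * (vm->free_pages, a pagevec): freed pages are stashed rather than
 * returned to the system, both to amortize allocation cost and to avoid
 * bouncing the page attributes between WB and WC when the page tables
 * must be mapped write-combining (vm->pt_kmap_wc).
 */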
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (vm->free_pages.nr)
		return vm->free_pages.pages[--vm->free_pages.nr];

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	if (vm->pt_kmap_wc)
		set_pages_array_wc(&page, 1);

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm)
{
	GEM_BUG_ON(!pagevec_count(&vm->free_pages));

	if (vm->pt_kmap_wc)
		set_pages_array_wb(vm->free_pages.pages,
				   pagevec_count(&vm->free_pages));

	__pagevec_release(&vm->free_pages);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm);
}

static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);
	int i;

	/* A 4KiB page holds 512 64b entries */
	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	cleanup_page_dma(vm, &vm->scratch_page);
}
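
/*
 * Every level of the page-table tree has a pre-initialized scratch
 * object: unused PTEs point at the scratch page, unused PDEs at the
 * scratch page table, and so on up the hierarchy. Clearing a range is
 * then just rewriting entries to point back at scratch.
 */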
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}
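
/*
 * Broadwell page directory pointer descriptors: in 3-level (legacy 32b)
 * mode the hardware reads the four PDP entries from per-ring registers,
 * so a context switch must reload them. The registers are written from
 * the ring itself via MI_LOAD_REGISTER_IMM, one LRI per 32b half of each
 * 64b address, so the update is ordered with the request that needs it.
 */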
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/*
 * PDE TLBs are a pain to invalidate on GEN8+. When we modify the page
 * table structures, we mark them dirty so that the context switching
 * code takes the extra steps needed to flush them.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/*
 * Removes entries from a single page table, releasing it if it's empty.
 * The caller uses the return value to prune the now-empty level above.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/*
 * Removes entries from a single page directory pointer, releasing any
 * page directory that becomes empty.
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/*
 * Removes entries from the top-level pml4 of a 4-level page table,
 * releasing any page directory pointer that becomes empty. Empty
 * entries always point at scratch_pdp.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}
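
/*
 * Insertion walks the scatterlist and the page-table tree in lockstep:
 * sgt_dma is a cursor over the backing store's dma segments, while
 * gen8_insert_pte splits a GTT offset into its per-level indices
 * (pml4e:pdpe:pde:pte for 48b addressing, 9 bits per level above the
 * 12-bit page offset).
 */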
struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
};

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		gen8_pml4e_index(start),
		gen8_pdpe_index(start),
		gen8_pde_index(start),
		gen8_pte_index(start),
	};
}

static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/*
				 * Wrapping the pdpe means we crossed into
				 * the next pml4e; report back to the 4lvl
				 * caller so it can advance. (Never reached
				 * for 3lvl, limited by the sg length.)
				 */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
				      cache_level);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
					     &idx, cache_level))
		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
}

static void gen8_free_page_tables(struct i915_address_space *vm,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(vm);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (use_4lvl(vm)) {
		vm->scratch_pdp = alloc_pdp(vm);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (use_4lvl(vm))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(vm, vm->scratch_pd);
free_pt:
	free_pt(vm, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(vm);

	return ret;
}
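
/*
 * When running as a GVT-g guest, tell the host where the top-level page
 * tables live (via the vgtif mailbox registers) so it can shadow them;
 * the same message is sent on both creation and destruction.
 */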
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	enum vgt_g2v_type msg;
	int i;

	if (use_4lvl(vm)) {
		const u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	if (use_4lvl(vm))
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	int i;

	for (i = 0; i < pdpes; i++) {
		if (pdp->page_directory[i] == vm->scratch_pd)
			continue;

		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
	}

	free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
			continue;

		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(&ppgtt->base, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (use_4lvl(vm))
		gen8_ppgtt_cleanup_4lvl(ppgtt);
	else
		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);

	gen8_free_scratch(vm);
}
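
/*
 * Allocation populates the tree on demand: any level still pointing at
 * scratch is replaced with a freshly initialized structure before
 * descending. On failure the range allocated so far ([from, start)) is
 * cleared again, which also frees the partially built levels.
 */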
static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
{
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind;

			gen8_initialize_pt(vm, pt);

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
			pd->used_pdes++;
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
		}

		pt->used_ptes += gen8_pte_count(start, length);
	}
	return 0;

unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pd = alloc_pd(vm);
			if (IS_ERR(pd))
				goto unwind;

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			pdp->used_pdpes++;
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  u64 start, u64 length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory *pd;
	u32 pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		u64 pd_len = length;
		u64 pd_start = start;
		u32 pde;

		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			u32 pte;
			gen8_pte_t *pt_vaddr;

			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
				continue;

			pt_vaddr = kmap_atomic_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				/* Widen before shifting to avoid 32b overflow */
				u64 va = ((u64)pdpe << GEN8_PDPE_SHIFT |
					  (u64)pde << GEN8_PDE_SHIFT |
					  (u64)pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	u64 start = 0, length = ppgtt->base.total;

	if (use_4lvl(vm)) {
		u64 pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
				continue;

			seq_printf(m, " PML4E #%llu\n", pml4e);
			gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
		}
	} else {
		gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
	}
}

static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
	struct i915_page_directory *pd;
	u64 start = 0, length = ppgtt->base.total;
	u64 from = start;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		pd = alloc_pd(vm);
		if (IS_ERR(pd))
			goto unwind;

		gen8_initialize_pd(vm, pd);
		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
		pdp->used_pdpes++;
	}

	pdp->used_pdpes++; /* never remove */
	return 0;

unwind:
	start -= from;
	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		free_pd(vm, pd);
	}
	pdp->used_pdpes = 0;
	return -ENOMEM;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP
 * registers with a net effect resembling a 2-level page table in normal
 * x86 terms. Each PDP represents 1GB of memory:
 * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
		1ULL << 48 :
		1ULL << 32;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret) {
		ppgtt->base.total = 0;
		return ret;
	}

	/*
	 * There are only few exceptions for gen >=6. chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
		ppgtt->base.pt_kmap_wc = true;

	if (use_4lvl(vm)) {
		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->switch_mm = gen8_mm_switch_4lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
	} else {
		ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		if (intel_vgpu_active(dev_priv)) {
			ret = gen8_preallocate_top_level_pdp(ppgtt);
			if (ret) {
				__pdp_fini(&ppgtt->pdp);
				goto free_scratch;
			}
		}

		ppgtt->switch_mm = gen8_mm_switch_3lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
	}

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	u32 pd_entry, pte, pde;
	u32 start = 0, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);

		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;

			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
				  const unsigned int pde,
				  const struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		       ppgtt->pd_addr + pde);
}

/*
 * Write all the page tables found in the ppgtt structure to incrementing
 * page directories.
 */
static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
				  u32 start, u32 length)
{
	struct i915_page_table *pt;
	unsigned int pde;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mark_tlbs_dirty(ppgtt);
	wmb();
}

static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
	return ppgtt->pd.base.ggtt_offset << 10;
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
	return 0;
}

static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
				 GEN8_GFX_PPGTT_48B : 0;
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
	}
}

static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	u32 ecochk, ecobits;
	enum intel_engine_id id;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev_priv)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_engine(engine, dev_priv, id) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	u32 ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned int first_entry = start >> PAGE_SHIFT;
	unsigned int pde = first_entry / GEN6_PTES;
	unsigned int pte = first_entry % GEN6_PTES;
	unsigned int num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte =
		vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);

	while (num_entries) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
		unsigned int end = min(pte + num_entries, GEN6_PTES);
		gen6_pte_t *vaddr;

		num_entries -= end - pte;

		/*
		 * Note that the hw doesn't support removing PDE on the fly
		 * (they are cached inside the context with no means to
		 * invalidate the cache), so we can only reset the PTE
		 * entries back to scratch.
		 */
		vaddr = kmap_atomic_px(pt);
		do {
			vaddr[pte++] = scratch_pte;
		} while (pte < end);
		kunmap_atomic(vaddr);

		pte = 0;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      u64 start,
				      enum i915_cache_level cache_level,
				      u32 flags)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
	struct sgt_dma iter;
	gen6_pte_t *vaddr;

	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
	iter.sg = pages->sgl;
	iter.dma = sg_dma_address(iter.sg);
	iter.max = iter.dma + iter.sg->length;
	do {
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

		iter.dma += PAGE_SIZE;
		if (iter.dma == iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg)
				break;

			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + iter.sg->length;
		}

		if (++act_pte == GEN6_PTES) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
			act_pte = 0;
		}
	} while (1);
	kunmap_atomic(vaddr);
}

static int gen6_alloc_va_range(struct i915_address_space *vm,
			       u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;
	bool flush = false;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind_out;

			gen6_initialize_pt(vm, pt);
			ppgtt->pd.page_table[pde] = pt;
			gen6_write_pde(ppgtt, pde, pt);
			flush = true;
		}
	}

	if (flush) {
		mark_tlbs_dirty(ppgtt);
		wmb();
	}

	return 0;

unwind_out:
	/* Only the range allocated so far, [from, start), needs clearing */
	gen6_ppgtt_clear_range(vm, from, start - from);
	return -ENOMEM;
}

static int gen6_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		cleanup_scratch_page(vm);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory *pd = &ppgtt->pd;
	struct i915_page_table *pt;
	u32 pde;

	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, pd, pde)
		if (pt != vm->scratch_pt)
			free_pt(vm, pt);

	gen6_free_scratch(vm);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	/*
	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));

	ret = gen6_init_scratch(vm);
	if (ret)
		return ret;

	ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
				  GEN6_PD_SIZE, GEN6_PD_ALIGN,
				  I915_COLOR_UNEVICTABLE,
				  0, ggtt->base.total,
				  PIN_HIGH);
	if (ret)
		goto err_out;

	if (ppgtt->node.start < ggtt->mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	ppgtt->pd.base.ggtt_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

	return 0;

err_out:
	gen6_free_scratch(vm);
	return ret;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	return gen6_ppgtt_allocate_page_directories(ppgtt);
}

static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
				  u64 start, u64 length)
{
	struct i915_page_table *unused;
	u32 pde;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ppgtt->base.pte_encode = ggtt->base.pte_encode;
	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
		ppgtt->switch_mm = gen6_mm_switch;
	else if (IS_HASWELL(dev_priv))
		ppgtt->switch_mm = hsw_mm_switch;
	else if (IS_GEN7(dev_priv))
		ppgtt->switch_mm = gen7_mm_switch;
	else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;

	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
	gen6_write_page_range(ppgtt, 0, ppgtt->base.total);

	ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
	if (ret) {
		gen6_ppgtt_cleanup(&ppgtt->base);
		return ret;
	}

	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
			 ppgtt->pd.base.ggtt_offset << 10);

	return 0;
}

static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
			   struct drm_i915_private *dev_priv)
{
	ppgtt->base.i915 = dev_priv;
	ppgtt->base.dma = &dev_priv->drm.pdev->dev;

	if (INTEL_INFO(dev_priv)->gen < 8)
		return gen6_ppgtt_init(ppgtt);
	else
		return gen8_ppgtt_init(ppgtt);
}

static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv,
				    const char *name)
{
	i915_gem_timeline_init(dev_priv, &vm->timeline, name);

	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->unbound_list);

	list_add_tail(&vm->global_link, &dev_priv->vm_list);
	pagevec_init(&vm->free_pages, false);
}

static void i915_address_space_fini(struct i915_address_space *vm)
{
	if (pagevec_count(&vm->free_pages))
		vm_free_pages_release(vm);

	i915_gem_timeline_fini(&vm->timeline);
	drm_mm_takedown(&vm->mm);
	list_del(&vm->global_link);
}

static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
{
	/*
	 * This function is for gtt related workarounds. It is called on
	 * driver load and after a GPU reset, so you can place workarounds
	 * here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries: bdw, chv, gen9 */
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
	else if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
	gtt_write_workarounds(dev_priv);

	/*
	 * In case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself. We don't
	 * need to do anything here.
	 */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev_priv))
		return 0;

	if (IS_GEN6(dev_priv))
		gen6_ppgtt_enable(dev_priv);
	else if (IS_GEN7(dev_priv))
		gen7_ppgtt_enable(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_enable(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));

	return 0;
}

struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
		  struct drm_i915_file_private *fpriv,
		  const char *name)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = __hw_ppgtt_init(ppgtt, dev_priv);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	kref_init(&ppgtt->ref);
	i915_address_space_init(&ppgtt->base, dev_priv, name);
	ppgtt->base.file = fpriv;

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}
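
/*
 * A ppgtt is reference counted: i915_ppgtt_close marks the address space
 * closed and closes any vmas still hanging off it, while the final kref
 * put lands in i915_ppgtt_release, which tears down the page tables.
 */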
void i915_ppgtt_close(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->active_list,
		&vm->inactive_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	GEM_BUG_ON(vm->closed);
	vm->closed = true;

	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			if (!i915_vma_is_closed(vma))
				i915_vma_close(vma);
	}
}

void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound and destroyed */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
	WARN_ON(!list_empty(&ppgtt->base.unbound_list));

	ppgtt->base.cleanup(&ppgtt->base);
	i915_address_space_fini(&ppgtt->base);
	kfree(ppgtt);
}

/*
 * Certain Gen5 chipsets require idling the GPU before unmapping anything
 * from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *dev_priv)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably
	 * that was loaded first.
	 */
	return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 fault_reg = I915_READ(RING_FAULT_REG(engine));

		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(engine),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}

	/* Flush the fault-register writes before continuing */
	if (dev_priv->engine[RCS])
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	/*
	 * Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_GEN(dev_priv) < 6)
		return;

	i915_check_and_clear_faults(dev_priv);

	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);

	i915_ggtt_invalidate(dev_priv);
}

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg(&obj->base.dev->pdev->dev,
			       pages->sgl, pages->nents,
			       PCI_DMA_BIDIRECTIONAL))
			return 0;

		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND |
				 I915_SHRINK_ACTIVE));

	return -ENOSPC;
}
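
/*
 * GGTT PTEs are written directly through the ioremapped GTT (ggtt->gsm):
 * 64b writes on gen8+, 32b iowrites on gen6/7. After the PTEs land, the
 * writes must be made visible (wmb/posting read) and the TLBs flushed
 * via ggtt->invalidate().
 */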
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	gen8_set_pte(pte, gen8_pte_encode(addr, level));

	ggtt->invalidate(vm->i915);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     u64 start,
				     enum i915_cache_level level,
				     u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
	dma_addr_t addr;

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
	gtt_entries += start >> PAGE_SHIFT;
	for_each_sgt_dma(addr, sgt_iter, st)
		gen8_set_pte(gtt_entries++, pte_encode | addr);

	wmb();

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(vm->i915);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(vm->i915);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the
 * GPU through the GMADR mapped BAR (the aperture).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     u64 start,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
	unsigned int i = start >> PAGE_SHIFT;
	struct sgt_iter iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, iter, st)
		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
	wmb();

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(vm->i915);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	/*
	 * Make sure the internal GAM fifo has been cleared and all GTT
	 * writes have been committed before we return.
	 */
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
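
/*
 * On Broxton with VT-d enabled, GGTT PTE updates must be serialized
 * against concurrent GTT/aperture accesses. Each update is therefore
 * wrapped in stop_machine() so that nothing else touches the GTT while
 * the PTEs and the workaround flush above are applied.
 */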
struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	u64 start;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct sg_table *st,
					     u64 start,
					     enum i915_cache_level level,
					     u32 unused)
{
	struct insert_entries arg = { vm, st, start, level };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

struct clear_range {
	struct i915_address_space *vm;
	u64 start;
	u64 length;
};

static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
{
	struct clear_range *arg = _arg;

	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
					  u64 start,
					  u64 length)
{
	struct clear_range arg = { vm, start, length };

	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}
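
/*
 * On gen2-5 the GTT sits behind the GMCH and is driven by the intel-gtt
 * helper module; these wrappers just translate offsets to page indices
 * and map the cache level onto the AGP flags it expects.
 */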
static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *pages,
				     u64 start,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
2335
2336static int ggtt_bind_vma(struct i915_vma *vma,
2337 enum i915_cache_level cache_level,
2338 u32 flags)
2339{
2340 struct drm_i915_private *i915 = vma->vm->i915;
2341 struct drm_i915_gem_object *obj = vma->obj;
2342 u32 pte_flags;
2343
2344 if (unlikely(!vma->pages)) {
2345 int ret = i915_get_ggtt_vma_pages(vma);
2346 if (ret)
2347 return ret;
2348 }
2349
	/* Currently applicable only to VLV */
2351 pte_flags = 0;
2352 if (obj->gt_ro)
2353 pte_flags |= PTE_READ_ONLY;
2354
2355 intel_runtime_pm_get(i915);
2356 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
2357 cache_level, pte_flags);
2358 intel_runtime_pm_put(i915);
2359
	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
2365 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2366
2367 return 0;
2368}
2369
2370static void ggtt_unbind_vma(struct i915_vma *vma)
2371{
2372 struct drm_i915_private *i915 = vma->vm->i915;
2373
2374 intel_runtime_pm_get(i915);
2375 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2376 intel_runtime_pm_put(i915);
2377}
2378
2379static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2380 enum i915_cache_level cache_level,
2381 u32 flags)
2382{
2383 struct drm_i915_private *i915 = vma->vm->i915;
2384 u32 pte_flags;
2385 int ret;
2386
2387 if (unlikely(!vma->pages)) {
2388 ret = i915_get_ggtt_vma_pages(vma);
2389 if (ret)
2390 return ret;
2391 }
2392
	/* Currently applicable only to VLV */
2394 pte_flags = 0;
2395 if (vma->obj->gt_ro)
2396 pte_flags |= PTE_READ_ONLY;
2397
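	/* Allocate the page directories for the aliasing ppgtt on first
	 * local bind; subsequent binds reuse the existing va range.
	 */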
2398 if (flags & I915_VMA_LOCAL_BIND) {
2399 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2400
2401 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2402 appgtt->base.allocate_va_range) {
2403 ret = appgtt->base.allocate_va_range(&appgtt->base,
2404 vma->node.start,
2405 vma->size);
2406 if (ret)
2407 goto err_pages;
2408 }
2409
2410 appgtt->base.insert_entries(&appgtt->base,
2411 vma->pages, vma->node.start,
2412 cache_level, pte_flags);
2413 }
2414
2415 if (flags & I915_VMA_GLOBAL_BIND) {
2416 intel_runtime_pm_get(i915);
2417 vma->vm->insert_entries(vma->vm,
2418 vma->pages, vma->node.start,
2419 cache_level, pte_flags);
2420 intel_runtime_pm_put(i915);
2421 }
2422
2423 return 0;
2424
2425err_pages:
2426 if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
2427 if (vma->pages != vma->obj->mm.pages) {
2428 GEM_BUG_ON(!vma->pages);
2429 sg_free_table(vma->pages);
2430 kfree(vma->pages);
2431 }
2432 vma->pages = NULL;
2433 }
2434 return ret;
2435}
2436
2437static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2438{
2439 struct drm_i915_private *i915 = vma->vm->i915;
2440
2441 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2442 intel_runtime_pm_get(i915);
2443 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2444 intel_runtime_pm_put(i915);
2445 }
2446
2447 if (vma->flags & I915_VMA_LOCAL_BIND) {
2448 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2449
2450 vm->clear_range(vm, vma->node.start, vma->size);
2451 }
2452}
2453
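/* Tear down the object's dma mapping; with the VT-d idle-maps workaround we
 * must quiesce the GPU first, as unmapping pages it may still be scanning
 * risks a hard hang (see the DRM_ERROR below).
 */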
2454void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2455 struct sg_table *pages)
2456{
2457 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2458 struct device *kdev = &dev_priv->drm.pdev->dev;
2459 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2460
2461 if (unlikely(ggtt->do_idle_maps)) {
2462 if (i915_gem_wait_for_idle(dev_priv, 0)) {
2463 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
2465 udelay(10);
2466 }
2467 }
2468
2469 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2470}
2471
2472static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2473 unsigned long color,
2474 u64 *start,
2475 u64 *end)
2476{
2477 if (node->allocated && node->color != color)
2478 *start += I915_GTT_PAGE_SIZE;
2479
	/* Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetching or accessing a
	 * neighbouring node.
	 */
2485 node = list_next_entry(node, node_list);
2486 if (node->color != color)
2487 *end -= I915_GTT_PAGE_SIZE;
2488}
2489
2490int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2491{
2492 struct i915_ggtt *ggtt = &i915->ggtt;
2493 struct i915_hw_ppgtt *ppgtt;
2494 int err;
2495
2496 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
2497 if (IS_ERR(ppgtt))
2498 return PTR_ERR(ppgtt);
2499
2500 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2501 err = -ENODEV;
2502 goto err_ppgtt;
2503 }
2504
2505 if (ppgtt->base.allocate_va_range) {
		/* Note we only pre-allocate as far as the end of the global
		 * GTT. On 48b / 4-level page-tables, the difference is very,
		 * very significant! We have to preallocate as GVT/vgpu does
		 * not like the page directory disappearing.
		 */
2511 err = ppgtt->base.allocate_va_range(&ppgtt->base,
2512 0, ggtt->base.total);
2513 if (err)
2514 goto err_ppgtt;
2515 }
2516
2517 i915->mm.aliasing_ppgtt = ppgtt;
2518
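	/* Redirect the GGTT bind/unbind hooks so that every binding can
	 * also be mirrored into (and out of) the aliasing ppgtt.
	 */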
2519 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2520 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2521
2522 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2523 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2524
2525 return 0;
2526
2527err_ppgtt:
2528 i915_ppgtt_put(ppgtt);
2529 return err;
2530}
2531
2532void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2533{
2534 struct i915_ggtt *ggtt = &i915->ggtt;
2535 struct i915_hw_ppgtt *ppgtt;
2536
2537 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2538 if (!ppgtt)
2539 return;
2540
2541 i915_ppgtt_put(ppgtt);
2542
2543 ggtt->base.bind_vma = ggtt_bind_vma;
2544 ggtt->base.unbind_vma = ggtt_unbind_vma;
2545}
2546
2547int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2548{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefers
	 * to sit watching the scratch page, and not have it disappear. So,
	 * rather than handing that last page to the allocator, keep it
	 * mapped to scratch for the lifetime of the driver (see the final
	 * clear_range below).
	 */
2558 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2559 unsigned long hole_start, hole_end;
2560 struct drm_mm_node *entry;
2561 int ret;
2562
2563 ret = intel_vgt_balloon(dev_priv);
2564 if (ret)
2565 return ret;
2566
	/* Reserve a mappable slot for our lockless error capture */
2568 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2569 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2570 0, ggtt->mappable_end,
2571 DRM_MM_INSERT_LOW);
2572 if (ret)
2573 return ret;
2574
	/* Clear any non-preallocated blocks */
2576 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
2577 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2578 hole_start, hole_end);
2579 ggtt->base.clear_range(&ggtt->base, hole_start,
2580 hole_end - hole_start);
2581 }
2582
	/* And finally clear the reserved guard page */
2584 ggtt->base.clear_range(&ggtt->base,
2585 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
2586
2587 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
2588 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2589 if (ret)
2590 goto err;
2591 }
2592
2593 return 0;
2594
2595err:
2596 drm_mm_remove_node(&ggtt->error_capture);
2597 return ret;
2598}
2599
/**
 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
 * @dev_priv: i915 device
 */
2604void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2605{
2606 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2607 struct i915_vma *vma, *vn;
2608
2609 ggtt->base.closed = true;
2610
2611 mutex_lock(&dev_priv->drm.struct_mutex);
2612 WARN_ON(!list_empty(&ggtt->base.active_list));
2613 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2614 WARN_ON(i915_vma_unbind(vma));
2615 mutex_unlock(&dev_priv->drm.struct_mutex);
2616
2617 i915_gem_cleanup_stolen(&dev_priv->drm);
2618
2619 mutex_lock(&dev_priv->drm.struct_mutex);
2620 i915_gem_fini_aliasing_ppgtt(dev_priv);
2621
2622 if (drm_mm_node_allocated(&ggtt->error_capture))
2623 drm_mm_remove_node(&ggtt->error_capture);
2624
2625 if (drm_mm_initialized(&ggtt->base.mm)) {
2626 intel_vgt_deballoon(dev_priv);
2627 i915_address_space_fini(&ggtt->base);
2628 }
2629
2630 ggtt->base.cleanup(&ggtt->base);
2631 mutex_unlock(&dev_priv->drm.struct_mutex);
2632
2633 arch_phys_wc_del(ggtt->mtrr);
2634 io_mapping_fini(&ggtt->mappable);
2635}
2636
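/* The GGTT and stolen sizes are read out of the GMCH control word; the
 * field encodings differ per generation, hence one decoder per platform
 * below.
 */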
2637static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2638{
2639 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2640 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2641 return snb_gmch_ctl << 20;
2642}
2643
2644static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2645{
2646 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2647 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2648 if (bdw_gmch_ctl)
2649 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2650
2651#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2653 if (bdw_gmch_ctl > 4)
2654 bdw_gmch_ctl = 4;
2655#endif
2656
2657 return bdw_gmch_ctl << 20;
2658}
2659
2660static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2661{
2662 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2663 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2664
2665 if (gmch_ctrl)
2666 return 1 << (20 + gmch_ctrl);
2667
2668 return 0;
2669}
2670
2671static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
2672{
2673 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2674 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2675 return (size_t)snb_gmch_ctl << 25;
2676}
2677
2678static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
2679{
2680 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2681 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2682 return (size_t)bdw_gmch_ctl << 25;
2683}
2684
2685static size_t chv_get_stolen_size(u16 gmch_ctrl)
2686{
2687 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2688 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2689
	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments start at 36MB
	 */
2695 if (gmch_ctrl < 0x11)
2696 return (size_t)gmch_ctrl << 25;
2697 else if (gmch_ctrl < 0x17)
2698 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
2699 else
2700 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
2701}
2702
2703static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2704{
2705 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2706 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2707
2708 if (gen9_gmch_ctl < 0xf0)
2709 return (size_t)gen9_gmch_ctl << 25;
2710 else
		/* 4MB increments starting at 0xf0 for 4MB */
2712 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
2713}
2714
2715static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2716{
2717 struct drm_i915_private *dev_priv = ggtt->base.i915;
2718 struct pci_dev *pdev = dev_priv->drm.pdev;
2719 phys_addr_t phys_addr;
2720 int ret;
2721
	/* For Modern GENs the PTEs and register space are split in the BAR */
2723 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2724
	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
2732 if (IS_GEN9_LP(dev_priv))
2733 ggtt->gsm = ioremap_nocache(phys_addr, size);
2734 else
2735 ggtt->gsm = ioremap_wc(phys_addr, size);
2736 if (!ggtt->gsm) {
2737 DRM_ERROR("Failed to map the ggtt page table\n");
2738 return -ENOMEM;
2739 }
2740
2741 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
2742 if (ret) {
2743 DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
2745 iounmap(ggtt->gsm);
2746 return ret;
2747 }
2748
2749 return 0;
2750}
2751
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
2755static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2756{
2757 u64 pat;
2758
2759 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
2760 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |
2761 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |
2762 GEN8_PPAT(3, GEN8_PPAT_UC) |
2763 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2764 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2765 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2766 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2767
2768 if (!USES_PPGTT(dev_priv))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
2782 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2783
	/* Note: the PAT is one 64b value in hardware, but the interface
	 * exposes it as two 32b registers, hence the split write below. */
2786 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2787 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
2788}
2789
2790static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2791{
2792 u64 pat;
2793
	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
2812 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2813 GEN8_PPAT(1, 0) |
2814 GEN8_PPAT(2, 0) |
2815 GEN8_PPAT(3, 0) |
2816 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2817 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2818 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2819 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2820
2821 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2822 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
2823}
2824
2825static void gen6_gmch_remove(struct i915_address_space *vm)
2826{
2827 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2828
2829 iounmap(ggtt->gsm);
2830 cleanup_scratch_page(vm);
2831}
2832
2833static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2834{
2835 struct drm_i915_private *dev_priv = ggtt->base.i915;
2836 struct pci_dev *pdev = dev_priv->drm.pdev;
2837 unsigned int size;
2838 u16 snb_gmch_ctl;
2839 int err;
2840
	/* TODO: We're not aware of mappable constraints on gen8 yet */
2842 ggtt->mappable_base = pci_resource_start(pdev, 2);
2843 ggtt->mappable_end = pci_resource_len(pdev, 2);
2844
2845 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
2846 if (!err)
2847 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2848 if (err)
2849 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
2850
2851 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
2852
2853 if (INTEL_GEN(dev_priv) >= 9) {
2854 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
2855 size = gen8_get_total_gtt_size(snb_gmch_ctl);
2856 } else if (IS_CHERRYVIEW(dev_priv)) {
2857 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
2858 size = chv_get_total_gtt_size(snb_gmch_ctl);
2859 } else {
2860 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
2861 size = gen8_get_total_gtt_size(snb_gmch_ctl);
2862 }
2863
2864 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
2865
2866 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
2867 chv_setup_private_ppat(dev_priv);
2868 else
2869 bdw_setup_private_ppat(dev_priv);
2870
2871 ggtt->base.cleanup = gen6_gmch_remove;
2872 ggtt->base.bind_vma = ggtt_bind_vma;
2873 ggtt->base.unbind_vma = ggtt_unbind_vma;
2874 ggtt->base.insert_page = gen8_ggtt_insert_page;
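	/* With full ppgtt, nothing unprivileged should remain reachable
	 * through the GGTT, so scrubbing stale entries on unbind can be
	 * skipped (unless the VT-d scanout workaround applies).
	 */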
2875 ggtt->base.clear_range = nop_clear_range;
2876 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
2877 ggtt->base.clear_range = gen8_ggtt_clear_range;
2878
2879 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
2880
	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
2882 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
2883 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
2884 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
2885 if (ggtt->base.clear_range != nop_clear_range)
2886 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
2887 }
2888
2889 ggtt->invalidate = gen6_ggtt_invalidate;
2890
2891 return ggtt_probe_common(ggtt, size);
2892}
2893
2894static int gen6_gmch_probe(struct i915_ggtt *ggtt)
2895{
2896 struct drm_i915_private *dev_priv = ggtt->base.i915;
2897 struct pci_dev *pdev = dev_priv->drm.pdev;
2898 unsigned int size;
2899 u16 snb_gmch_ctl;
2900 int err;
2901
2902 ggtt->mappable_base = pci_resource_start(pdev, 2);
2903 ggtt->mappable_end = pci_resource_len(pdev, 2);
2904
	/* 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
2908 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
2909 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
2910 return -ENXIO;
2911 }
2912
2913 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2914 if (!err)
2915 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2916 if (err)
2917 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
2918 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
2919
2920 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
2921
2922 size = gen6_get_total_gtt_size(snb_gmch_ctl);
2923 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
2924
2925 ggtt->base.clear_range = gen6_ggtt_clear_range;
2926 ggtt->base.insert_page = gen6_ggtt_insert_page;
2927 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
2928 ggtt->base.bind_vma = ggtt_bind_vma;
2929 ggtt->base.unbind_vma = ggtt_unbind_vma;
2930 ggtt->base.cleanup = gen6_gmch_remove;
2931
2932 ggtt->invalidate = gen6_ggtt_invalidate;
2933
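	/* gen6/7 parts encode cacheability into the PTE in slightly
	 * different ways; pick the matching encoder.
	 */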
2934 if (HAS_EDRAM(dev_priv))
2935 ggtt->base.pte_encode = iris_pte_encode;
2936 else if (IS_HASWELL(dev_priv))
2937 ggtt->base.pte_encode = hsw_pte_encode;
2938 else if (IS_VALLEYVIEW(dev_priv))
2939 ggtt->base.pte_encode = byt_pte_encode;
2940 else if (INTEL_GEN(dev_priv) >= 7)
2941 ggtt->base.pte_encode = ivb_pte_encode;
2942 else
2943 ggtt->base.pte_encode = snb_pte_encode;
2944
2945 return ggtt_probe_common(ggtt, size);
2946}
2947
2948static void i915_gmch_remove(struct i915_address_space *vm)
2949{
2950 intel_gmch_remove();
2951}
2952
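/* gen2-5: the GTT lives entirely behind the intel-gtt/AGP layer; probe it
 * for the geometry and install thin wrapper hooks.
 */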
2953static int i915_gmch_probe(struct i915_ggtt *ggtt)
2954{
2955 struct drm_i915_private *dev_priv = ggtt->base.i915;
2956 int ret;
2957
2958 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
2959 if (!ret) {
2960 DRM_ERROR("failed to set up gmch\n");
2961 return -EIO;
2962 }
2963
2964 intel_gtt_get(&ggtt->base.total,
2965 &ggtt->stolen_size,
2966 &ggtt->mappable_base,
2967 &ggtt->mappable_end);
2968
2969 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
2970 ggtt->base.insert_page = i915_ggtt_insert_page;
2971 ggtt->base.insert_entries = i915_ggtt_insert_entries;
2972 ggtt->base.clear_range = i915_ggtt_clear_range;
2973 ggtt->base.bind_vma = ggtt_bind_vma;
2974 ggtt->base.unbind_vma = ggtt_unbind_vma;
2975 ggtt->base.cleanup = i915_gmch_remove;
2976
2977 ggtt->invalidate = gmch_ggtt_invalidate;
2978
2979 if (unlikely(ggtt->do_idle_maps))
2980 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2981
2982 return 0;
2983}
2984
/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @dev_priv: i915 device
 */
2989int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
2990{
2991 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2992 int ret;
2993
2994 ggtt->base.i915 = dev_priv;
2995 ggtt->base.dma = &dev_priv->drm.pdev->dev;
2996
2997 if (INTEL_GEN(dev_priv) <= 5)
2998 ret = i915_gmch_probe(ggtt);
2999 else if (INTEL_GEN(dev_priv) < 8)
3000 ret = gen6_gmch_probe(ggtt);
3001 else
3002 ret = gen8_gmch_probe(ggtt);
3003 if (ret)
3004 return ret;
3005
	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
	 * This is easier than doing range restriction on the fly, as we
	 * currently don't have any bits spare to pass in this upper
	 * restriction!
	 */
3011 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
3012 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3013 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3014 }
3015
3016 if ((ggtt->base.total - 1) >> 32) {
3017 DRM_ERROR("We never expected a Global GTT with more than 32bits"
3018 " of address space! Found %lldM!\n",
3019 ggtt->base.total >> 20);
3020 ggtt->base.total = 1ULL << 32;
3021 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3022 }
3023
3024 if (ggtt->mappable_end > ggtt->base.total) {
3025 DRM_ERROR("mappable aperture extends past end of GGTT,"
3026 " aperture=%llx, total=%llx\n",
3027 ggtt->mappable_end, ggtt->base.total);
3028 ggtt->mappable_end = ggtt->base.total;
3029 }
3030
	/* GMADR is the PCI mmio aperture into the global GTT. */
3032 DRM_INFO("Memory usable by graphics device = %lluM\n",
3033 ggtt->base.total >> 20);
3034 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
3035 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
3036 if (intel_vtd_active())
3037 DRM_INFO("VT-d active for gfx access\n");
3038
3039 return 0;
3040}
3041
/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @dev_priv: i915 device
 */
3046int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3047{
3048 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3049 int ret;
3050
3051 INIT_LIST_HEAD(&dev_priv->vm_list);
3052
	/* Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
3058 mutex_lock(&dev_priv->drm.struct_mutex);
3059 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
3060 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
3061 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
3062 mutex_unlock(&dev_priv->drm.struct_mutex);
3063
3064 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3065 dev_priv->ggtt.mappable_base,
3066 dev_priv->ggtt.mappable_end)) {
3067 ret = -EIO;
3068 goto out_gtt_cleanup;
3069 }
3070
3071 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3072
	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
3077 ret = i915_gem_init_stolen(dev_priv);
3078 if (ret)
3079 goto out_gtt_cleanup;
3080
3081 return 0;
3082
3083out_gtt_cleanup:
3084 ggtt->base.cleanup(&ggtt->base);
3085 return ret;
3086}
3087
3088int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3089{
3090 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3091 return -EIO;
3092
3093 return 0;
3094}
3095
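/* Swap the GGTT invalidation hook so that the GuC is also notified of PTE
 * updates; the GEM_BUG_ONs insist that enable/disable come in pairs.
 */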
3096void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3097{
3098 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3099
3100 i915->ggtt.invalidate = guc_ggtt_invalidate;
3101}
3102
3103void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3104{
	/* We should only be called after i915_ggtt_enable_guc() */
3106 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3107
3108 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3109}
3110
3111void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3112{
3113 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3114 struct drm_i915_gem_object *obj, *on;
3115
3116 i915_check_and_clear_faults(dev_priv);
3117
	/* First fill our portion of the GTT with scratch pages */
3119 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
3120
	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3122
	/* clflush objects bound into the GGTT and rebind them. */
3124 list_for_each_entry_safe(obj, on,
3125 &dev_priv->mm.bound_list, global_link) {
3126 bool ggtt_bound = false;
3127 struct i915_vma *vma;
3128
3129 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3130 if (vma->vm != &ggtt->base)
3131 continue;
3132
3133 if (!i915_vma_unbind(vma))
3134 continue;
3135
3136 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3137 PIN_UPDATE));
3138 ggtt_bound = true;
3139 }
3140
3141 if (ggtt_bound)
3142 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3143 }
3144
3145 ggtt->base.closed = false;
3146
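	/* The private PPAT setup does not survive suspend on gen8+ and
	 * must be reprogrammed here.
	 */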
3147 if (INTEL_GEN(dev_priv) >= 8) {
3148 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3149 chv_setup_private_ppat(dev_priv);
3150 else
3151 bdw_setup_private_ppat(dev_priv);
3152
3153 return;
3154 }
3155
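	/* On gen6/7 the ppgtt page directory entries live inside the GGTT
	 * and so must likewise be rewritten after resume.
	 */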
3156 if (USES_PPGTT(dev_priv)) {
3157 struct i915_address_space *vm;
3158
3159 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3160 struct i915_hw_ppgtt *ppgtt;
3161
3162 if (i915_is_ggtt(vm))
3163 ppgtt = dev_priv->mm.aliasing_ppgtt;
3164 else
3165 ppgtt = i915_vm_to_ppgtt(vm);
3166
3167 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
3168 }
3169 }
3170
3171 i915_ggtt_invalidate(dev_priv);
3172}
3173
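/* Emit the object's pages in 90-degree rotated order: for each column of
 * the rotated view, walk the source rows bottom-up, one page-sized sg
 * entry per tile.
 */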
3174static struct scatterlist *
3175rotate_pages(const dma_addr_t *in, unsigned int offset,
3176 unsigned int width, unsigned int height,
3177 unsigned int stride,
3178 struct sg_table *st, struct scatterlist *sg)
3179{
3180 unsigned int column, row;
3181 unsigned int src_idx;
3182
3183 for (column = 0; column < width; column++) {
3184 src_idx = stride * (height - 1) + column;
3185 for (row = 0; row < height; row++) {
3186 st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
3191 sg_set_page(sg, NULL, PAGE_SIZE, 0);
3192 sg_dma_address(sg) = in[offset + src_idx];
3193 sg_dma_len(sg) = PAGE_SIZE;
3194 sg = sg_next(sg);
3195 src_idx -= stride;
3196 }
3197 }
3198
3199 return sg;
3200}
3201
3202static noinline struct sg_table *
3203intel_rotate_pages(struct intel_rotation_info *rot_info,
3204 struct drm_i915_gem_object *obj)
3205{
3206 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
3207 unsigned int size = intel_rotation_info_size(rot_info);
3208 struct sgt_iter sgt_iter;
3209 dma_addr_t dma_addr;
3210 unsigned long i;
3211 dma_addr_t *page_addr_list;
3212 struct sg_table *st;
3213 struct scatterlist *sg;
3214 int ret = -ENOMEM;
3215
	/* Allocate a temporary list of source pages for random access. */
3217 page_addr_list = kvmalloc_array(n_pages,
3218 sizeof(dma_addr_t),
3219 GFP_TEMPORARY);
3220 if (!page_addr_list)
3221 return ERR_PTR(ret);
3222
	/* Allocate target SG list. */
3224 st = kmalloc(sizeof(*st), GFP_KERNEL);
3225 if (!st)
3226 goto err_st_alloc;
3227
3228 ret = sg_alloc_table(st, size, GFP_KERNEL);
3229 if (ret)
3230 goto err_sg_alloc;
3231
	/* Populate source page list from the object. */
3233 i = 0;
3234 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3235 page_addr_list[i++] = dma_addr;
3236
3237 GEM_BUG_ON(i != n_pages);
3238 st->nents = 0;
3239 sg = st->sgl;
3240
	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) {
3242 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3243 rot_info->plane[i].width, rot_info->plane[i].height,
3244 rot_info->plane[i].stride, st, sg);
3245 }
3246
3247 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3248 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3249
3250 kvfree(page_addr_list);
3251
3252 return st;
3253
3254err_sg_alloc:
3255 kfree(st);
3256err_st_alloc:
3257 kvfree(page_addr_list);
3258
3259 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3260 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3261
3262 return ERR_PTR(ret);
3263}
3264
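/* Build an sg_table covering only the pages of view->partial: start from the
 * first backing chunk that contains the offset and clip each chunk against
 * the remaining length.
 */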
3265static noinline struct sg_table *
3266intel_partial_pages(const struct i915_ggtt_view *view,
3267 struct drm_i915_gem_object *obj)
3268{
3269 struct sg_table *st;
3270 struct scatterlist *sg, *iter;
3271 unsigned int count = view->partial.size;
3272 unsigned int offset;
3273 int ret = -ENOMEM;
3274
3275 st = kmalloc(sizeof(*st), GFP_KERNEL);
3276 if (!st)
3277 goto err_st_alloc;
3278
3279 ret = sg_alloc_table(st, count, GFP_KERNEL);
3280 if (ret)
3281 goto err_sg_alloc;
3282
3283 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3284 GEM_BUG_ON(!iter);
3285
3286 sg = st->sgl;
3287 st->nents = 0;
3288 do {
3289 unsigned int len;
3290
3291 len = min(iter->length - (offset << PAGE_SHIFT),
3292 count << PAGE_SHIFT);
3293 sg_set_page(sg, NULL, len, 0);
3294 sg_dma_address(sg) =
3295 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3296 sg_dma_len(sg) = len;
3297
3298 st->nents++;
3299 count -= len >> PAGE_SHIFT;
3300 if (count == 0) {
3301 sg_mark_end(sg);
3302 return st;
3303 }
3304
3305 sg = __sg_next(sg);
3306 iter = __sg_next(iter);
3307 offset = 0;
3308 } while (1);
3309
3310err_sg_alloc:
3311 kfree(st);
3312err_st_alloc:
3313 return ERR_PTR(ret);
3314}
3315
3316static int
3317i915_get_ggtt_vma_pages(struct i915_vma *vma)
3318{
3319 int ret;
3320
	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
3326 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3327
3328 switch (vma->ggtt_view.type) {
3329 case I915_GGTT_VIEW_NORMAL:
3330 vma->pages = vma->obj->mm.pages;
3331 return 0;
3332
3333 case I915_GGTT_VIEW_ROTATED:
3334 vma->pages =
3335 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3336 break;
3337
3338 case I915_GGTT_VIEW_PARTIAL:
3339 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3340 break;
3341
3342 default:
3343 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3344 vma->ggtt_view.type);
3345 return -EINVAL;
3346 }
3347
3348 ret = 0;
3349 if (unlikely(IS_ERR(vma->pages))) {
3350 ret = PTR_ERR(vma->pages);
3351 vma->pages = NULL;
3352 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3353 vma->ggtt_view.type, ret);
3354 }
3355 return ret;
3356}
3357
/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate within the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node exactly at @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages
 * between differing domains). See i915_gem_evict_for_node() for the gory
 * details on the eviction algorithm. #PIN_NOEVICT suppresses the eviction
 * entirely and converts the failure into -ENOSPC.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
3383int i915_gem_gtt_reserve(struct i915_address_space *vm,
3384 struct drm_mm_node *node,
3385 u64 size, u64 offset, unsigned long color,
3386 unsigned int flags)
3387{
3388 int err;
3389
3390 GEM_BUG_ON(!size);
3391 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3392 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3393 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3394 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
3395 GEM_BUG_ON(drm_mm_node_allocated(node));
3396
3397 node->size = size;
3398 node->start = offset;
3399 node->color = color;
3400
3401 err = drm_mm_reserve_node(&vm->mm, node);
3402 if (err != -ENOSPC)
3403 return err;
3404
3405 if (flags & PIN_NOEVICT)
3406 return -ENOSPC;
3407
3408 err = i915_gem_evict_for_node(vm, node, flags);
3409 if (err == 0)
3410 err = drm_mm_reserve_node(&vm->mm, node);
3411
3412 return err;
3413}
3414
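/* Pick a random, suitably aligned offset such that [offset, offset + len)
 * fits inside [start, end). The remainder introduces a small modulo bias,
 * which is harmless for this best-effort placement.
 */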
3415static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3416{
3417 u64 range, addr;
3418
3419 GEM_BUG_ON(range_overflows(start, len, end));
3420 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3421
3422 range = round_down(end - len, align) - round_up(start, align);
3423 if (range) {
3424 if (sizeof(unsigned long) == sizeof(u64)) {
3425 addr = get_random_long();
3426 } else {
3427 addr = get_random_int();
3428 if (range > U32_MAX) {
3429 addr <<= 32;
3430 addr |= get_random_int();
3431 }
3432 }
3433 div64_u64_rem(addr, range, &addr);
3434 start += addr;
3435 }
3436
3437 return round_up(start, align);
3438}
3439
/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate within the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, otherwise the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
3474int i915_gem_gtt_insert(struct i915_address_space *vm,
3475 struct drm_mm_node *node,
3476 u64 size, u64 alignment, unsigned long color,
3477 u64 start, u64 end, unsigned int flags)
3478{
3479 enum drm_mm_insert_mode mode;
3480 u64 offset;
3481 int err;
3482
3483 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3484 GEM_BUG_ON(!size);
3485 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3486 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3487 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3488 GEM_BUG_ON(start >= end);
3489 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3490 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3491 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
3492 GEM_BUG_ON(drm_mm_node_allocated(node));
3493
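	/* Fail fast if the requested range cannot possibly hold the node. */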
3494 if (unlikely(range_overflows(start, size, end)))
3495 return -ENOSPC;
3496
3497 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3498 return -ENOSPC;
3499
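	/* PIN_MAPPABLE biases the search towards the low, CPU-visible end
	 * of the GGTT, PIN_HIGH towards the top; otherwise take the best
	 * fit anywhere in range.
	 */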
3500 mode = DRM_MM_INSERT_BEST;
3501 if (flags & PIN_HIGH)
3502 mode = DRM_MM_INSERT_HIGH;
3503 if (flags & PIN_MAPPABLE)
3504 mode = DRM_MM_INSERT_LOW;
3505
	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
3512 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3513 if (alignment <= I915_GTT_MIN_ALIGNMENT)
3514 alignment = 0;
3515
3516 err = drm_mm_insert_node_in_range(&vm->mm, node,
3517 size, alignment, color,
3518 start, end, mode);
3519 if (err != -ENOSPC)
3520 return err;
3521
3522 if (flags & PIN_NOEVICT)
3523 return -ENOSPC;
3524
	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)            (448k objects)
	 *
	 * Now imagine that the GTT is full of small objects; searching the
	 * LRU for a large contiguous hole would mean evicting long runs of
	 * neighbours, and the mmap traffic through the aperture constantly
	 * repopulates the low addresses. Instead, try a single random slot:
	 * the expected cost is proportional only to the handful of objects
	 * overlapping the chosen offset, independent of the total GTT
	 * population.
	 *
	 * If the randomly chosen slot turns out to overlap something pinned
	 * (or otherwise unevictable), fall back to the ordinary eviction
	 * scan below rather than rolling the dice again.
	 */
3547 offset = random_offset(start, end,
3548 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3549 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3550 if (err != -ENOSPC)
3551 return err;
3552
	/* Randomly selected placement is pinned, do a search */
3554 err = i915_gem_evict_something(vm, size, alignment, color,
3555 start, end, flags);
3556 if (err)
3557 return err;
3558
3559 return drm_mm_insert_node_in_range(&vm->mm, node,
3560 size, alignment, color,
3561 start, end, DRM_MM_INSERT_EVICT);
3562}
3563
3564#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3565#include "selftests/mock_gtt.c"
3566#include "selftests/i915_gem_gtt.c"
3567#endif
3568