1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#ifndef __I915_GEM_GTT_H__
35#define __I915_GEM_GTT_H__
36
37#include <linux/io-mapping.h>
38#include <linux/mm.h>
39#include <linux/pagevec.h>
40
41#include "gt/intel_reset.h"
42#include "i915_gem_fence_reg.h"
43#include "i915_request.h"
44#include "i915_scatterlist.h"
45#include "i915_selftest.h"
46#include "i915_timeline.h"
47
/* Page sizes supported by the GTT: 4K base pages plus 64K and 2M huge pages. */
#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

/*
 * Mask selecting the page-aligned part of a GTT address. Parenthesised so
 * the unary minus cannot rebind to neighbouring tokens when the macro is
 * expanded mid-expression (matches the PIN_OFFSET_MASK definition below).
 */
#define I915_GTT_PAGE_MASK (-I915_GTT_PAGE_SIZE)

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6
63
64struct drm_i915_file_private;
65struct drm_i915_gem_object;
66struct i915_vma;
67
68typedef u32 gen6_pte_t;
69typedef u64 gen8_pte_t;
70typedef u64 gen8_pde_t;
71typedef u64 gen8_ppgtt_pdpe_t;
72typedef u64 gen8_ppgtt_pml4e_t;
73
74#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
75
76
77#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
78#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
79#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
80#define GEN6_PTE_CACHE_LLC (2 << 1)
81#define GEN6_PTE_UNCACHED (1 << 1)
82#define GEN6_PTE_VALID (1 << 0)
83
84#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
85#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
86#define I915_PDES 512
87#define I915_PDE_MASK (I915_PDES - 1)
88#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
89
90#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
91#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
92#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
93#define GEN6_PDE_SHIFT 22
94#define GEN6_PDE_VALID (1 << 0)
95
96#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
97
98#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
99#define BYT_PTE_WRITEABLE (1 << 1)
100
101
102
103
104#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
105 (((bits) & 0x8) << (11 - 3)))
106#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
107#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
108#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
109#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
110#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
111#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
112#define HSW_PTE_UNCACHED (0)
113#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
114#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
115
116
117
118
119
120
121
122#define GEN8_3LVL_PDPES 4
123#define GEN8_PDE_SHIFT 21
124#define GEN8_PDE_MASK 0x1ff
125#define GEN8_PTE_SHIFT 12
126#define GEN8_PTE_MASK 0x1ff
127#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
128
129
130
131
132
133#define GEN8_PML4ES_PER_PML4 512
134#define GEN8_PML4E_SHIFT 39
135#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
136#define GEN8_PDPE_SHIFT 30
137
138
139#define GEN8_PDPE_MASK 0x1ff
140
141#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
142#define PPAT_CACHED_PDE 0
143#define PPAT_CACHED _PAGE_PAT
144#define PPAT_DISPLAY_ELLC _PAGE_PCD
145
146#define CHV_PPAT_SNOOP (1<<6)
147#define GEN8_PPAT_AGE(x) ((x)<<4)
148#define GEN8_PPAT_LLCeLLC (3<<2)
149#define GEN8_PPAT_LLCELLC (2<<2)
150#define GEN8_PPAT_LLC (1<<2)
151#define GEN8_PPAT_WB (3<<0)
152#define GEN8_PPAT_WT (2<<0)
153#define GEN8_PPAT_WC (1<<0)
154#define GEN8_PPAT_UC (0<<0)
155#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
156#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
157
158#define GEN8_PPAT_GET_CA(x) ((x) & 3)
159#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
160#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
161#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
162
163#define GEN8_PDE_IPS_64K BIT(11)
164#define GEN8_PDE_PS_2M BIT(7)
165
166#define for_each_sgt_dma(__dmap, __iter, __sgt) \
167 __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)
168
/*
 * A single remapped plane within a GGTT view: a @width x @height rectangle
 * of GTT pages read from the backing object starting at page @offset,
 * advancing @stride pages per row.
 */
struct intel_remapped_plane_info {
	/* all fields are counted in GTT pages */
	unsigned int width, height, stride, offset;
} __packed;

struct intel_remapped_info {
	struct intel_remapped_plane_info plane[2];
	/* reserved, must be zero; also keeps sizeof() distinct from
	 * struct intel_rotation_info (see the view-type enum below). */
	unsigned int unused_mbz;
} __packed;

struct intel_rotation_info {
	struct intel_remapped_plane_info plane[2];
} __packed;

/* A partial view: @size pages of the object starting at page @offset. */
struct intel_partial_info {
	u64 offset;
	unsigned int size;
} __packed;

/*
 * GGTT view types. Apart from NORMAL, each enumerator's value is the size
 * of its matching view struct, so the type doubles as the number of bytes
 * of the i915_ggtt_view union that are significant for that view.
 */
enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
	I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
	I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};

/* Compile-time layout checks for the view types above; never run. */
static inline void assert_i915_gem_gtt_types(void)
{
	BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
	BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
	BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));

	/* The rotated and remapped planes must share their layout so code
	 * can handle either through the same plane[] accessors. */
	BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
		     offsetof(struct intel_rotation_info, plane[0]));
	BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
		     offsetofend(struct intel_rotation_info, plane[1]));

	/*
	 * Since the view type is encoded as the sizeof its struct, every
	 * branch must have a unique size: identical enum values here would
	 * make the switch below fail to compile.
	 */
	switch ((enum i915_ggtt_view_type)0) {
	case I915_GGTT_VIEW_NORMAL:
	case I915_GGTT_VIEW_PARTIAL:
	case I915_GGTT_VIEW_ROTATED:
	case I915_GGTT_VIEW_REMAPPED:
		/* no runtime effect; compile-time exhaustiveness check only */
		break;
	}
}

/* Describes how an object is mapped into the GGTT (see view types above). */
struct i915_ggtt_view {
	enum i915_ggtt_view_type type;
	union {
		/* members must contain no holes/padding (they are __packed) */
		struct intel_partial_info partial;
		struct intel_rotation_info rotated;
		struct intel_remapped_info remapped;
	};
};
229
230enum i915_cache_level;
231
232struct i915_vma;
233
/*
 * A single page backing part of the GPU page tables, together with its
 * DMA address (or, alternatively, its offset within the GGTT).
 */
struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/*
		 * NOTE(review): presumably the GGTT offset at which this
		 * paging structure's entries live (gen6-style page
		 * directories) — confirm against the gen6 ppgtt code.
		 */
		u32 ggtt_offset;
	};
};

/* Accessors for the i915_page_dma embedded at the base of a paging structure. */
#define px_base(px) (&(px)->base)
#define px_dma(px) (px_base(px)->daddr)

/* Lowest-level paging structure: one page of PTEs. */
struct i915_page_table {
	struct i915_page_dma base;
	atomic_t used;		/* tracking of entries in use */
};

/* A page directory: up to 512 child paging structures. */
struct i915_page_directory {
	struct i915_page_dma base;
	atomic_t used;		/* tracking of entries in use */
	spinlock_t lock;	/* protects @entry */
	void *entry[512];	/* child page tables / directories */
};
260
/* Hooks for binding/unbinding a vma into an address space. */
struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space — NOTE(review): presumably
	 * by pointing the PTEs back at the vm's scratch page; confirm
	 * against the implementations.
	 */
	void (*unbind_vma)(struct i915_vma *vma);

	/* Acquire/release the backing pages used for this mapping. */
	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

/* A small cache of free pages, guarded by @lock. */
struct pagestash {
	spinlock_t lock;
	struct pagevec pvec;
};
280
/*
 * An address space: a GPU-visible virtual address range (the global GTT
 * or a per-context ppGTT) managed by a drm_mm allocator, plus the
 * function pointers used to maintain its page tables.
 */
struct i915_address_space {
	struct kref ref;	/* released via i915_vm_release() */

	struct drm_mm mm;	/* range allocator for this address space */
	struct drm_i915_private *i915;
	struct device *dma;
	/*
	 * Owning file, or NULL for address spaces owned by the driver
	 * itself. Tracking the owner allows per-file resources to be
	 * released along with the file. NOTE(review): confirm the exact
	 * ownership rules where @file is assigned.
	 */
	struct drm_i915_file_private *file;
	u64 total;		/* size of the address space, in bytes */
	u64 reserved;		/* bytes within @total carved out and unusable */

	bool closed;		/* once closed, no new bindings may be made */

	struct mutex mutex;	/* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	u64 scratch_pte;	/* PTE encoding of the scratch page */
	int scratch_order;	/* allocation order of the scratch page */
	struct i915_page_dma scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory *scratch_pdp; /* top level for 4-level ppGTT */

	/*
	 * List of vma currently bound into this address space.
	 */
	struct list_head bound_list;

	/*
	 * List of vma allocated in this address space but not bound.
	 */
	struct list_head unbound_list;

	struct pagestash free_pages;	/* stash of recently freed pages */

	/* Set only on the global GTT (see i915_is_ggtt()). */
	bool is_ggtt:1;

	/* Page-table kmaps must be write-combining on this platform. */
	bool pt_kmap_wc:1;

	/* Read-only mappings are supported in this address space. */
	bool has_read_only:1;

	/* Encode a DMA address + caching level + flags into a PTE value. */
	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags);
#define PTE_READ_ONLY (1<<0)

	/* Ensure paging structures exist for [start, start + length). */
	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	/* Clear PTEs across [start, start + length). */
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	/* Write a single PTE at @offset. */
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	/* Write PTEs for all the pages backing @vma. */
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;	/* bind/unbind hooks for vma in this vm */

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};
358
359#define i915_is_ggtt(vm) ((vm)->is_ggtt)
360
361static inline bool
362i915_vm_is_4lvl(const struct i915_address_space *vm)
363{
364 return (vm->total - 1) >> 32;
365}
366
367static inline bool
368i915_vm_has_scratch_64K(struct i915_address_space *vm)
369{
370 return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
371}
372
373
374
375
376
377
378
379
/*
 * The Global GTT: the device-wide address space, part of which is
 * CPU-mappable through the aperture (@gmadr / @iomap).
 */
struct i915_ggtt {
	/* Must be first: i915_vm_to_ggtt() relies on vm being at offset 0. */
	struct i915_address_space vm;

	struct io_mapping iomap;	/* CPU mapping of the mappable range */
	struct resource gmadr;		/* aperture (GMADR) resource */
	resource_size_t mappable_end;	/* end of the CPU-mappable part */

	/* NOTE(review): presumably the CPU view of the global PTE array in
	 * stolen memory ("GSM") — confirm against the probe code. */
	void __iomem *gsm;
	void (*invalidate)(struct drm_i915_private *dev_priv); /* flush GTT TLBs */

	bool do_idle_maps;	/* NOTE(review): workaround flag — confirm users */

	int mtrr;	/* MTRR covering the aperture */

	/* Minimum offset applied when pinning into the GGTT — presumably to
	 * keep below-range addresses free (e.g. for GuC); TODO confirm. */
	u32 pin_bias;

	unsigned int num_fences;	/* number of usable fence registers */
	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
	struct list_head fence_list;

	/*
	 * List of objects currently faulted into userspace via a GGTT
	 * mmap — NOTE(review): confirm exact membership rules at the
	 * fault handler.
	 */
	struct list_head userfault_list;

	/* Wakeref held while userspace GGTT mmaps are live. */
	struct intel_wakeref_auto userfault_wakeref;

	struct drm_mm_node error_capture;	/* reserved GGTT slot for error capture */
	struct drm_mm_node uc_fw;		/* reserved GGTT slot for uC firmware */
};
412
/* A per-process (per-context) GTT layered on the common address space. */
struct i915_ppgtt {
	/* Must be first: i915_vm_to_ppgtt() relies on vm being at offset 0. */
	struct i915_address_space vm;

	/* Engines whose page-directory state is out of date — NOTE(review):
	 * confirm exact semantics against the users of this mask. */
	intel_engine_mask_t pd_dirty_engines;
	struct i915_page_directory *pd;	/* top-level page directory */
};

/* gen6/gen7 flavour of the ppGTT, extending the common i915_ppgtt. */
struct gen6_ppgtt {
	struct i915_ppgtt base;

	struct i915_vma *vma;		/* GGTT vma holding the page directory */
	gen6_pte_t __iomem *pd_addr;	/* CPU view of the PD entries */

	unsigned int pin_count;		/* number of active pins on @vma */
	bool scan_for_unused_pt;	/* request a scan for reclaimable PTs */

	struct gen6_ppgtt_cleanup_work *work;	/* deferred teardown work */
};
431
432#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
433
/*
 * Downcast a generic i915_ppgtt to the gen6 flavour. The BUILD_BUG_ON
 * proves @base is the first member of struct gen6_ppgtt, so the
 * container_of() in __to_gen6_ppgtt() is a plain pointer cast.
 */
static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
{
	BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
	return __to_gen6_ppgtt(base);
}
439
440
441
442
443
444
445
446
447
/*
 * Iterate over the page tables of @pd needed to cover [start, start+length),
 * clamping @length at each 4M (1 << GEN6_PDE_SHIFT) PDE boundary so the
 * body only ever sees the portion of the range inside the current PT.
 */
#define gen6_for_each_pde(pt, pd, start, length, iter) \
	for (iter = gen6_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
	     (pt = i915_pt_entry(pd, iter), true); \
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
		    temp = min(temp - start, length); \
		    start += temp, length -= temp; }), ++iter)

/* Iterate over every page table of @pd, used or not. */
#define gen6_for_all_pdes(pt, pd, iter) \
	for (iter = 0; \
	     iter < I915_PDES && \
	     (pt = i915_pt_entry(pd, iter), true); \
	     ++iter)
461
462static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
463{
464 const u32 mask = NUM_PTE(pde_shift) - 1;
465
466 return (address >> PAGE_SHIFT) & mask;
467}
468
469
470
471
472
/*
 * Number of PTEs required to map [addr, addr + length), clamped so that
 * the count never crosses out of the page table containing @addr: if the
 * range spills into the next page table, only the entries up to the end
 * of the current one are counted. Both @addr and @length must be page
 * aligned (enforced by the second GEM_BUG_ON).
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	/* Range crosses a PDE boundary: count to the end of this PT only. */
	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
488
489static inline u32 i915_pde_index(u64 addr, u32 shift)
490{
491 return (addr >> shift) & I915_PDE_MASK;
492}
493
/* PTE index of @addr within its gen6 page table. */
static inline u32 gen6_pte_index(u32 addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

/* Number of PTEs for [addr, addr+length) within one gen6 page table. */
static inline u32 gen6_pte_count(u32 addr, u32 length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

/* Page-directory index covering @addr on gen6. */
static inline u32 gen6_pde_index(u32 addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
508
509static inline unsigned int
510i915_pdpes_per_pdp(const struct i915_address_space *vm)
511{
512 if (i915_vm_is_4lvl(vm))
513 return GEN8_PML4ES_PER_PML4;
514
515 return GEN8_3LVL_PDPES;
516}
517
/* Return the nth page table beneath the page directory @pd. */
static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

/* Return the nth page directory beneath the PDP @pdp. */
static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

/* Return the nth PDP beneath the PML4 @pml4 (4-level ppGTT). */
static inline struct i915_page_directory *
i915_pdp_entry(const struct i915_page_directory * const pml4,
	       const unsigned short n)
{
	return pml4->entry[n];
}
538
539
540
541
542
/*
 * Iterate over the page tables of @pd needed to cover [start, start+length),
 * clamping @length at each 2M (1 << GEN8_PDE_SHIFT) PDE boundary.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter) \
	for (iter = gen8_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
	     (pt = i915_pt_entry(pd, iter), true); \
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
		    temp = min(temp - start, length); \
		    start += temp, length -= temp; }), ++iter)

/*
 * As above, for the 1G PDP entries of @pdp. NOTE(review): this macro
 * references a variable named 'vm' that must be in scope at the call
 * site — it is not a parameter.
 */
#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
	for (iter = gen8_pdpe_index(start); \
	     length > 0 && iter < i915_pdpes_per_pdp(vm) && \
	     (pd = i915_pd_entry(pdp, iter), true); \
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
		    temp = min(temp - start, length); \
		    start += temp, length -= temp; }), ++iter)

/* As above, for the 512G PML4 entries of @pml4 (4-level ppGTT). */
#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
	for (iter = gen8_pml4e_index(start); \
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
	     (pdp = i915_pdp_entry(pml4, iter), true); \
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
		    temp = min(temp - start, length); \
		    start += temp, length -= temp; }), ++iter)
566
/* PTE index of @address within its gen8 page table. */
static inline u32 gen8_pte_index(u64 address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

/* Page-directory index covering @address on gen8. */
static inline u32 gen8_pde_index(u64 address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

/* PDP-entry index covering @address on gen8. */
static inline u32 gen8_pdpe_index(u64 address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

/* PML4-entry index covering @address (4-level ppGTT only). */
static inline u32 gen8_pml4e_index(u64 address)
{
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

/* Number of PTEs for [address, address+length) within one gen8 page table. */
static inline u64 gen8_pte_count(u64 address, u64 length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
591
592static inline dma_addr_t
593i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
594{
595 struct i915_page_directory *pd;
596
597 pd = i915_pdp_entry(ppgtt->pd, n);
598 return px_dma(pd);
599}
600
601static inline struct i915_ggtt *
602i915_vm_to_ggtt(struct i915_address_space *vm)
603{
604 BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
605 GEM_BUG_ON(!i915_is_ggtt(vm));
606 return container_of(vm, struct i915_ggtt, vm);
607}
608
609static inline struct i915_ppgtt *
610i915_vm_to_ppgtt(struct i915_address_space *vm)
611{
612 BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
613 GEM_BUG_ON(i915_is_ggtt(vm));
614 return container_of(vm, struct i915_ppgtt, vm);
615}
616
#define INTEL_MAX_PPAT_ENTRIES 8
#define INTEL_PPAT_PERFECT_MATCH (~0U)

struct intel_ppat;

/* One refcounted slot of the page attribute table. */
struct intel_ppat_entry {
	struct intel_ppat *ppat;	/* back-pointer to the owning table */
	struct kref ref;		/* users of this slot */
	u8 value;			/* the PPAT value held in this slot */
};

/* Software view of the hardware PPAT, with dirty tracking for HW updates. */
struct intel_ppat {
	struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
	DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);	/* allocated slots */
	DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);	/* slots pending a HW write */
	unsigned int max_entries;	/* slots supported on this platform */
	u8 clear_value;			/* value programmed into unused slots */

	/*
	 * Score how well @src matches @dst, INTEL_PPAT_PERFECT_MATCH being
	 * an exact fit — NOTE(review): confirm scoring semantics at the
	 * call sites of intel_ppat_get().
	 */
	unsigned int (*match)(u8 src, u8 dst);
	void (*update_hw)(struct drm_i915_private *i915); /* flush dirty slots */

	struct drm_i915_private *i915;
};
643
644const struct intel_ppat_entry *
645intel_ppat_get(struct drm_i915_private *i915, u8 value);
646void intel_ppat_put(const struct intel_ppat_entry *entry);
647
648int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
649int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
650int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
651void i915_ggtt_enable_guc(struct drm_i915_private *i915);
652void i915_ggtt_disable_guc(struct drm_i915_private *i915);
653int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
654void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
655
656int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
657
658struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
659
/* Acquire a reference on @vm; pair with i915_vm_put(). */
static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

/* Final-put callback releasing the address space. */
void i915_vm_release(struct kref *kref);

/* Drop a reference taken with i915_vm_get(). */
static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}
673
674int gen6_ppgtt_pin(struct i915_ppgtt *base);
675void gen6_ppgtt_unpin(struct i915_ppgtt *base);
676void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
677
678void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
679void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
680
681int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
682 struct sg_table *pages);
683void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
684 struct sg_table *pages);
685
686int i915_gem_gtt_reserve(struct i915_address_space *vm,
687 struct drm_mm_node *node,
688 u64 size, u64 offset, unsigned long color,
689 unsigned int flags);
690
691int i915_gem_gtt_insert(struct i915_address_space *vm,
692 struct drm_mm_node *node,
693 u64 size, u64 alignment, unsigned long color,
694 u64 start, u64 end, unsigned int flags);
695
696
697#define PIN_NONBLOCK BIT_ULL(0)
698#define PIN_NONFAULT BIT_ULL(1)
699#define PIN_NOEVICT BIT_ULL(2)
700#define PIN_MAPPABLE BIT_ULL(3)
701#define PIN_ZONE_4G BIT_ULL(4)
702#define PIN_HIGH BIT_ULL(5)
703#define PIN_OFFSET_BIAS BIT_ULL(6)
704#define PIN_OFFSET_FIXED BIT_ULL(7)
705
706#define PIN_MBZ BIT_ULL(8)
707#define PIN_GLOBAL BIT_ULL(9)
708#define PIN_USER BIT_ULL(10)
709#define PIN_UPDATE BIT_ULL(11)
710
711#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
712
713#endif
714