// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>

#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"

void stash_init(struct pagestash *stash)
{
	pagevec_init(&stash->pvec);
	spin_lock_init(&stash->lock);
}

static struct page *stash_pop_page(struct pagestash *stash)
{
	struct page *page = NULL;

	spin_lock(&stash->lock);
	if (likely(stash->pvec.nr))
		page = stash->pvec.pages[--stash->pvec.nr];
	spin_unlock(&stash->lock);

	return page;
}

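/*
 * Pushing into one stash may happen while another pagestash lock is
 * already held (stash-to-stash transfer in vm_free_pages_release()),
 * so take the lock with a nested subclass to keep lockdep happy.
 */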
static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
	unsigned int nr;

	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);

	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
	memcpy(stash->pvec.pages + stash->pvec.nr,
	       pvec->pages + pvec->nr - nr,
	       sizeof(pvec->pages[0]) * nr);
	stash->pvec.nr += nr;

	spin_unlock(&stash->lock);

	pvec->nr -= nr;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec stack;
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	page = stash_pop_page(&vm->free_pages);
	if (page)
		return page;

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

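	/* Look in our global stash of WC pages... */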
	page = stash_pop_page(&vm->i915->mm.wc_stash);
	if (page)
		return page;

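	/*
	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
	 *
	 * We have to be careful as page allocation may trigger the shrinker
	 * (via direct reclaim) which will fill up the WC stash underneath us.
	 * So we add our WB pages into a temporary pvec on the stack and merge
	 * them into the WC stash after all the allocations are complete.
	 */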
	pagevec_init(&stack);
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		stack.pages[stack.nr++] = page;
	} while (pagevec_space(&stack));

	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
		page = stack.pages[--stack.nr];

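		/* Merge spare WC pages to the global stash */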
		if (stack.nr)
			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);

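		/* Push any surplus WC pages onto the local VM stash */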
		if (stack.nr)
			stash_push_pagevec(&vm->free_pages, &stack);
	}

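	/* Return unwanted leftovers */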
	if (unlikely(stack.nr)) {
		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
		__pagevec_release(&stack);
	}

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
{
	struct pagevec *pvec = &vm->free_pages.pvec;
	struct pagevec stack;

	lockdep_assert_held(&vm->free_pages.lock);
	GEM_BUG_ON(!pagevec_count(pvec));

	if (vm->pt_kmap_wc) {
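		/*
		 * When we use WC, first fill up the global stash and then
		 * only if full immediately free the overflow.
		 */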
		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);

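		/*
		 * As we have made some room in the VM's free_pages,
		 * we can wait for it to fill again. Unless we are
		 * inside i915_address_space_fini() and must
		 * immediately release the pages!
		 */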
		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
			return;

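		/*
		 * We have to drop the lock to allow ourselves to sleep,
		 * so take a copy of the pvec and clear the stash for
		 * others to use it as we sleep.
		 */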
		stack = *pvec;
		pagevec_reinit(pvec);
		spin_unlock(&vm->free_pages.lock);

		pvec = &stack;
		set_pages_array_wb(pvec->pages, pvec->nr);

		spin_lock(&vm->free_pages.lock);
	}

	__pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
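	/*
	 * On !llc, we need to change the pages back to WB. We only do so
	 * in bulk, so we rarely need to change the page attributes here,
	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
	 * To make detection of the possible sleep more likely, use an
	 * unconditional might_sleep() for everybody.
	 */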
	might_sleep();
	spin_lock(&vm->free_pages.lock);
	while (!pagevec_space(&vm->free_pages.pvec))
		vm_free_pages_release(vm, false);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
	pagevec_add(&vm->free_pages.pvec, page);
	spin_unlock(&vm->free_pages.lock);
}

void __i915_vm_close(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;

	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
		return;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

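		/* Keep the obj (and hence the vma) alive as _we_ destroy it */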
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
	}
	GEM_BUG_ON(!list_empty(&vm->bound_list));

	mutex_unlock(&vm->mutex);
}

void i915_address_space_fini(struct i915_address_space *vm)
{
	spin_lock(&vm->free_pages.lock);
	if (pagevec_count(&vm->free_pages.pvec))
		vm_free_pages_release(vm, true);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
	spin_unlock(&vm->free_pages.lock);

	drm_mm_takedown(&vm->mm);

	mutex_destroy(&vm->mutex);
}

static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, rcu.work);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	kfree(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_rcu_work(vm->i915->wq, &vm->rcu);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);
	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
	atomic_set(&vm->open, 1);

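	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */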
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);
	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	stash_init(&vm->free_pages);

	INIT_LIST_HEAD(&vm->bound_list);
}

void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page_attrs(vm->dma,
				      p->page, 0, PAGE_SIZE,
				      PCI_DMA_BIDIRECTIONAL,
				      DMA_ATTR_SKIP_CPU_SYNC |
				      DMA_ATTR_NO_WARN);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}

void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
{
	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}

static void poison_scratch_page(struct page *page, unsigned long size)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	do {
		void *vaddr;

		vaddr = kmap(page);
		memset(vaddr, POISON_FREE, PAGE_SIZE);
		kunmap(page);

		page = pfn_to_page(page_to_pfn(page) + 1);
		size -= PAGE_SIZE;
	} while (size);
}

int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	unsigned long size;

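	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */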
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
		size = I915_GTT_PAGE_SIZE_64K;
		gfp |= __GFP_NOWARN;
	}
	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;

	do {
		unsigned int order = get_order(size);
		struct page *page;
		dma_addr_t addr;

		page = alloc_pages(gfp, order);
		if (unlikely(!page))
			goto skip;

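		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */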
		poison_scratch_page(page, size);

		addr = dma_map_page_attrs(vm->dma,
					  page, 0, size,
					  PCI_DMA_BIDIRECTIONAL,
					  DMA_ATTR_SKIP_CPU_SYNC |
					  DMA_ATTR_NO_WARN);
		if (unlikely(dma_mapping_error(vm->dma, addr)))
			goto free_page;

		if (unlikely(!IS_ALIGNED(addr, size)))
			goto unmap_page;

		vm->scratch[0].base.page = page;
		vm->scratch[0].base.daddr = addr;
		vm->scratch_order = order;
		return 0;

unmap_page:
		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
free_page:
		__free_pages(page, order);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
		gfp &= ~__GFP_NOWARN;
	} while (1);
}

void cleanup_scratch_page(struct i915_address_space *vm)
{
	struct i915_page_dma *p = px_base(&vm->scratch[0]);
	unsigned int order = vm->scratch_order;

	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
		       PCI_DMA_BIDIRECTIONAL);
	__free_pages(p->page, order);
}

void free_scratch(struct i915_address_space *vm)
{
	int i;

	if (!px_dma(&vm->scratch[0]))
		return;

	for (i = 1; i <= vm->top; i++) {
		if (!px_dma(&vm->scratch[i]))
			break;
		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
	}

	cleanup_scratch_page(vm);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

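	/*
	 * This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cnl,icl */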
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

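	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */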
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    INTEL_GEN(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GEN_RANGE(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

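		/*
		 * According to the BSpec if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least for GEN8 this is an
		 * undocumented HW restriction.
		 */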
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

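		/* WaGttCachingOffByDefault */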
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
				 intel_uncore_read(uncore,
						   HSW_GTT_CACHE_EN) == 0);
	}
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
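	/* TGL doesn't support LLC or AGE settings */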
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void cnl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

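/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */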
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |
	      GEN8_PPAT(3, GEN8_PPAT_UC) |
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

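	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */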
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	if (INTEL_GEN(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (INTEL_GEN(i915) >= 10)
		cnl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif