1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <drm/drmP.h>
30#include <drm/i915_drm.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include "i915_trace.h"
35
/*
 * Selftest knob, compiled in only for CONFIG_DRM_I915_SELFTEST builds:
 * when fail_if_busy is set, eviction from a busy GGTT fails immediately
 * with -EBUSY instead of flushing and retrying (see
 * i915_gem_evict_something()).
 */
I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)
39
40static bool ggtt_is_idle(struct drm_i915_private *i915)
41{
42 struct intel_engine_cs *engine;
43 enum intel_engine_id id;
44
45 if (i915->gt.active_requests)
46 return false;
47
48 for_each_engine(engine, i915, id) {
49 if (!intel_engine_has_kernel_context(engine))
50 return false;
51 }
52
53 return true;
54}
55
56static int ggtt_flush(struct drm_i915_private *i915)
57{
58 int err;
59
60
61
62
63
64
65
66 err = i915_gem_switch_to_kernel_context(i915);
67 if (err)
68 return err;
69
70 err = i915_gem_wait_for_idle(i915,
71 I915_WAIT_INTERRUPTIBLE |
72 I915_WAIT_LOCKED,
73 MAX_SCHEDULE_TIMEOUT);
74 if (err)
75 return err;
76
77 GEM_BUG_ON(!ggtt_is_idle(i915));
78 return 0;
79}
80
81static bool
82mark_free(struct drm_mm_scan *scan,
83 struct i915_vma *vma,
84 unsigned int flags,
85 struct list_head *unwind)
86{
87 if (i915_vma_is_pinned(vma))
88 return false;
89
90 if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
91 return false;
92
93 list_add(&vma->evict_link, unwind);
94 return drm_mm_scan_add_block(scan, &vma->node);
95}
96
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole
 * exists already before calling this function.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: this is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 *
 * Returns 0 on success, -ENOSPC if nothing suitable could be evicted,
 * -EBUSY (selftests only) if the GGTT was busy, -EAGAIN if a pending
 * framebuffer unpin may resolve the pressure, or a negative errno from
 * flushing/unbinding.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in rough LRU
	 * order. We scan the inactive list first (already retired vmas),
	 * then the active list (vmas that will stall on unbinding) --
	 * hence the phases[] ordering above.
	 *
	 * The insertion mode mirrors the allocation the caller is about to
	 * perform: PIN_HIGH scans from the top of the range, PIN_MAPPABLE
	 * from the bottom (note PIN_MAPPABLE deliberately overrides
	 * PIN_HIGH when both are set, by assignment order).
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end, mode);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring. For PIN_NONBLOCK we must not stall on active vmas at
	 * all, so drop the active phase instead.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_retire_requests(dev_priv);
	else
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(&scan, vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found: unwind the scan state and bail out. */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/*
	 * Can we unpin some objects such as idle hw contexts, or pending
	 * flips? Only the GGTT holds such global entries (scanouts,
	 * ringbuffers, contexts), so skip the flush-and-retry dance for
	 * per-process address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * If the GGTT is still busy, switching everything to the kernel
	 * context and waiting for idle should release pins held by user
	 * contexts, after which a rescan may succeed. Selftests can force
	 * the early -EBUSY exit here to probe this path.
	 */
	if (!ggtt_is_idle(dev_priv)) {
		if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
			return -EBUSY;

		ret = ggtt_flush(dev_priv);
		if (ret)
			return ret;

		cond_resched();
		goto search_again;
	}

	/*
	 * If we still have pending framebuffer unpins, report -EAGAIN so
	 * the caller can drop the lock and give the unpin workers a chance
	 * to release the old scanouts before retrying.
	 */
	return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;

found:
	/*
	 * drm_mm doesn't allow any other operations while scanning,
	 * therefore we keep the to-be-evicted vmas on a temporary list and
	 * pin each one that the scan selected before unbinding: unbinding
	 * one vma may drop the active reference of another and corrupt the
	 * list otherwise. Vmas the scan rejected are simply dropped.
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unbinding will emit any required flushes. */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	/* Also evict neighbours flagged by the scan for cache coloring. */
	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}
261
/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: this is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 *
 * Returns 0 on success, -ENOSPC if an overlapping vma cannot be evicted,
 * -EINVAL if the overlap is with an EXEC_OBJECT_PINNED vma of the same
 * batch, or a negative errno from unbinding.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_retire_requests(vm->i915);

	check_color = vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page after to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* Reserved (non-vma) nodes can never be evicted. */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!node->allocated);
		vma = container_of(node, typeof(*vma), node);

		/*
		 * If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. A same-colored neighbour that merely
		 * touches the expanded range needs no guard page and can
		 * stay.
		 */
		if (check_color) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_flags &&
			    *vma->exec_flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/*
		 * We cannot remove this node from within this iterator, so
		 * as with i915_gem_evict_something() we employ the vma
		 * pin_count to prevent the action of unbinding one vma from
		 * freeing (by dropping its active reference) another in our
		 * eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	/* Unpin and unbind the collected vmas; keep the first error. */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}
377
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: this is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 *
 * Returns 0 on success, or a negative errno from flushing/unbinding.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL
	}, **phase;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	/*
	 * Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT, so this flush is only
	 * meaningful (and only attempted) for the GGTT.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->i915);
		if (ret)
			return ret;
	}

	/*
	 * Collect every unpinned vma, pinning each one so that unbinding
	 * one vma cannot free another still on our eviction list (see
	 * i915_gem_evict_something()).
	 */
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;

			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
	} while (*++phase);

	/* Unpin and unbind everything collected; keep the first error. */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}
435
436#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
437#include "selftests/i915_gem_evict.c"
438#endif
439