1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <drm/drmP.h>
30#include <drm/i915_drm.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include "i915_trace.h"
35
/*
 * Selftest hook: when fail_if_busy is set, i915_gem_evict_something()
 * returns -EBUSY instead of flushing a busy GGTT, so the eviction
 * selftests can exercise the busy path deterministically. Compiled
 * out of non-selftest builds via I915_SELFTEST_DECLARE().
 */
I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)
39
40static bool ggtt_is_idle(struct drm_i915_private *i915)
41{
42 struct intel_engine_cs *engine;
43 enum intel_engine_id id;
44
45 if (i915->gt.active_requests)
46 return false;
47
48 for_each_engine(engine, i915, id) {
49 if (!intel_engine_has_kernel_context(engine))
50 return false;
51 }
52
53 return true;
54}
55
56static int ggtt_flush(struct drm_i915_private *i915)
57{
58 int err;
59
60
61
62
63
64
65
66 err = i915_gem_switch_to_kernel_context(i915);
67 if (err)
68 return err;
69
70 err = i915_gem_wait_for_idle(i915,
71 I915_WAIT_INTERRUPTIBLE |
72 I915_WAIT_LOCKED);
73 if (err)
74 return err;
75
76 GEM_BUG_ON(!ggtt_is_idle(i915));
77 return 0;
78}
79
80static bool
81mark_free(struct drm_mm_scan *scan,
82 struct i915_vma *vma,
83 unsigned int flags,
84 struct list_head *unwind)
85{
86 if (i915_vma_is_pinned(vma))
87 return false;
88
89 if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
90 return false;
91
92 list_add(&vma->evict_link, unwind);
93 return drm_mm_scan_add_block(scan, &vma->node);
94}
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
/**
 * i915_gem_evict_something - Evict vmas to make room for a new binding
 * @vm: address space to evict from
 * @min_size: minimum size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache colouring for the desired space
 * @start: start (inclusive) of the range from which to evict
 * @end: end (exclusive) of the range from which to evict
 * @flags: PIN_* flags controlling the eviction algorithm
 *
 * Runs a drm_mm eviction scan over @vm to find a set of vmas whose removal
 * opens a hole satisfying the given size/alignment/colour constraints, then
 * unbinds them. Callers are expected to have already tried (and failed) a
 * direct insertion before calling this.
 *
 * Returns 0 on success (a suitable hole now exists), -ENOSPC if nothing
 * evictable was found, -EAGAIN if a pending fb unpin may free space soon,
 * -EBUSY only from the selftest hook, or a negative error code from the
 * GGTT flush or unbind.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * Choose the insertion mode to match how the caller will place the
	 * new node: PIN_MAPPABLE wants low addresses, PIN_HIGH wants high,
	 * otherwise take the best fit. Note PIN_MAPPABLE deliberately
	 * overrides PIN_HIGH if both are set.
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end, mode);

	/*
	 * Retire completed requests first so recently-used vmas drop onto
	 * the inactive list; with PIN_NONBLOCK we may not wait at all, so
	 * restrict the scan to the inactive list only (phases[1] = NULL).
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_retire_requests(dev_priv);
	else
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		/* Feed vmas to the scan, inactive before active. */
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(&scan, vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found: back every candidate out of the scan. */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/*
	 * Only the GGTT has the extra flush-and-retry fallback below; for
	 * a ppGTT, or when the caller refuses to block, give up now.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * The GGTT may still be pinned by active contexts. If it is not
	 * yet idle, flush it (switch to the kernel context and wait),
	 * which releases those pins, then rescan from the top.
	 */
	if (!ggtt_is_idle(dev_priv)) {
		/* Selftests want to observe the busy case directly. */
		if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
			return -EBUSY;

		ret = ggtt_flush(dev_priv);
		if (ret)
			return ret;

		cond_resched();
		goto search_again;
	}

	/*
	 * Idle and still no room: if a framebuffer unpin is pending,
	 * tell the caller to try again shortly; otherwise truly -ENOSPC.
	 */
	return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;

found:
	/*
	 * drm_mm_scan_remove_block() reports which of the scanned vmas
	 * actually form the hole. Pin those so the upcoming unbinds of
	 * their neighbours cannot retire/free them under us; drop the
	 * rest from the eviction list.
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unpin and unbind the winners; keep the first error but finish unpinning. */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	/* Evict any extra nodes required to satisfy cache colouring. */
	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}
260
261
262
263
264
265
266
267
268
269
270
271
/**
 * i915_gem_evict_for_node - Evict vmas overlapping a target drm_mm node
 * @vm: address space to evict from
 * @target: range (and colour) that must become free
 * @flags: PIN_* flags controlling the eviction algorithm
 *
 * Walks every node overlapping [target->start, target->start + target->size)
 * (widened by one page on each side when the mm uses cache colouring) and
 * unbinds each evictable vma found, so @target can subsequently be inserted
 * at that exact location.
 *
 * Returns 0 on success, -ENOSPC if an overlapping node cannot be evicted
 * (unevictable colour, pinned, active under PIN_NONBLOCK, or userfault
 * under PIN_NONFAULT), -EINVAL if it is pinned by an execbuf EXEC_OBJECT_PINNED
 * request, or a negative error code from i915_vma_unbind().
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/*
	 * Retire completed requests so vmas that have just finished are
	 * seen as inactive; skipped under PIN_NONBLOCK where we promise
	 * not to wait.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_retire_requests(vm->i915);

	/*
	 * With cache colouring, a differently-coloured neighbour must not
	 * abut the target, so widen the search by one guard page on each
	 * side (clamped at 0 on the low end) to catch such neighbours.
	 */
	check_color = vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover the node immediately below... */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* ...and the node immediately above. */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* Reserved nodes (no vma behind them) can never be evicted. */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!node->allocated);
		vma = container_of(node, typeof(*vma), node);

		/*
		 * A neighbour inside the widened guard band that only
		 * *touches* the target (no actual overlap) may stay if it
		 * shares the target's colour — no guard page is needed
		 * between same-coloured nodes.
		 */
		if (check_color) {
			/* Node sits immediately below the target... */
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			/* ...or immediately above it. */
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		/* PIN_NONBLOCK: refuse to touch busy or pinned vmas. */
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* PIN_NONFAULT: refuse to revoke a live userspace mapping. */
		if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
			ret = -ENOSPC;
			break;
		}

		/*
		 * A pinned vma blocks us; distinguish the execbuf
		 * EXEC_OBJECT_PINNED case, which is a caller error
		 * (conflicting fixed placements) rather than lack of space.
		 */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_flags &&
			    *vma->exec_flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/*
		 * Take a pin while we walk the remaining overlapping nodes:
		 * unbinding a neighbour may otherwise retire and free this
		 * vma (and its node) out from under the iterator.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	/* Drop the temporary pins and unbind; first error wins but we always unpin. */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}
376
377
378
379
380
381
382
383
384
385
386
387
388
389int i915_gem_evict_vm(struct i915_address_space *vm)
390{
391 struct list_head *phases[] = {
392 &vm->inactive_list,
393 &vm->active_list,
394 NULL
395 }, **phase;
396 struct list_head eviction_list;
397 struct i915_vma *vma, *next;
398 int ret;
399
400 lockdep_assert_held(&vm->i915->drm.struct_mutex);
401 trace_i915_gem_evict_vm(vm);
402
403
404
405
406
407
408 if (i915_is_ggtt(vm)) {
409 ret = ggtt_flush(vm->i915);
410 if (ret)
411 return ret;
412 }
413
414 INIT_LIST_HEAD(&eviction_list);
415 phase = phases;
416 do {
417 list_for_each_entry(vma, *phase, vm_link) {
418 if (i915_vma_is_pinned(vma))
419 continue;
420
421 __i915_vma_pin(vma);
422 list_add(&vma->evict_link, &eviction_list);
423 }
424 } while (*++phase);
425
426 ret = 0;
427 list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
428 __i915_vma_unpin(vma);
429 if (ret == 0)
430 ret = i915_vma_unbind(vma);
431 }
432 return ret;
433}
434
435#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
436#include "selftests/i915_gem_evict.c"
437#endif
438