1
2
3
4
5
6
7#include <linux/slab.h>
8
9#include <linux/fault-inject.h>
10#include <linux/log2.h>
11#include <linux/random.h>
12#include <linux/seq_file.h>
13#include <linux/stop_machine.h>
14
15#include <asm/set_memory.h>
16#include <asm/smp.h>
17
18#include "display/intel_frontbuffer.h"
19#include "gt/intel_gt.h"
20#include "gt/intel_gt_requests.h"
21
22#include "i915_drv.h"
23#include "i915_scatterlist.h"
24#include "i915_trace.h"
25#include "i915_vgpu.h"
26
27int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
28 struct sg_table *pages)
29{
30 do {
31 if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
32 pages->sgl, pages->nents,
33 PCI_DMA_BIDIRECTIONAL,
34 DMA_ATTR_SKIP_CPU_SYNC |
35 DMA_ATTR_NO_KERNEL_MAPPING |
36 DMA_ATTR_NO_WARN))
37 return 0;
38
39
40
41
42
43
44
45
46 GEM_BUG_ON(obj->mm.pages == pages);
47 } while (i915_gem_shrink(to_i915(obj->base.dev),
48 obj->base.size >> PAGE_SHIFT, NULL,
49 I915_SHRINK_BOUND |
50 I915_SHRINK_UNBOUND));
51
52 return -ENOSPC;
53}
54
55void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
56 struct sg_table *pages)
57{
58 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
59 struct device *kdev = &dev_priv->drm.pdev->dev;
60 struct i915_ggtt *ggtt = &dev_priv->ggtt;
61
62 if (unlikely(ggtt->do_idle_maps)) {
63
64 if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
65 -MAX_SCHEDULE_TIMEOUT)) {
66 drm_err(&dev_priv->drm,
67 "Failed to wait for idle; VT'd may hang.\n");
68
69 udelay(10);
70 }
71 }
72
73 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
74}
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101int i915_gem_gtt_reserve(struct i915_address_space *vm,
102 struct drm_mm_node *node,
103 u64 size, u64 offset, unsigned long color,
104 unsigned int flags)
105{
106 int err;
107
108 GEM_BUG_ON(!size);
109 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
110 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
111 GEM_BUG_ON(range_overflows(offset, size, vm->total));
112 GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
113 GEM_BUG_ON(drm_mm_node_allocated(node));
114
115 node->size = size;
116 node->start = offset;
117 node->color = color;
118
119 err = drm_mm_reserve_node(&vm->mm, node);
120 if (err != -ENOSPC)
121 return err;
122
123 if (flags & PIN_NOEVICT)
124 return -ENOSPC;
125
126 err = i915_gem_evict_for_node(vm, node, flags);
127 if (err == 0)
128 err = drm_mm_reserve_node(&vm->mm, node);
129
130 return err;
131}
132
133static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
134{
135 u64 range, addr;
136
137 GEM_BUG_ON(range_overflows(start, len, end));
138 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
139
140 range = round_down(end - len, align) - round_up(start, align);
141 if (range) {
142 if (sizeof(unsigned long) == sizeof(u64)) {
143 addr = get_random_long();
144 } else {
145 addr = get_random_int();
146 if (range > U32_MAX) {
147 addr <<= 32;
148 addr |= get_random_int();
149 }
150 }
151 div64_u64_rem(addr, range, &addr);
152 start += addr;
153 }
154
155 return round_up(start, align);
156}
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192int i915_gem_gtt_insert(struct i915_address_space *vm,
193 struct drm_mm_node *node,
194 u64 size, u64 alignment, unsigned long color,
195 u64 start, u64 end, unsigned int flags)
196{
197 enum drm_mm_insert_mode mode;
198 u64 offset;
199 int err;
200
201 lockdep_assert_held(&vm->mutex);
202
203 GEM_BUG_ON(!size);
204 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
205 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
206 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
207 GEM_BUG_ON(start >= end);
208 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
209 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
210 GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
211 GEM_BUG_ON(drm_mm_node_allocated(node));
212
213 if (unlikely(range_overflows(start, size, end)))
214 return -ENOSPC;
215
216 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
217 return -ENOSPC;
218
219 mode = DRM_MM_INSERT_BEST;
220 if (flags & PIN_HIGH)
221 mode = DRM_MM_INSERT_HIGHEST;
222 if (flags & PIN_MAPPABLE)
223 mode = DRM_MM_INSERT_LOW;
224
225
226
227
228
229
230
231 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
232 if (alignment <= I915_GTT_MIN_ALIGNMENT)
233 alignment = 0;
234
235 err = drm_mm_insert_node_in_range(&vm->mm, node,
236 size, alignment, color,
237 start, end, mode);
238 if (err != -ENOSPC)
239 return err;
240
241 if (mode & DRM_MM_INSERT_ONCE) {
242 err = drm_mm_insert_node_in_range(&vm->mm, node,
243 size, alignment, color,
244 start, end,
245 DRM_MM_INSERT_BEST);
246 if (err != -ENOSPC)
247 return err;
248 }
249
250 if (flags & PIN_NOEVICT)
251 return -ENOSPC;
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276 offset = random_offset(start, end,
277 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
278 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
279 if (err != -ENOSPC)
280 return err;
281
282 if (flags & PIN_NOSEARCH)
283 return -ENOSPC;
284
285
286 err = i915_gem_evict_something(vm, size, alignment, color,
287 start, end, flags);
288 if (err)
289 return err;
290
291 return drm_mm_insert_node_in_range(&vm->mm, node,
292 size, alignment, color,
293 start, end, DRM_MM_INSERT_EVICT);
294}
295
296#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
297#include "selftests/i915_gem_gtt.c"
298#endif
299