1
2
3
4
5
6
7#include <linux/slab.h>
8
9#include <linux/fault-inject.h>
10#include <linux/log2.h>
11#include <linux/random.h>
12#include <linux/seq_file.h>
13#include <linux/stop_machine.h>
14
15#include <asm/set_memory.h>
16#include <asm/smp.h>
17
18#include "display/intel_frontbuffer.h"
19#include "gt/intel_gt.h"
20#include "gt/intel_gt_requests.h"
21
22#include "i915_drv.h"
23#include "i915_scatterlist.h"
24#include "i915_trace.h"
25#include "i915_vgpu.h"
26
27int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
28 struct sg_table *pages)
29{
30 do {
31 if (dma_map_sg_attrs(obj->base.dev->dev,
32 pages->sgl, pages->nents,
33 PCI_DMA_BIDIRECTIONAL,
34 DMA_ATTR_SKIP_CPU_SYNC |
35 DMA_ATTR_NO_KERNEL_MAPPING |
36 DMA_ATTR_NO_WARN))
37 return 0;
38
39
40
41
42
43
44
45
46 GEM_BUG_ON(obj->mm.pages == pages);
47 } while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
48 obj->base.size >> PAGE_SHIFT, NULL,
49 I915_SHRINK_BOUND |
50 I915_SHRINK_UNBOUND));
51
52 return -ENOSPC;
53}
54
55void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
56 struct sg_table *pages)
57{
58 struct drm_i915_private *i915 = to_i915(obj->base.dev);
59 struct i915_ggtt *ggtt = &i915->ggtt;
60
61
62 if (unlikely(ggtt->do_idle_maps))
63
64 usleep_range(100, 250);
65
66 dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
67 PCI_DMA_BIDIRECTIONAL);
68}
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the address space to reserve the node in
 * @node: the node to insert; must not already be allocated
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: opaque tag applied to the node for neighbour checks
 * @flags: control eviction behaviour (e.g. PIN_NOEVICT forbids eviction)
 *
 * Try to insert @node at exactly @offset with @size inside @vm. If that
 * range is already occupied and eviction is permitted, evict whatever
 * overlaps the range and retry the reservation once.
 *
 * Returns: 0 on success, -ENOSPC if the requested range could not be
 * freed up, or another negative error code from eviction.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/* The exact slot is busy; evict anything overlapping it and retry. */
	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
126
127static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
128{
129 u64 range, addr;
130
131 GEM_BUG_ON(range_overflows(start, len, end));
132 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
133
134 range = round_down(end - len, align) - round_up(start, align);
135 if (range) {
136 if (sizeof(unsigned long) == sizeof(u64)) {
137 addr = get_random_long();
138 } else {
139 addr = get_random_int();
140 if (range > U32_MAX) {
141 addr <<= 32;
142 addr |= get_random_int();
143 }
144 }
145 div64_u64_rem(addr, range, &addr);
146 start += addr;
147 }
148
149 return round_up(start, align);
150}
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the address space to insert into (its mutex must be held)
 * @node: the node to insert; must not already be allocated
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of the starting offset; must be a
 *             power of two, #I915_GTT_MIN_ALIGNMENT aligned, or 0
 * @color: opaque tag applied to the node for neighbour checks
 * @start: start of any range restriction inside the GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside the GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour (PIN_HIGH, PIN_MAPPABLE,
 *         PIN_NOEVICT, PIN_NOSEARCH)
 *
 * Find space for @node inside [@start, @end): first by searching for a
 * free hole, then by evicting a randomly chosen placement, and finally
 * by a full eviction scan.
 *
 * Returns: 0 on success, -ENOSPC if no hole could be found or created,
 * or another negative error code from eviction.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->mutex);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/*
	 * We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	/*
	 * A mode carrying DRM_MM_INSERT_ONCE (e.g. DRM_MM_INSERT_HIGHEST)
	 * only tries a single hole; retry with a full best-fit search
	 * before conceding that the GTT has no free space.
	 */
	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/*
	 * No free space: pick a placement at random and evict whatever
	 * overlaps it. Randomising the target spreads eviction pressure
	 * over the whole range instead of repeatedly penalising the
	 * nodes at one end, amortising the cost across insertions.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOSEARCH)
		return -ENOSPC;

	/* Last resort: a full eviction scan over the whole range. */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
289
290#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
291#include "selftests/i915_gem_gtt.c"
292#endif
293