#include "i915_drv.h"
#include "gt/intel_ggtt_fencing.h"
#include "gvt.h"

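/*
 * alloc_gm - reserve a block of host GGTT space for a vGPU
 *
 * Insert a drm_mm node covering either the vGPU's mappable (low) aperture
 * or its hidden (high) range into the host GGTT.
 */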
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gt *gt = gvt->gt;
	unsigned int flags;
	u64 start, end, size;
	struct drm_mm_node *node;
	int ret;

	if (high_gm) {
		node = &vgpu->gm.high_gm_node;
		size = vgpu_hidden_sz(vgpu);
		start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
		end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
		flags = PIN_HIGH;
	} else {
		node = &vgpu->gm.low_gm_node;
		size = vgpu_aperture_sz(vgpu);
		start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
		end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
		flags = PIN_MAPPABLE;
	}

	mutex_lock(&gt->ggtt->vm.mutex);
	mmio_hw_access_pre(gt);
	ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
				  size, I915_GTT_PAGE_SIZE,
				  I915_COLOR_UNEVICTABLE,
				  start, end, flags);
	mmio_hw_access_post(gt);
	mutex_unlock(&gt->ggtt->vm.mutex);
	if (ret)
		gvt_err("fail to alloc %s gm space from host\n",
			high_gm ? "high" : "low");

	return ret;
}

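/*
 * alloc_vgpu_gm - allocate both the low and high GM ranges for a vGPU,
 * releasing the low range again if the high allocation fails.
 */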
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gt *gt = gvt->gt;
	int ret;

	ret = alloc_gm(vgpu, false);
	if (ret)
		return ret;

	ret = alloc_gm(vgpu, true);
	if (ret)
		goto out_free_aperture;

	gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
		     vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));

	gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
		     vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));

	return 0;
out_free_aperture:
	mutex_lock(&gt->ggtt->vm.mutex);
	drm_mm_remove_node(&vgpu->gm.low_gm_node);
	mutex_unlock(&gt->ggtt->vm.mutex);
	return ret;
}

static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gt *gt = gvt->gt;

	mutex_lock(&gt->ggtt->vm.mutex);
	drm_mm_remove_node(&vgpu->gm.low_gm_node);
	drm_mm_remove_node(&vgpu->gm.high_gm_node);
	mutex_unlock(&gt->ggtt->vm.mutex);
}

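/**
 * intel_vgpu_write_fence - write a fence register owned by a vGPU
 * @vgpu: vGPU instance
 * @fence: vGPU fence register index
 * @value: fence register value to be written
 *
 * Program the hardware fence register backing the vGPU's fence register
 * @fence. The caller must hold a runtime PM wakeref.
 */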
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
		u32 fence, u64 value)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;
	struct intel_uncore *uncore = gvt->gt->uncore;
	struct i915_fence_reg *reg;
	i915_reg_t fence_reg_lo, fence_reg_hi;

	assert_rpm_wakelock_held(uncore->rpm);

	if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
		return;

	reg = vgpu->fence.regs[fence];
	if (drm_WARN_ON(&i915->drm, !reg))
		return;

	fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
	fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);

	intel_uncore_write(uncore, fence_reg_lo, 0);
	intel_uncore_posting_read(uncore, fence_reg_lo);

	intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
	intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
	intel_uncore_posting_read(uncore, fence_reg_lo);
}

static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
{
	int i;

	for (i = 0; i < vgpu_fence_sz(vgpu); i++)
		intel_vgpu_write_fence(vgpu, i, 0);
}

static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_uncore *uncore = gvt->gt->uncore;
	struct i915_fence_reg *reg;
	intel_wakeref_t wakeref;
	u32 i;

	if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
		return;

	wakeref = intel_runtime_pm_get(uncore->rpm);

	mutex_lock(&gvt->gt->ggtt->vm.mutex);
	_clear_vgpu_fence(vgpu);
	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
		reg = vgpu->fence.regs[i];
		i915_unreserve_fence(reg);
		vgpu->fence.regs[i] = NULL;
	}
	mutex_unlock(&gvt->gt->ggtt->vm.mutex);

	intel_runtime_pm_put(uncore->rpm, wakeref);
}

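/*
 * alloc_vgpu_fence - reserve host fence registers for a vGPU and clear them,
 * returning any partially reserved registers to the host on failure.
 */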
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_uncore *uncore = gvt->gt->uncore;
	struct i915_fence_reg *reg;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_runtime_pm_get(uncore->rpm);

	/* Request fence registers from the host GGTT. */
	mutex_lock(&gvt->gt->ggtt->vm.mutex);

	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
		reg = i915_reserve_fence(gvt->gt->ggtt);
		if (IS_ERR(reg))
			goto out_free_fence;

		vgpu->fence.regs[i] = reg;
	}

	_clear_vgpu_fence(vgpu);

	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
	intel_runtime_pm_put(uncore->rpm, wakeref);
	return 0;

out_free_fence:
	gvt_vgpu_err("Failed to alloc fences\n");
	/* Return any fences already reserved back to the host. */
	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
		reg = vgpu->fence.regs[i];
		if (!reg)
			continue;
		i915_unreserve_fence(reg);
		vgpu->fence.regs[i] = NULL;
	}
	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
	intel_runtime_pm_put_unchecked(uncore->rpm);
	return -ENOSPC;
}

static void free_resource(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
	gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
	gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
}

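/*
 * alloc_resource - validate the requested low/high GM and fence sizes against
 * the remaining host capacity and record the allocation in the gvt-wide
 * accounting.
 */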
static int alloc_resource(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned long request, avail, max, taken;
	const char *item;

	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
		gvt_vgpu_err("Invalid vGPU creation params\n");
		return -EINVAL;
	}

	item = "low GM space";
	max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	taken = gvt->gm.vgpu_allocated_low_gm_size;
	avail = max - taken;
	request = MB_TO_BYTES(param->low_gm_sz);

	if (request > avail)
		goto no_enough_resource;

	vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

	item = "high GM space";
	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	taken = gvt->gm.vgpu_allocated_high_gm_size;
	avail = max - taken;
	request = MB_TO_BYTES(param->high_gm_sz);

	if (request > avail)
		goto no_enough_resource;

	vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

	item = "fence";
	max = gvt_fence_sz(gvt) - HOST_FENCE;
	taken = gvt->fence.vgpu_allocated_fence_num;
	avail = max - taken;
	request = param->fence_sz;

	if (request > avail)
		goto no_enough_resource;

	vgpu_fence_sz(vgpu) = request;

	gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
	gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
	gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
	return 0;

no_enough_resource:
	gvt_err("fail to allocate resource %s\n", item);
	gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n",
		BYTES_TO_MB(request), BYTES_TO_MB(avail),
		BYTES_TO_MB(max), BYTES_TO_MB(taken));
	return -ENOSPC;
}

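/**
 * intel_vgpu_free_resource - free HW resources owned by a vGPU
 * @vgpu: vGPU instance
 *
 * Release the vGPU's graphics memory nodes and fence registers and return
 * its share to the GVT resource accounting.
 */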
void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
{
	free_vgpu_gm(vgpu);
	free_vgpu_fence(vgpu);
	free_resource(vgpu);
}

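/**
 * intel_vgpu_reset_resource - reset HW resources owned by a vGPU
 * @vgpu: vGPU instance
 *
 * Clear the vGPU's fence registers while holding a runtime PM wakeref.
 */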
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
		_clear_vgpu_fence(vgpu);
}

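/**
 * intel_vgpu_alloc_resource - allocate HW resources for a vGPU
 * @vgpu: vGPU instance
 * @param: vGPU creation parameters
 *
 * Reserve graphics memory space and fence registers for the vGPU according
 * to the creation parameters, rolling everything back on failure.
 *
 * Returns:
 * Zero on success, a negative error code on failure.
 */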
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	int ret;

	ret = alloc_resource(vgpu, param);
	if (ret)
		return ret;

	ret = alloc_vgpu_gm(vgpu);
	if (ret)
		goto out_free_resource;

	ret = alloc_vgpu_fence(vgpu);
	if (ret)
		goto out_free_vgpu_gm;

	return 0;

out_free_vgpu_gm:
	free_vgpu_gm(vgpu);
out_free_resource:
	free_resource(vgpu);
	return ret;
}