/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries);

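/*
 * amdgpu_bo_list_release_rcu - kref release callback for a BO list
 *
 * Drops the reference each list entry holds on its BO, then frees the
 * list structure itself only after an RCU grace period, so lockless
 * readers racing with the final kref_put() in amdgpu_bo_list_get()
 * never touch freed memory.
 */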
static void amdgpu_bo_list_release_rcu(struct kref *ref)
{
	unsigned i;
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree_rcu(list, rhead);
}

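/*
 * amdgpu_bo_list_create - create a BO list and publish it in the
 * per-file IDR
 *
 * Returns 0 on success and stores the new handle in @id; on failure the
 * partially constructed list is freed and an error code is returned.
 */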
static int amdgpu_bo_list_create(struct amdgpu_device *adev,
				 struct drm_file *filp,
				 struct drm_amdgpu_bo_list_entry *info,
				 unsigned num_entries,
				 int *id)
{
	int r;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo_list *list;

	list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	/* initialize bo list */
	mutex_init(&list->lock);
	kref_init(&list->refcount);
	r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
	if (r) {
		kfree(list);
		return r;
	}

	/* idr alloc should be called only after initialization of bo list. */
	mutex_lock(&fpriv->bo_list_lock);
	r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
	mutex_unlock(&fpriv->bo_list_lock);
	if (r < 0) {
		amdgpu_bo_list_free(list);
		return r;
	}
	*id = r;

	return 0;
}

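/*
 * amdgpu_bo_list_destroy - remove a BO list from the per-file IDR and
 * drop the IDR's reference; the list is actually released once all
 * other holders have put their references as well.
 */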
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}

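/*
 * amdgpu_bo_list_set - (re)populate a BO list from userspace entries
 *
 * Looks up every GEM handle, takes a reference on the backing BO and
 * partitions the array: regular BOs are filled in from the front,
 * userptr BOs from the back, so that [first_userptr, num_entries) can
 * be handled separately during command submission. On success the old
 * entries (if any) are dropped and replaced by the new array.
 */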
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry),
			       GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

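		/* Remember buffers whose preferred placement is one of the
		 * special GDS, GWS or OA domains; the CS code needs these
		 * objects later on.
		 */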
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	kvfree(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	/* Entries were filled in from both ends of the array, so a single
	 * "while (i--)" walk would leak the userptr BOs parked at the tail;
	 * release both the normal and the userptr ranges explicitly.
	 */
	for (i = 0; i < last_entry; ++i)
		amdgpu_bo_unref(&array[i].robj);
	for (i = first_userptr; i < num_entries; ++i)
		amdgpu_bo_unref(&array[i].robj);
	kvfree(array);
	return r;
}

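/*
 * amdgpu_bo_list_get - look up a BO list by handle
 *
 * Performs a lockless RCU lookup in the per-file IDR;
 * kref_get_unless_zero() guards against a list that is concurrently on
 * its way to being released. On success the list is returned locked and
 * referenced; callers must release it with amdgpu_bo_list_put().
 */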
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	rcu_read_lock();
	result = idr_find(&fpriv->bo_list_handles, id);

	if (result) {
		if (kref_get_unless_zero(&result->refcount)) {
			rcu_read_unlock();
			mutex_lock(&result->lock);
		} else {
			rcu_read_unlock();
			result = NULL;
		}
	} else {
		rcu_read_unlock();
	}

	return result;
}

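/*
 * amdgpu_bo_list_get_list - flatten a BO list into a validation list
 *
 * Adds every entry whose BO has no parent to @validated, stably sorted
 * by priority.
 */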
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * which are equal.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		if (!list->array[i].robj->parent)
			list_add_tail(&list->array[i].tv.head,
				      &bucket[priority]);

		list->array[i].user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

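/*
 * amdgpu_bo_list_put - unlock a list returned by amdgpu_bo_list_get()
 * and drop the reference taken there.
 */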
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
	kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}

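/*
 * amdgpu_bo_list_free - free a BO list immediately, without waiting for
 * an RCU grace period; only safe when no concurrent lookup can still
 * reach the list (e.g. before it was published in the IDR, or at file
 * close).
 */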
void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree(list);
}

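/*
 * amdgpu_bo_list_ioctl - handler for the DRM_AMDGPU_BO_LIST ioctl
 *
 * Creates, updates or destroys a BO list on behalf of userspace. The
 * resulting handle is what the CS ioctl later uses to name the set of
 * buffers a command submission works on.
 */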
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = kvmalloc_array(args->in.bo_number,
			      sizeof(struct drm_amdgpu_bo_list_entry),
			      GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
				      args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
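		/* The userspace struct size differs from ours: copy
		 * min(user, kernel) bytes per entry and zero-fill the rest,
		 * so both older and newer userspace layouts are accepted.
		 */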
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &handle);
		if (r)
			goto error_free;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_free:
	kvfree(info);
	return r;
}