#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)
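
/*
 * Entry priorities from userspace are clamped to at most
 * AMDGPU_BO_LIST_MAX_PRIORITY (32), giving AMDGPU_BO_LIST_NUM_BUCKETS (33)
 * possible values, so every entry maps to exactly one bucket of the stable
 * bucket sort in amdgpu_bo_list_get_list() below.
 */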

static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries);

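/*
 * Lists are looked up under rcu_read_lock() in amdgpu_bo_list_get(), so the
 * refcount release below must free through kfree_rcu() rather than kfree():
 * a concurrent lookup may still dereference the object until the RCU grace
 * period has passed.
 */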
static void amdgpu_bo_list_release_rcu(struct kref *ref)
{
	unsigned i;
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree_rcu(list, rhead);
}

static int amdgpu_bo_list_create(struct amdgpu_device *adev,
				 struct drm_file *filp,
				 struct drm_amdgpu_bo_list_entry *info,
				 unsigned num_entries,
				 int *id)
{
	int r;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo_list *list;

	list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	/* initialize bo list */
	mutex_init(&list->lock);
	kref_init(&list->refcount);
	r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
	if (r) {
		kfree(list);
		return r;
	}

	/* idr alloc should be called only after initialization of bo list. */
	mutex_lock(&fpriv->bo_list_lock);
	r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
	mutex_unlock(&fpriv->bo_list_lock);
	if (r < 0) {
		amdgpu_bo_list_free(list);
		return r;
	}
	*id = r;

	return 0;
}
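
/*
 * Note: idr_alloc() is called with start = 1 and end = 0, i.e. handles are
 * allocated upwards from 1 with no upper bound. Userspace can therefore
 * treat 0 as "no list"; the ioctl below also reports handle 0 after a
 * DESTROY operation.
 */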

static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_find(&fpriv->bo_list_handles, id);
	if (list) {
		/* Another user may still hold a reference to this list */
		mutex_lock(&list->lock);
		idr_remove(&fpriv->bo_list_handles, id);
		mutex_unlock(&list->lock);
		amdgpu_bo_list_free(list);
	}
	mutex_unlock(&fpriv->bo_list_lock);
}

static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			/* userptr BOs are packed at the tail of the array */
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	/* drop the references held by the previous contents of the list */
	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	kvfree(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	/* Entries are packed from both ends of the array, so only the
	 * ranges [0, last_entry) and [first_userptr, num_entries) hold
	 * references; a plain "while (i--)" would miss userptr BOs already
	 * parked at the tail and leak their references.
	 */
	for (i = 0; i < last_entry; ++i)
		amdgpu_bo_unref(&array[i].robj);
	for (i = first_userptr; i < num_entries; ++i)
		amdgpu_bo_unref(&array[i].robj);
	kvfree(array);
	return r;
}
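
/*
 * Illustrative sketch (not code from this file): with num_entries = 5 and
 * entries 1 and 3 backed by userptr BOs, amdgpu_bo_list_set() packs the
 * array as
 *
 *	index:        0     1     2     3     4
 *	array:      [ bo0 | bo2 | bo4 | bo3 | bo1 ]
 *	                               ^ first_userptr = 3
 *
 * so callers can walk the userptr BOs as array[first_userptr..num_entries).
 */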

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	rcu_read_lock();
	result = idr_find(&fpriv->bo_list_handles, id);

	if (result) {
		if (kref_get_unless_zero(&result->refcount)) {
			rcu_read_unlock();
			mutex_lock(&result->lock);
		} else {
			rcu_read_unlock();
			result = NULL;
		}
	} else {
		rcu_read_unlock();
	}

	return result;
}
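
/*
 * Usage sketch (illustrative, the real callers live elsewhere, e.g. the CS
 * ioctl): amdgpu_bo_list_get() returns with the refcount elevated and
 * list->lock held, so every successful get must be paired with
 * amdgpu_bo_list_put():
 *
 *	struct amdgpu_bo_list *list;
 *
 *	list = amdgpu_bo_list_get(fpriv, handle);
 *	if (!list)
 *		return -ENOENT;
 *	... use list->array[0..num_entries) under list->lock ...
 *	amdgpu_bo_list_put(list);
 */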

void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		if (!list->array[i].robj->parent)
			list_add_tail(&list->array[i].tv.head,
				      &bucket[priority]);

		list->array[i].user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. list_splice()
	 * inserts at the head of "validated", so buckets spliced later
	 * (higher priority) end up in front of lower-priority ones.
	 */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
	kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}

void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree(list);
}
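
/*
 * Note the two teardown paths: amdgpu_bo_list_free() kfree()s immediately
 * and is used where the list was never published or has just been removed
 * from the idr (creation failure, amdgpu_bo_list_destroy()), while
 * refcount-driven release goes through amdgpu_bo_list_release_rcu() and
 * kfree_rcu() above, because handle lookups are RCU-protected.
 */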

int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = kvmalloc_array(args->in.bo_number,
			      sizeof(struct drm_amdgpu_bo_list_entry),
			      GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		/* userspace passed a different entry size; copy the common
		 * prefix of each entry and zero-fill the rest
		 */
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &handle);
		if (r)
			goto error_free;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_free:
	kvfree(info);
	return r;
}
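
/*
 * Userspace view (illustrative sketch, not part of this file): creating a
 * list boils down to filling an array of drm_amdgpu_bo_list_entry and
 * issuing DRM_IOCTL_AMDGPU_BO_LIST with AMDGPU_BO_LIST_OP_CREATE. The
 * handles and priorities below are made up for the example.
 *
 *	struct drm_amdgpu_bo_list_entry entries[2] = {
 *		{ .bo_handle = bo0_handle, .bo_priority = 0 },
 *		{ .bo_handle = bo1_handle, .bo_priority = 8 },
 *	};
 *	union drm_amdgpu_bo_list args = {
 *		.in = {
 *			.operation = AMDGPU_BO_LIST_OP_CREATE,
 *			.bo_number = 2,
 *			.bo_info_size = sizeof(entries[0]),
 *			.bo_info_ptr = (uintptr_t)entries,
 *		},
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_BO_LIST, &args) == 0)
 *		list_handle = args.out.list_handle;
 */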