#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: list of drm_gpu_schedulers on which jobs from this entity can
 *              be submitted
 * @num_sched_list: number of schedulers in @sched_list
 * @guilty: atomic_t set to 1 when a job on this entity is found to have
 *          caused a GPU hang/timeout
 *
 * Note that @sched_list must have at least one element to actually schedule
 * the entity.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
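
/*
 * Usage sketch (illustrative only, not part of this file): a driver with a
 * single scheduler instance could initialize an entity against it roughly as
 * below. The names "my_sched" and "my_entity" are hypothetical, and the
 * DRM_SCHED_PRIORITY_NORMAL level is assumed to be available in this tree.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&my_entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 */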

/**
 * drm_sched_entity_modify_sched - Modify the scheduler list of an entity
 *
 * @entity: scheduler entity to modify
 * @sched_list: list of drm_gpu_schedulers that replaces the existing
 *              entity->sched_list
 * @num_sched_list: number of schedulers in @sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

/**
 * drm_sched_entity_is_idle - Check if the entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity has no unscheduled jobs left.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if the entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity can provide a job that is ready to run.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the job queue to become empty, in jiffies
 *
 * First half of the entity teardown (the second half is
 * drm_sched_entity_fini()): wait for the job queue to drain and, if the
 * process was killed, stop the entity and remove it from its runqueue.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more jobs during this teardown, so
	 * consume the existing queued jobs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further enqueueing right now. */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence and free the job when the entity in
 * question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * may never have had the chance to submit its first job to
		 * the HW, so entity->last_scheduled can still be NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Second half of the entity teardown: call after drm_sched_entity_flush().
 * Any jobs left in the queue are signalled with an error and freed, and the
 * reference to the last scheduled fence is dropped.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/*
	 * Consumption of the existing jobs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the scheduler thread to go idle to make
			 * sure it isn't processing this entity anymore.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Convenience wrapper that calls drm_sched_entity_flush() and
 * drm_sched_entity_fini().
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
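
/*
 * Teardown sketch (illustrative only, not part of this file): on context
 * destruction a driver can simply call drm_sched_entity_destroy(), which
 * flushes with MAX_WAIT_SCHED_ENTITY_Q_EMPTY and then finalizes the entity;
 * alternatively it may call drm_sched_entity_flush() with its own timeout
 * first and defer drm_sched_entity_fini() until later. "my_entity" is a
 * hypothetical name.
 *
 *	drm_sched_entity_destroy(&my_entity);
 */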

/*
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets the priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the runqueue used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job which
		 * belongs to the same entity; we can ignore fences from
		 * ourself.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled. */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that has been marked guilty. */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* The first job wakes up the scheduler. */
	if (first) {
		/* Add the entity to the run queue. */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
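
/*
 * Submission sketch (illustrative only, not part of this file): in the
 * scheduler version this file belongs to, a driver initializes a job against
 * an entity and then pushes it. "my_job" (a hypothetical driver structure
 * embedding a struct drm_sched_job as "base") and "my_entity" are assumed
 * names, and the three-argument drm_sched_job_init() signature is assumed
 * from the same tree.
 *
 *	r = drm_sched_job_init(&my_job->base, &my_entity, NULL);
 *	if (r)
 *		return r;
 *
 *	drm_sched_entity_push_job(&my_job->base, &my_entity);
 */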