/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by scheduler when
 * submit to HW ring.
 *
 * @entity: scheduler entity to init
 * @rq_list: the list of run queues on which jobs from this
 *           entity can be submitted
 * @num_rq_list: number of run queues in rq_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the rq_list should have at least one element to schedule
 *       the entity.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty)
{
	int i;

	if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_rq_list = num_rq_list;
	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
				  GFP_KERNEL);
	if (!entity->rq_list)
		return -ENOMEM;

	for (i = 0; i < num_rq_list; ++i)
		entity->rq_list[i] = rq_list[i];

	if (num_rq_list)
		entity->rq = rq_list[0];

	entity->last_scheduled = NULL;

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
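
/*
 * A minimal usage sketch, not part of this file: a driver typically creates
 * one entity per userspace context, pointing it at the run queue of the
 * desired priority on each hardware scheduler the context may run on.
 * "my_sched", "my_ctx" and NUM_SCHEDS are hypothetical driver names:
 *
 *	struct drm_sched_rq *rq_list[NUM_SCHEDS];
 *	unsigned int i;
 *	int r;
 *
 *	for (i = 0; i < NUM_SCHEDS; ++i)
 *		rq_list[i] = &my_sched[i].sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *	r = drm_sched_entity_init(&my_ctx->entity, rq_list, NUM_SCHEDS,
 *				  &my_ctx->guilty);
 *	if (r)
 *		return r;
 */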

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Return true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
 *
 * @entity: scheduler entity
 *
 * Return the pointer to the rq with the least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_jobs = UINT_MAX, num_jobs;
	int i;

	for (i = 0; i < entity->num_rq_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;

		if (!entity->rq_list[i]->sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping\n",
				 sched->name);
			continue;
		}

		num_jobs = atomic_read(&sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			rq = entity->rq_list[i];
		}
	}

	return rq;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the job queue to become empty
 *
 * Splitting drm_sched_entity_fini() into two functions, the first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any more IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
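
/*
 * Illustrative only ("my_ctx" is a hypothetical driver structure embedding a
 * drm_sched_entity): a driver that wants to bound how long context teardown
 * can block may call the flush step on its own and inspect the remaining
 * timeout before finalizing the entity:
 *
 *	long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 *
 *	timeout = drm_sched_entity_flush(&my_ctx->entity, timeout);
 */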

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled can still be NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It cancels any
 * remaining unscheduled jobs and frees the entity's internal resources.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/*
	 * Consumption of existing IBs wasn't completed. Forcefully remove
	 * them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Park the scheduler thread for a moment to make
			 * sure it isn't processing our entity.
			 */
			kthread_park(sched->thread);
			kthread_unpark(sched->thread);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
	kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() with the maximum timeout and then
 * drm_sched_entity_fini().
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
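
/*
 * Illustrative teardown, not part of this file: most callers can use
 * drm_sched_entity_destroy() directly, which is shorthand for the two-step
 * sequence ("my_ctx" is a hypothetical driver structure):
 *
 *	drm_sched_entity_flush(&my_ctx->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 *	drm_sched_entity_fini(&my_ctx->entity);
 */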

/*
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/*
 * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
 */
static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
					     enum drm_sched_priority priority)
{
	*rq = &(*rq)->sched->sched_rq[priority];
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the run queues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	unsigned int i;

	spin_lock(&entity->rq_lock);

	for (i = 0; i < entity->num_rq_list; ++i)
		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);

	if (entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		drm_sched_entity_set_rq_priority(&entity->rq, priority);
		drm_sched_rq_add_entity(entity->rq, entity);
	}

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
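
/*
 * Illustrative only ("my_ctx" is hypothetical): a driver handling a
 * userspace priority override can move the entity and all of its run
 * queues to the new level with a single call:
 *
 *	drm_sched_entity_set_priority(&my_ctx->entity,
 *				      DRM_SCHED_PRIORITY_HIGH_SW);
 */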

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from this entity's own
		 * context; jobs in an entity execute in order, so the
		 * dependency is implicitly satisfied and can be dropped.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait
		 * for it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	rq = drm_sched_entity_get_free_sched(entity);
	if (rq == entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	drm_sched_rq_remove_entity(entity->rq, entity);
	entity->rq = rq;
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->num_jobs);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
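
/*
 * Illustrative submission flow, not part of this file ("my_job" and "my_ctx"
 * are hypothetical driver structures embedding a drm_sched_job and a
 * drm_sched_entity): the job is first bound to the entity with
 * drm_sched_job_init() and then queued; per the note above, both steps
 * should happen under a common lock:
 *
 *	r = drm_sched_job_init(&my_job->base, &my_ctx->entity, my_ctx);
 *	if (r)
 *		return r;
 *
 *	drm_sched_entity_push_job(&my_job->base, &my_ctx->entity);
 */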