#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job) \
	container_of((sched_job), struct drm_sched_job, queue_node)
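
/**
 * drm_sched_entity_init - initialize a scheduler entity
 * @entity: scheduler entity to init
 * @priority: priority of the run queue the entity is placed on
 * @sched_list: schedulers this entity can submit jobs to
 * @num_sched_list: number of schedulers in @sched_list
 * @guilty: atomic_t set to 1 when a job on this entity's queue is found
 *          guilty of causing a timeout
 *
 * The entity starts out idle on the first scheduler's run queue (if any)
 * and allocates two fence contexts: one for the scheduled fences and one
 * for the finished fences.
 *
 * Returns 0 on success or a negative error code on failure.
 */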
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
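
/**
 * drm_sched_entity_modify_sched - modify the schedulers an entity can use
 * @entity: scheduler entity to modify
 * @sched_list: list of new schedulers which replaces entity->sched_list
 * @num_sched_list: number of schedulers in @sched_list
 */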
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
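
/**
 * drm_sched_entity_is_idle - check if the entity is idle
 * @entity: scheduler entity
 *
 * Returns true if the entity is no longer enqueued on a run queue, has
 * no unscheduled jobs left, or has been stopped.
 */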
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}
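
/**
 * drm_sched_entity_is_ready - check if the entity is ready
 * @entity: scheduler entity
 *
 * Returns true if the entity has a job queued and is not blocked on an
 * unsignaled dependency fence.
 */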
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}
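
/**
 * drm_sched_entity_flush - flush a context entity
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the job queue to become empty
 *
 * Waits until the entity has no more unscheduled jobs. If the calling
 * process was killed with SIGKILL, the entity is additionally stopped
 * and removed from its run queue so no further jobs can be pushed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */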
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more jobs during this fini: consume
	 * the existing queued ones or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process disable any further job submission right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
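
/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 * @f: signaled fence (may be NULL when called directly)
 * @cb: our callback structure embedded in the job
 *
 * Signals the scheduler's finished fence and frees the job.
 */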
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}
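
/**
 * drm_sched_entity_kill_jobs - make sure all remaining jobs are killed
 * @entity: entity which is cleaned up
 *
 * Pops every job still sitting in the entity's queue, sets -ESRCH on its
 * finished fence and makes sure the job is freed once the fence of the
 * last scheduled job has signaled.
 */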
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *f;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		/* Wait for all dependencies to avoid data corruptions */
		while ((f = job->sched->ops->dependency(job, entity)))
			dma_fence_wait(f, false);

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * may not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled is still NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}
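
/**
 * drm_sched_entity_fini - destroy a context entity
 * @entity: scheduler entity
 *
 * Cleans up @entity, which must have been initialized by
 * drm_sched_entity_init(). If jobs are still queued, they are killed and
 * their finished fences are signaled with an error code.
 *
 * This should be called after drm_sched_entity_flush().
 */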
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/*
	 * Consumption of the existing jobs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the scheduler to go idle to make sure
			 * it isn't processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);
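
/**
 * drm_sched_entity_destroy - destroy a context entity
 * @entity: scheduler entity
 *
 * Convenience wrapper that flushes the entity with the maximum timeout
 * and then finalizes it.
 */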
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
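
/*
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */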
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}
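
/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */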
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}
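
/**
 * drm_sched_entity_set_priority - set the priority of the entity
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Updates the priority used to pick the run queue the entity is placed
 * on the next time a run queue is selected.
 */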
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
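
/**
 * drm_sched_entity_add_dependency_cb - install a callback on the dependency
 * @entity: entity with a dependency fence set
 *
 * Adds a callback to entity->dependency which wakes up the scheduler when
 * the fence signals. Returns true if the caller must wait for it, false
 * if the dependency was dropped or has already signaled.
 */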
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
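
/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 * @entity: entity to get the job from
 *
 * Processes all dependencies and tries to pop one job from the entity's
 * queue. Returns NULL if there is no job or the entity still has to wait
 * for a dependency.
 */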
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
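
/**
 * drm_sched_entity_select_rq - select a new run queue for the entity
 * @entity: scheduler entity
 *
 * Checks all prerequisites and selects a new run queue for load
 * balancing. Only possible while the entity's job queue is empty and the
 * last scheduled job has completed.
 */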
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}
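
/**
 * drm_sched_entity_push_job - submit a job to the entity's job queue
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: to guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */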
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);