#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/*
 * A scheduler entity wraps a job queue bound to one of the scheduler's
 * run queues (and therefore to a priority). Jobs pushed to an entity are
 * dequeued in order and handed to the hardware ring; typically a driver
 * creates one entity per context per ring.
 */
struct drm_sched_entity {
	struct list_head list;
	struct drm_sched_rq *rq;
	spinlock_t rq_lock;
	struct drm_gpu_scheduler *sched;

	struct spsc_queue job_queue;

	atomic_t fence_seq;
	uint64_t fence_context;

	struct dma_fence *dependency;
	struct dma_fence_cb cb;
	atomic_t *guilty;
	int fini_status;
	struct dma_fence *last_scheduled;
};
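
/*
 * Example (illustrative sketch): attaching an entity to one of a
 * scheduler's per-priority run queues. "ring" (a hypothetical driver
 * object embedding a struct drm_gpu_scheduler named "sched") and "ctx"
 * (a hypothetical driver context embedding a struct drm_sched_entity)
 * are placeholders; only the calls declared later in this header are
 * assumed.
 *
 *	struct drm_sched_rq *rq =
 *		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *	int r;
 *
 *	r = drm_sched_entity_init(&ring->sched, &ctx->entity, rq, NULL);
 *	if (r)
 *		return r;
 *
 * and on context teardown:
 *
 *	drm_sched_entity_fini(&ring->sched, &ctx->entity);
 */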

/*
 * A run queue holds the entities of one priority level, served
 * round-robin; current_entity remembers where the last pick left off so
 * selection resumes fairly.
 */
struct drm_sched_rq {
	spinlock_t lock;
	struct list_head entities;
	struct drm_sched_entity *current_entity;
};

/*
 * A scheduler fence carries two dma_fences per job: "scheduled" signals
 * when the job leaves the entity queue and is handed to the backend,
 * "finished" signals once the job has completed on the hardware.
 */
struct drm_sched_fence {
	struct dma_fence scheduled;

	/*
	 * Signaled when the job is completed. This fence exists as soon as
	 * drm_sched_job_init() returns, whereas the fence returned by the
	 * backend's run_job() is only created once all dependencies have
	 * resolved, so "finished" is what out-fences should wait on.
	 */
	struct dma_fence finished;

	struct dma_fence_cb cb;
	struct dma_fence *parent;
	struct drm_gpu_scheduler *sched;
	spinlock_t lock;
	void *owner;
};
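
/*
 * Example (illustrative sketch): exporting the finished fence as a job's
 * out-fence before pushing it. "job" is a placeholder for a driver job
 * embedding struct drm_sched_job as "base"; "entity" is the target
 * drm_sched_entity.
 *
 *	struct dma_fence *out_fence =
 *		dma_fence_get(&job->base.s_fence->finished);
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 *
 * out_fence can then be handed to userspace or installed in a sync_file;
 * it signals once the backend's hardware fence for this job signals. The
 * reference must be taken before the push, since the job may complete and
 * be freed at any point afterwards.
 */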

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/*
 * A scheduler job is the unit of work pushed to an entity. The driver
 * embeds it in its own job structure, initializes it with
 * drm_sched_job_init() and queues it with drm_sched_entity_push_job().
 */
struct drm_sched_job {
	struct spsc_node queue_node;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_fence *s_fence;
	struct dma_fence_cb finish_cb;
	struct work_struct finish_work;
	struct list_head node;
	struct delayed_work work_tdr;
	uint64_t id;
	atomic_t karma;
	enum drm_sched_priority s_priority;
	struct drm_sched_entity *entity;
};

/*
 * Bump the job's karma counter and report whether it has now crossed
 * @threshold (typically the scheduler's hang_limit). Drivers call this
 * from their reset path to decide whether a repeatedly hanging job should
 * be dropped instead of resubmitted.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}
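
/*
 * Example (illustrative sketch): the rough shape of a recovery path built
 * from the helpers declared later in this header. "bad_job" is a
 * placeholder for the job that triggered the timeout; exact ordering and
 * guilty handling are driver policy.
 *
 *	drm_sched_hw_job_reset(sched, bad_job);
 *
 *	(reset the hardware ring here)
 *
 *	if (drm_sched_invalidate_job(bad_job, sched->hang_limit))
 *		(job keeps hanging: mark its context guilty / skip it)
 *
 *	drm_sched_job_recovery(sched);
 */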

/*
 * Driver-provided hooks the scheduler core calls to drive a hardware ring.
 */
struct drm_sched_backend_ops {
	/*
	 * Called while the scheduler is considering this job; each non-NULL
	 * fence returned is waited on before the job is treated as ready.
	 * Once it returns NULL, run_job can be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/*
	 * Hand the job to the hardware. Returns the hardware fence that
	 * signals completion, which the scheduler uses to signal the job's
	 * finished fence.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/*
	 * Called when a job has been running longer than the scheduler's
	 * timeout; typically triggers the driver's GPU recovery path.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/*
	 * Called once the job is no longer needed and its resources can be
	 * released.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
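
/*
 * Example (illustrative sketch): the shape of a driver's backend
 * implementation. "struct my_job" (embedding struct drm_sched_job as
 * "base") and the my_* handlers are placeholders; only the four hooks and
 * their signatures come from struct drm_sched_backend_ops above.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job =
 *			container_of(sched_job, struct my_job, base);
 *
 *		return my_ring_submit(job);	(returns the hardware fence)
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency	= my_job_dependency,
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */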

/*
 * One scheduler instance per hardware ring. It owns a kernel thread that
 * picks entities from the per-priority run queues and feeds their jobs to
 * the backend ops, keeping at most hw_submission_limit jobs in flight.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops *ops;
	uint32_t hw_submission_limit;
	long timeout;
	const char *name;
	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t wake_up_worker;
	wait_queue_head_t job_scheduled;
	atomic_t hw_rq_count;
	atomic64_t job_id_count;
	struct task_struct *thread;
	struct list_head ring_mirror_list;
	spinlock_t job_list_lock;
	int hang_limit;
};
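
/*
 * Example (illustrative sketch): bringing up one scheduler per hardware
 * ring with drm_sched_init() as declared below. "ring", "my_sched_ops",
 * and the submission depth / hang limit / timeout values are placeholders
 * a driver would choose to match its hardware.
 *
 *	r = drm_sched_init(&ring->sched, &my_sched_ops,
 *			   num_hw_submission, my_hang_limit,
 *			   msecs_to_jiffies(my_timeout_ms), ring->name);
 *	if (r)
 *		return r;
 *
 * and on teardown:
 *
 *	drm_sched_fini(&ring->sched);
 */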

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);

int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_rq *rq,
			  atomic_t *guilty);
void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
				 struct drm_sched_entity *entity);
void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
			      struct drm_sched_entity *entity);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
			     struct drm_sched_rq *rq);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_gpu_scheduler *sched,
		       struct drm_sched_entity *entity,
		       void *owner);
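
/*
 * Example (illustrative sketch): the common submission flow. The driver
 * initializes its embedded drm_sched_job against an entity and then pushes
 * it; the scheduler wakes its worker and eventually invokes run_job().
 * "job" (embedding struct drm_sched_job as "base") and "ring" are
 * placeholders.
 *
 *	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
 *	if (r)
 *		return r;
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 */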
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
			    struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

#endif