
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/drm_crtc.h>

#include "drm_internal.h"
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
/**
 * drm_handle_vblank_works - dispatch vblank works whose target count passed
 * @vblank: the vblank CRTC whose pending work list is scanned
 *
 * Walks @vblank->pending_work and, for each entry whose target vblank count
 * has been reached (checked with drm_vblank_passed() against the current
 * atomic vblank count), unlinks it, drops the vblank reference it held, and
 * queues it on the CRTC's kthread worker for execution. Waiters on
 * @vblank->work_wait_queue are woken once at the end if anything was moved.
 *
 * Caller must hold dev->event_lock (asserted below).
 */
void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;
	u64 count = atomic64_read(&vblank->count);
	bool wake = false;

	assert_spin_locked(&vblank->dev->event_lock);

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		/* Not due yet: leave it queued for a later vblank. */
		if (!drm_vblank_passed(count, work->count))
			continue;

		list_del_init(&work->node);
		/* Balance the drm_vblank_get() taken at schedule time. */
		drm_vblank_put(vblank->dev, vblank->pipe);
		kthread_queue_work(vblank->worker, &work->base);
		wake = true;
	}
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
}
66
67
68
69
/**
 * drm_vblank_cancel_pending_works - cancel all not-yet-dispatched vblank works
 * @vblank: the vblank CRTC to clear
 *
 * Unconditionally unlinks every entry from @vblank->pending_work, dropping
 * the vblank reference each pending work held, then wakes all waiters on
 * @vblank->work_wait_queue. Works that were already handed off to the
 * kthread worker are not affected.
 *
 * Caller must hold dev->event_lock (asserted below).
 */
void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;

	assert_spin_locked(&vblank->dev->event_lock);

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		list_del_init(&work->node);
		/* Balance the drm_vblank_get() taken at schedule time. */
		drm_vblank_put(vblank->dev, vblank->pipe);
	}

	wake_up_all(&vblank->work_wait_queue);
}
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
/**
 * drm_vblank_work_schedule - schedule a vblank work
 * @work: vblank work to schedule
 * @count: target vblank count at which @work should run
 * @nextonmiss: defer to the pending list even if @count has already passed
 *
 * Schedule @work to run once the CRTC's vblank count reaches @count. If
 * @count has already passed and @nextonmiss is false, the work is queued on
 * the kthread worker immediately; otherwise it is put on (or kept on) the
 * pending list and dispatched later by drm_handle_vblank_works().
 *
 * Returns:
 * Non-zero/true if @work was (re)scheduled or queued, 0 if nothing was done
 * (work is being cancelled, CRTC is in a modeset, or @work was already
 * scheduled for this exact @count), or a negative error code from
 * drm_vblank_get().
 */
int drm_vblank_work_schedule(struct drm_vblank_work *work,
			     u64 count, bool nextonmiss)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	u64 cur_vbl;
	unsigned long irqflags;
	bool passed, inmodeset, rescheduling = false, wake = false;
	int ret = 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	/* A concurrent drm_vblank_work_cancel_sync() wins: don't requeue. */
	if (work->cancelling)
		goto out;

	/* vblank->inmodeset is protected by vbl_lock, not event_lock. */
	spin_lock(&dev->vbl_lock);
	inmodeset = vblank->inmodeset;
	spin_unlock(&dev->vbl_lock);
	if (inmodeset)
		goto out;

	if (list_empty(&work->node)) {
		/* First schedule: hold a vblank reference while pending. */
		ret = drm_vblank_get(dev, vblank->pipe);
		if (ret < 0)
			goto out;
	} else if (work->count == count) {
		/* Already scheduled for this exact vblank; nothing to do. */
		goto out;
	} else {
		rescheduling = true;
	}

	work->count = count;
	cur_vbl = drm_vblank_count(dev, vblank->pipe);
	passed = drm_vblank_passed(cur_vbl, count);
	if (passed)
		drm_dbg_core(dev,
			     "crtc %d vblank %llu already passed (current %llu)\n",
			     vblank->pipe, count, cur_vbl);

	if (!nextonmiss && passed) {
		/* Target missed: run right away and drop the reference. */
		drm_vblank_put(dev, vblank->pipe);
		ret = kthread_queue_work(vblank->worker, &work->base);

		/* If it sat on the pending list, unlink and wake flushers. */
		if (rescheduling) {
			list_del_init(&work->node);
			wake = true;
		}
	} else {
		if (!rescheduling)
			list_add_tail(&work->node, &vblank->pending_work);
		ret = true;
	}

out:
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
	/* Wake waiters outside the lock to keep the hold time short. */
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_schedule);
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/**
 * drm_vblank_work_cancel_sync - cancel a vblank work and wait for completion
 * @work: vblank work to cancel
 *
 * If @work is still on the pending list, unlink it and drop the vblank
 * reference it held; then synchronously cancel the underlying kthread work.
 * work->cancelling is kept non-zero across the sync cancel so that
 * drm_vblank_work_schedule() refuses to requeue @work in the meantime.
 *
 * Returns: true if @work was pending or executing and got cancelled, false
 * if it had already finished (or was never scheduled).
 */
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	bool ret = false;

	spin_lock_irq(&dev->event_lock);
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		/* Balance the drm_vblank_get() taken at schedule time. */
		drm_vblank_put(vblank->dev, vblank->pipe);
		ret = true;
	}

	/* Block rescheduling until the sync cancel below completes. */
	work->cancelling++;
	spin_unlock_irq(&dev->event_lock);

	/* Let drm_vblank_work_flush() waiters re-check the (empty) node. */
	wake_up_all(&vblank->work_wait_queue);

	if (kthread_cancel_work_sync(&work->base))
		ret = true;

	spin_lock_irq(&dev->event_lock);
	work->cancelling--;
	spin_unlock_irq(&dev->event_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_cancel_sync);
210
211
212
213
214
215
216
217
/**
 * drm_vblank_work_flush - wait for a scheduled vblank work to finish
 * @work: vblank work to flush
 *
 * Sleeps (dropping and retaking dev->event_lock via wait_event_lock_irq())
 * until @work is no longer on the pending list — i.e. it was dispatched to
 * the worker or cancelled — then flushes the kthread work so any in-flight
 * execution of @work has completed before returning.
 */
void drm_vblank_work_flush(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;

	spin_lock_irq(&dev->event_lock);
	wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
			    dev->event_lock);
	spin_unlock_irq(&dev->event_lock);

	kthread_flush_work(&work->base);
}
EXPORT_SYMBOL(drm_vblank_work_flush);
231
232
233
234
235
236
237
238
239
240void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
241 void (*func)(struct kthread_work *work))
242{
243 kthread_init_work(&work->base, func);
244 INIT_LIST_HEAD(&work->node);
245 work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
246}
247EXPORT_SYMBOL(drm_vblank_work_init);
248
249int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
250{
251 struct kthread_worker *worker;
252
253 INIT_LIST_HEAD(&vblank->pending_work);
254 init_waitqueue_head(&vblank->work_wait_queue);
255 worker = kthread_create_worker(0, "card%d-crtc%d",
256 vblank->dev->primary->index,
257 vblank->pipe);
258 if (IS_ERR(worker))
259 return PTR_ERR(worker);
260
261 vblank->worker = worker;
262
263 sched_set_fifo(worker->task);
264 return 0;
265}
266