1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include "drmP.h"
25#include "drm_flip_work.h"
26
27
28
29
30
31
32
33
34struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
35{
36 struct drm_flip_task *task;
37
38 task = kzalloc(sizeof(*task), flags);
39 if (task)
40 task->data = data;
41
42 return task;
43}
44EXPORT_SYMBOL(drm_flip_work_allocate_task);
45
46
47
48
49
50
51
52
53
54void drm_flip_work_queue_task(struct drm_flip_work *work,
55 struct drm_flip_task *task)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&work->lock, flags);
60 list_add_tail(&task->node, &work->queued);
61 spin_unlock_irqrestore(&work->lock, flags);
62}
63EXPORT_SYMBOL(drm_flip_work_queue_task);
64
65
66
67
68
69
70
71
72
73void drm_flip_work_queue(struct drm_flip_work *work, void *val)
74{
75 struct drm_flip_task *task;
76
77 task = drm_flip_work_allocate_task(val,
78 drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
79 if (task) {
80 drm_flip_work_queue_task(work, task);
81 } else {
82 DRM_ERROR("%s could not allocate task!\n", work->name);
83 work->func(work, val);
84 }
85}
86EXPORT_SYMBOL(drm_flip_work_queue);
87
88
89
90
91
92
93
94
95
96
97
98void drm_flip_work_commit(struct drm_flip_work *work,
99 struct workqueue_struct *wq)
100{
101 unsigned long flags;
102
103 spin_lock_irqsave(&work->lock, flags);
104 list_splice_tail(&work->queued, &work->commited);
105 INIT_LIST_HEAD(&work->queued);
106 spin_unlock_irqrestore(&work->lock, flags);
107 queue_work(wq, &work->worker);
108}
109EXPORT_SYMBOL(drm_flip_work_commit);
110
111static void flip_worker(struct work_struct *w)
112{
113 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
114 struct list_head tasks;
115 unsigned long flags;
116
117 while (1) {
118 struct drm_flip_task *task, *tmp;
119
120 INIT_LIST_HEAD(&tasks);
121 spin_lock_irqsave(&work->lock, flags);
122 list_splice_tail(&work->commited, &tasks);
123 INIT_LIST_HEAD(&work->commited);
124 spin_unlock_irqrestore(&work->lock, flags);
125
126 if (list_empty(&tasks))
127 break;
128
129 list_for_each_entry_safe(task, tmp, &tasks, node) {
130 work->func(work, task->data);
131 kfree(task);
132 }
133 }
134}
135
136
137
138
139
140
141
142
143
144void drm_flip_work_init(struct drm_flip_work *work,
145 const char *name, drm_flip_func_t func)
146{
147 work->name = name;
148 INIT_LIST_HEAD(&work->queued);
149 INIT_LIST_HEAD(&work->commited);
150 spin_lock_init(&work->lock);
151 work->func = func;
152
153 INIT_WORK(&work->worker, flip_worker);
154}
155EXPORT_SYMBOL(drm_flip_work_init);
156
157
158
159
160
161
162
/**
 * drm_flip_work_cleanup - clean up flip-work
 * @work: the flip-work to clean up
 *
 * Warns if tasks are still pending on either list (queued but not yet
 * committed, or committed but not yet run by the worker) — the caller is
 * expected to have drained the work before cleanup.
 */
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
	WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
}
EXPORT_SYMBOL(drm_flip_work_cleanup);
168