/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}

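/*
 * Tear down the slab cache on module unload. The rcu_barrier() waits for
 * any call_rcu() callbacks still in flight (drm_sched_fence_free()) so
 * that no fence is freed into a cache that has already been destroyed.
 */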
static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}

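/**
 * drm_sched_fence_scheduled - signal the fence marking job pickup
 * @fence: scheduler fence to signal
 *
 * Signals the &drm_sched_fence.scheduled fence once the scheduler has
 * picked the job up for execution.
 */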
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->scheduled);

	if (!ret)
		DMA_FENCE_TRACE(&fence->scheduled,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->scheduled,
				"was already signaled\n");
}

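/**
 * drm_sched_fence_finished - signal the fence marking job completion
 * @fence: scheduler fence to signal
 *
 * Signals the &drm_sched_fence.finished fence once the job has completed
 * execution.
 */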
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->finished);

	if (!ret)
		DMA_FENCE_TRACE(&fence->finished,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->finished,
				"was already signaled\n");
}

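/* &dma_fence_ops.get_driver_name: all scheduler fences share one name. */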
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

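/* &dma_fence_ops.get_timeline_name: one timeline per scheduler instance. */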
static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return (const char *)fence->sched->name;
}

/**
 * drm_sched_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void drm_sched_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

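/*
 * The two ops tables differ only in their release callback: releasing the
 * scheduled fence frees the whole drm_sched_fence (RCU-deferred), while
 * releasing the finished fence merely drops its extra reference on the
 * scheduled fence.
 */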
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
};

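/**
 * to_drm_sched_fence - cast a &struct dma_fence to its scheduler fence
 * @f: fence to cast
 *
 * Returns the containing &struct drm_sched_fence if @f is the scheduled
 * or the finished fence of one, NULL otherwise.
 */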
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);

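/**
 * drm_sched_fence_create - create a new scheduler fence pair
 * @entity: the entity the fences belong to
 * @owner: opaque pointer identifying the job owner
 *
 * Allocates a &struct drm_sched_fence from the slab and initializes both
 * embedded fences on the entity's fence context (scheduled on
 * fence_context, finished on fence_context + 1), sharing one sequence
 * number and one lock.
 *
 * Returns the new fence or NULL on allocation failure.
 */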
struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
					       void *owner)
{
	struct drm_sched_fence *fence = NULL;
	unsigned seq;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->owner = owner;
	fence->sched = entity->rq->sched;
	spin_lock_init(&fence->lock);

	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);

	return fence;
}

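/*
 * Illustrative lifecycle (a sketch, not code from this file; "my_job" is
 * a hypothetical owner pointer). The scheduler core creates the fence
 * pair when a job is queued and signals the halves as the job progresses:
 *
 *	struct drm_sched_fence *f = drm_sched_fence_create(entity, my_job);
 *
 *	drm_sched_fence_scheduled(f);	// job picked up for execution
 *	drm_sched_fence_finished(f);	// job completed execution
 */
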
module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");