1
2
3
4
5#include "msm_gem.h"
6#include "a5xx_gpu.h"
7
8
9
10
11
12static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
13 enum preempt_state old, enum preempt_state new)
14{
15 enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
16 old, new);
17
18 return (cur == old);
19}
20
21
22
23
24
/*
 * Unconditionally set the preemption state (used on transitions where a
 * compare-and-swap via try_preempt_state() is not required).
 */
static inline void set_preempt_state(struct a5xx_gpu *gpu,
		enum preempt_state new)
{
	/*
	 * preempt_state may be read by other cores trying to trigger a
	 * preemption or in the interrupt handler, so barriers are needed
	 * before the state is written...
	 */
	smp_mb__before_atomic();
	atomic_set(&gpu->preempt_state, new);
	/* ... and after */
	smp_mb__after_atomic();
}
38
39
40static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
41{
42 unsigned long flags;
43 uint32_t wptr;
44
45 if (!ring)
46 return;
47
48 spin_lock_irqsave(&ring->preempt_lock, flags);
49 wptr = get_wptr(ring);
50 spin_unlock_irqrestore(&ring->preempt_lock, flags);
51
52 gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
53}
54
55
56static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
57{
58 unsigned long flags;
59 int i;
60
61 for (i = 0; i < gpu->nr_rings; i++) {
62 bool empty;
63 struct msm_ringbuffer *ring = gpu->rb[i];
64
65 spin_lock_irqsave(&ring->preempt_lock, flags);
66 empty = (get_wptr(ring) == ring->memptrs->rptr);
67 spin_unlock_irqrestore(&ring->preempt_lock, flags);
68
69 if (!empty)
70 return ring;
71 }
72
73 return NULL;
74}
75
76static void a5xx_preempt_timer(struct timer_list *t)
77{
78 struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
79 struct msm_gpu *gpu = &a5xx_gpu->base.base;
80 struct drm_device *dev = gpu->dev;
81
82 if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
83 return;
84
85 DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
86 kthread_queue_work(gpu->worker, &gpu->recover_work);
87}
88
89
/*
 * Try to preempt the GPU to the highest-priority ring that has work
 * pending.  The preempt_state atomic guarantees only one trigger can be
 * in flight at a time.
 */
void a5xx_preempt_trigger(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	unsigned long flags;
	struct msm_ringbuffer *ring;

	/* Preemption isn't possible with only one ring */
	if (gpu->nr_rings == 1)
		return;

	/*
	 * Try to start preemption by moving from NONE to START. If
	 * unsuccessful, a preemption is already in flight
	 */
	if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
		return;

	/* Get the next ring to preempt to */
	ring = get_next_ring(gpu);

	/*
	 * If no ring is populated or the highest priority ring is the current
	 * one do nothing except to update the wptr to the latest and greatest
	 */
	if (!ring || (a5xx_gpu->cur_ring == ring)) {
		/*
		 * While we were in PREEMPT_START a submit may have written a
		 * new wptr for the active ring that we didn't push to the
		 * hardware.  Move through ABORT so concurrent observers see
		 * a consistent state while we sync the wptr, then drop back
		 * to NONE.
		 */
		set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
		update_wptr(gpu, a5xx_gpu->cur_ring);
		set_preempt_state(a5xx_gpu, PREEMPT_NONE);
		return;
	}

	/* Snapshot the target ring's wptr into its preemption record */
	spin_lock_irqsave(&ring->preempt_lock, flags);
	a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
	spin_unlock_irqrestore(&ring->preempt_lock, flags);

	/* Tell the CP where to find the incoming ring's preemption record */
	gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
		REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
		a5xx_gpu->preempt_iova[ring->id]);

	a5xx_gpu->next_ring = ring;

	/* Start a timer to catch a stuck preemption */
	mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));

	/* Set the preemption state to triggered */
	set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);

	/* Make sure everything is written before hitting the button */
	wmb();

	/* And actually start the preemption */
	gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
}
157
/*
 * Interrupt handler for preemption completion: verify the hardware
 * really finished, then promote next_ring to cur_ring and return the
 * state machine to NONE.
 */
void a5xx_preempt_irq(struct msm_gpu *gpu)
{
	uint32_t status;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct drm_device *dev = gpu->dev;

	/* Ignore spurious interrupts: only act on an in-flight preemption */
	if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
		return;

	/* Delete the preemption watchdog timer */
	del_timer(&a5xx_gpu->preempt_timer);

	/*
	 * The hardware should be clearing CP_CONTEXT_SWITCH_CNTL before
	 * firing this interrupt; a non-zero readback means the preemption
	 * did not actually complete, so log it and go to recovery.
	 */
	status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
	if (unlikely(status)) {
		set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
		DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
			gpu->name);
		kthread_queue_work(gpu->worker, &gpu->recover_work);
		return;
	}

	a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
	a5xx_gpu->next_ring = NULL;

	/* Push any wptr updates that arrived while the switch was in flight */
	update_wptr(gpu, a5xx_gpu->cur_ring);

	set_preempt_state(a5xx_gpu, PREEMPT_NONE);
}
193
194void a5xx_preempt_hw_init(struct msm_gpu *gpu)
195{
196 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
197 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
198 int i;
199
200
201 a5xx_gpu->cur_ring = gpu->rb[0];
202
203
204 if (gpu->nr_rings == 1)
205 return;
206
207 for (i = 0; i < gpu->nr_rings; i++) {
208 a5xx_gpu->preempt[i]->wptr = 0;
209 a5xx_gpu->preempt[i]->rptr = 0;
210 a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
211 }
212
213
214 gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
215 REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
216
217
218 set_preempt_state(a5xx_gpu, PREEMPT_NONE);
219}
220
/*
 * Allocate and initialize the per-ring preemption state: the record the
 * CP saves/restores context into, plus a separate counters buffer.
 *
 * On success the buffers are stashed in a5xx_gpu indexed by ring->id and
 * 0 is returned; on failure anything already allocated here is released
 * and a negative errno is returned.
 */
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
		struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a5xx_preempt_record *ptr;
	void *counters;
	struct drm_gem_object *bo = NULL, *counters_bo = NULL;
	u64 iova = 0, counters_iova = 0;

	/* The record itself is privileged (MSM_BO_MAP_PRIV) */
	ptr = msm_gem_kernel_new(gpu->dev,
		A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
		MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);

	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* The counters buffer is deliberately not mapped privileged */
	counters = msm_gem_kernel_new(gpu->dev,
		A5XX_PREEMPT_COUNTER_SIZE,
		MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
	if (IS_ERR(counters)) {
		/* Don't leak the record buffer allocated above */
		msm_gem_kernel_put(bo, gpu->aspace);
		return PTR_ERR(counters);
	}

	msm_gem_object_set_name(bo, "preempt");
	msm_gem_object_set_name(counters_bo, "preempt_counters");

	a5xx_gpu->preempt_bo[ring->id] = bo;
	a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
	a5xx_gpu->preempt_iova[ring->id] = iova;
	a5xx_gpu->preempt[ring->id] = ptr;

	/* Fill in the static parts of the record */
	ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
	ptr->info = 0;
	ptr->data = 0;
	ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;

	/* Point the record at the ring's rptr shadow and the counters */
	ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
	ptr->counter = counters_iova;

	return 0;
}
267
268void a5xx_preempt_fini(struct msm_gpu *gpu)
269{
270 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
271 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
272 int i;
273
274 for (i = 0; i < gpu->nr_rings; i++) {
275 msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
276 msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
277 }
278}
279
280void a5xx_preempt_init(struct msm_gpu *gpu)
281{
282 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
283 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
284 int i;
285
286
287 if (gpu->nr_rings <= 1)
288 return;
289
290 for (i = 0; i < gpu->nr_rings; i++) {
291 if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
292
293
294
295
296 a5xx_preempt_fini(gpu);
297 gpu->nr_rings = 1;
298
299 return;
300 }
301 }
302
303 timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
304}
305