1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "mdp5_kms.h"
21#include "mdp5_smp.h"
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
/*
 * SMP (Shared Memory Pool) allocator state.  MMBs (memory macro blocks)
 * are handed out to pipe-clients in a double-buffered fashion:
 * pending -> configured -> inuse (see client_state).
 */
struct mdp5_smp {
	struct drm_device *dev;

	/* statically pre-reserved MMB count per client, from hw config */
	uint8_t reserved[MAX_CLIENTS];

	int blk_cnt;	/* total number of MMBs in the pool */
	int blk_size;	/* size of one MMB, in bytes */

	/* protects 'state' across request/release/commit paths */
	spinlock_t state_lock;
	mdp5_smp_state_t state;	/* bitmap of globally allocated MMBs */

	/* per-client view of its own pending/configured/inuse MMBs */
	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
103
/* forward decl: defined below, needed by mdp5_smp_release() */
static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned);
106
107static inline
108struct mdp5_kms *get_kms(struct mdp5_smp *smp)
109{
110 struct msm_drm_private *priv = smp->dev->dev_private;
111
112 return to_mdp5_kms(to_mdp_kms(priv->kms));
113}
114
/*
 * Map a (pipe, plane-component) pair to its SMP client id.
 *
 * NOTE(review): per-pipe client ids come from mdp5_cfg->smp.clients[];
 * the '+ plane' offset assumes the per-component (e.g. Y/Cr/Cb) client
 * ids are consecutive for a given pipe — confirm against the hw config
 * tables for this revision.
 */
static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	/* out-of-range plane index maps to the "unused" client id */
	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	return mdp5_cfg->smp.clients[pipe] + plane;
}
136
137
138static int smp_request_block(struct mdp5_smp *smp,
139 u32 cid, int nblks)
140{
141 struct mdp5_kms *mdp5_kms = get_kms(smp);
142 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
143 int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
144 uint8_t reserved;
145 unsigned long flags;
146
147 reserved = smp->reserved[cid];
148
149 spin_lock_irqsave(&smp->state_lock, flags);
150
151 if (reserved) {
152 nblks = max(0, nblks - reserved);
153 DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
154 }
155
156 avail = cnt - bitmap_weight(smp->state, cnt);
157 if (nblks > avail) {
158 dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
159 nblks, avail);
160 ret = -ENOSPC;
161 goto fail;
162 }
163
164 cur_nblks = bitmap_weight(ps->pending, cnt);
165 if (nblks > cur_nblks) {
166
167 for (i = cur_nblks; i < nblks; i++) {
168 int blk = find_first_zero_bit(smp->state, cnt);
169 set_bit(blk, ps->pending);
170 set_bit(blk, smp->state);
171 }
172 } else {
173
174 for (i = cur_nblks; i > nblks; i--) {
175 int blk = find_first_bit(ps->pending, cnt);
176 clear_bit(blk, ps->pending);
177
178
179
180
181 if (!test_bit(blk, ps->configured))
182 clear_bit(blk, smp->state);
183 }
184 }
185
186fail:
187 spin_unlock_irqrestore(&smp->state_lock, flags);
188 return 0;
189}
190
191static void set_fifo_thresholds(struct mdp5_smp *smp,
192 enum mdp5_pipe pipe, int nblks)
193{
194 struct mdp5_kms *mdp5_kms = get_kms(smp);
195 u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
196 u32 val;
197
198
199 val = (nblks * smp_entries_per_blk) / 4;
200
201 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
202 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
203 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
204}
205
206
207
208
209
210
211
/*
 * Step #1 of the double-buffered SMP flow: compute and request the SMP
 * blocks a pipe needs to scan out a buffer of the given format/width,
 * then program the pipe's FIFO watermarks accordingly.
 *
 * NOTE(review): 'width' appears to be the post-decimation width (SMP
 * buffering downstream of decimation) — confirm against callers.
 */
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
		const struct mdp_format *format, u32 width, bool hdecim)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines, nblks, ret;
	u32 fmt = format->base.pixel_format;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* two lines of latency buffering per plane */
	nlines = 2;

	/*
	 * On newer hw revisions (rev > 0), sub-sampled chroma formats are
	 * sized here as if they were NV24 with two planes.
	 */
	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
		fmt = DRM_FORMAT_NV24;
		nplanes = 2;

		/*
		 * With horizontal decimation enabled, drop the chroma
		 * sub-sampling factor (treat chroma as full width).
		 */
		if (hdecim && (hsub > 1))
			hsub = 1;
	}

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		/* chroma planes (i > 0) fetch at 1/hsub of the luma width */
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* rev 0 hw rounds allocations up to a power of two: */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	set_fifo_thresholds(smp, pipe, nblks);

	return 0;
}
269
270
/*
 * Release all SMP blocks held by every client of the given pipe:
 * un-assign them in hw, return them to the shared pool, and reset the
 * clients' pending/configured/inuse tracking.  Finally zero the pipe's
 * FIFO watermarks.
 */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int i;
	unsigned long flags;
	int cnt = smp->blk_cnt;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		mdp5_smp_state_t assigned;
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		spin_lock_irqsave(&smp->state_lock, flags);

		/* point all hw-assigned blocks back at the unused client: */
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, CID_UNUSED, &assigned);

		/* return both pending and assigned blocks to the pool: */
		bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
		bitmap_andnot(smp->state, smp->state, assigned, cnt);

		/* wipe this client's tracking state: */
		bitmap_zero(ps->pending, cnt);
		bitmap_zero(ps->configured, cnt);
		bitmap_zero(ps->inuse, cnt);

		spin_unlock_irqrestore(&smp->state_lock, flags);
	}

	set_fifo_thresholds(smp, pipe, 0);
}
302
303static void update_smp_state(struct mdp5_smp *smp,
304 u32 cid, mdp5_smp_state_t *assigned)
305{
306 struct mdp5_kms *mdp5_kms = get_kms(smp);
307 int cnt = smp->blk_cnt;
308 u32 blk, val;
309
310 for_each_set_bit(blk, *assigned, cnt) {
311 int idx = blk / 3;
312 int fld = blk % 3;
313
314 val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
315
316 switch (fld) {
317 case 0:
318 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
319 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
320 break;
321 case 1:
322 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
323 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
324 break;
325 case 2:
326 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
327 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
328 break;
329 }
330
331 mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
332 mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
333 }
334}
335
336
/*
 * Step #2: push each client's pending allocation into hw by programming
 * the ALLOC registers for union(inuse, configured-to-be).
 */
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t assigned;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * If inuse != configured, the previous configure has not
		 * been committed yet (no vblank since), so skip this
		 * client for now rather than overwriting in-flight state.
		 */
		if (!bitmap_equal(ps->inuse, ps->configured, cnt))
			continue;

		bitmap_copy(ps->configured, ps->pending, cnt);
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, cid, &assigned);
	}
}
359
360
/*
 * Step #3 (after vblank): promote configured -> inuse and release any
 * blocks that were in use but are no longer configured, making them
 * available to other clients.
 */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t released;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * Blocks we were previously using that are not part of
		 * the new configuration can now be released:
		 */
		if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
			unsigned long flags;

			spin_lock_irqsave(&smp->state_lock, flags);
			/* return released blocks to the shared pool: */
			bitmap_andnot(smp->state, smp->state, released, cnt);
			spin_unlock_irqrestore(&smp->state_lock, flags);

			/* and un-assign them in hw: */
			update_smp_state(smp, CID_UNUSED, &released);
		}

		bitmap_copy(ps->inuse, ps->configured, cnt);
	}
}
390
/* Tear down the SMP allocator; safe to call with an ERR_PTR-free smp. */
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}
395
396struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
397{
398 struct mdp5_smp *smp = NULL;
399 int ret;
400
401 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
402 if (unlikely(!smp)) {
403 ret = -ENOMEM;
404 goto fail;
405 }
406
407 smp->dev = dev;
408 smp->blk_cnt = cfg->mmb_count;
409 smp->blk_size = cfg->mmb_size;
410
411
412 bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
413 memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
414 spin_lock_init(&smp->state_lock);
415
416 return smp;
417fail:
418 if (smp)
419 mdp5_smp_destroy(smp);
420
421 return ERR_PTR(ret);
422}
423