#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all display interfaces.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 */

#define CTL_STAT_BUSY		0x1
#define CTL_STAT_BOOKED	0x2

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;

	/* CTL status bitmask */
	u32 status;

	bool encoder_enabled;
	u32 start_mask;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;

	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}

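/*
 * DISP_INTF_SEL is a single register shared by all CTLs: program the
 * interface type for one physical INTF with a read-modify-write under
 * the mdp5_kms resource lock.
 */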
static void set_display_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}

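/*
 * CTL_OP describes the endpoint of this CTL's data path: the INTF number
 * (for non-virtual interfaces), command vs. video mode for DSI, line mode
 * for writeback, and 3D mux packing when a right mixer is in use.
 */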
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	struct mdp5_interface *intf = pipeline->intf;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	if (pipeline->r_mixer)
		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
			  MDP5_CTL_OP_PACK_3D(1);

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

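/*
 * mdp5_ctl_set_pipeline() - configure the data path driven by this CTL
 *
 * Computes the START group (layer mixer(s) + encoder) that must all be
 * flushed before a START signal may be sent, then programs the interface
 * selection and the CTL_OP register.
 */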
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
	struct mdp5_interface *intf = pipeline->intf;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;

	ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
			  mdp_ctl_flush_mask_encoder(intf);
	if (r_mixer)
		ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, pipeline);

	return 0;
}

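/*
 * A START signal is only needed once the encoder is enabled and every
 * block in the start group has been flushed (start_mask drained to 0),
 * and only for interfaces that do not kick off on their own: writeback
 * and DSI in command mode.
 */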
static bool start_signal_needed(struct mdp5_ctl *ctl,
				struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (!ctl->encoder_enabled || ctl->start_mask != 0)
		return false;

	switch (intf->type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}

/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to
 * be executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

static void refill_start_mask(struct mdp5_ctl *ctl,
			      struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;

	ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	/*
	 * Writeback encoder needs to program & flush
	 * address registers for each page flip..
	 */
	if (intf->type == INTF_WB)
		ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}

/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @enabled: true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path
 * kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
			       struct mdp5_pipeline *pipeline,
			       bool enabled)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->encoder_enabled = enabled;
	DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
		refill_start_mask(ctl, pipeline);
	}

	return 0;
}

/*
 * Note:
 * CTL registers need to be flushed after calling this function; the cursor
 * FLUSH bit is recorded in pending_ctl_trigger for the next
 * mdp5_ctl_commit().
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;

	if (unlikely(WARN_ON(!mixer))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM\n",
			ctl->id);
		return -EINVAL;
	}

	if (pipeline->r_mixer) {
		dev_err(ctl_mgr->dev->dev, "unsupported configuration\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}

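/*
 * Map a pipe to its stage field in the per-LM LAYER register. Cursor pipes
 * have no field here; they are configured via the LAYER_EXT register.
 */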
static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
			      enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	case SSPP_CURSOR0:
	case SSPP_CURSOR1:
	default: return 0;
	}
}

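/*
 * The LAYER_EXT register carries the extra stage bit (BIT3) needed for
 * stages STAGE6 and above, plus the full stage fields of the cursor pipes.
 */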
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
				  enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
	case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
	default: return 0;
	}
}

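/* Clear the stale stage assignments of every layer mixer for this CTL. */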
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

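/*
 * mdp5_ctl_blend() - program the per-LM stage assignments for this CTL
 *
 * stage[i] holds the left/right pipe pair staged at blend level i for the
 * main mixer; r_stage[i] the same for the right mixer in dual-pipe setups.
 * BORDER_OUT starts staging at STAGE0 and enables the border color output.
 */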
#define PIPE_LEFT	0
#define PIPE_RIGHT	1
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
	    blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
		    r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}

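/*
 * The mdp_ctl_flush_mask_*() helpers translate a hardware resource
 * (encoder, cursor, pipe, layer mixer) into its bit in the CTL FLUSH
 * register; they return 0 for resources without a dedicated FLUSH bit.
 */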
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
	case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0: return MDP5_CTL_FLUSH_LM0;
	case 1: return MDP5_CTL_FLUSH_LM1;
	case 2: return MDP5_CTL_FLUSH_LM2;
	case 5: return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

	return sw_mask;
}

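/*
 * With two paired CTLs (bonded DSI), FLUSH writes are deferred until both
 * CTLs have a flush pending; the accumulated mask is then written once, to
 * the FLUSH register of the lower CTL id.
 */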
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
				 u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
			    *flush_id);
		}
	}
}

/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the responsibility of the
 * caller (e.g. cursor), which is why it is not done here.
 *
 * Return: the effective flush mask for this CTL (the bits actually written,
 * filtered by the hardware's flush mask and before any single-FLUSH
 * pairing), so the caller can wait on the right bits.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	ctl->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
	     ctl->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
		refill_start_mask(ctl, pipeline);
	}

	return curr_ctl_flush_mask;
}

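/* Read back this CTL's FLUSH register; callers poll it for commit status. */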
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}

/*
 * mdp5_ctl_pair() - associate 2 booked CTLs for single FLUSH
 */
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing silently if hw doesn't support */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
		   MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}

/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL when @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * Return: NULL if no CTL is available.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
				   int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category first */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		 "fall back to the other CTL category for INTF %d!\n",
		 intf_num);

	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_err(ctl_mgr->dev->dev, "No more CTL available!\n");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}

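/* Clear the OP (data path) configuration of every CTL in the pool. */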
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}

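/*
 * mdp5_ctlm_init() - build the CTL pool from the hw config
 *
 * Validates the static pool size, initializes each CTL's id, register
 * offset and lock, and (on hw rev >= v3.0) books CTL0/CTL1 for the two
 * DSI interfaces so they can be paired for single FLUSH.
 */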
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
			ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In the bonded DSI case, CTL0 and CTL1 are always assigned to the
	 * two DSI interfaces to support the single FLUSH feature (flush CTL0
	 * and CTL1 with only one write, into CTL0's FLUSH register), keeping
	 * the two DSI pipes in sync. Single FLUSH is supported from hw rev
	 * v3.0.
	 */
	if (rev >= 3) {
		ctl_mgr->single_flush_supported = true;
		/* Reserve CTL0/1 for INTF1/2 */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}