#include "mdp5_kms.h"
#include "mdp5_ctl.h"
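
/*
 * CTL - MDP Control Pool Manager
 *
 * CTLs are shared between all display interfaces. They are intended to be
 * used for data path configuration: the top level register programming
 * describes the complete data path for a specific data path ID, via the
 * REG_MDP5_CTL_*(<id>, ...) registers.
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (e.g. high-resolution dual pipe), one single CTL can
 * be shared across multiple CRTCs.
 */
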
#define CTL_STAT_BUSY	0x1
#define CTL_STAT_BOOKED	0x2

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;

	/* CTL status bitmask (CTL_STAT_BUSY | CTL_STAT_BOOKED) */
	u32 status;

	bool encoder_enabled;

	/* pending flush_mask bits */
	u32 flush_mask;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;

	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of Layer Mixers / CTLs in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].status) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}

static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}

static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	struct mdp5_interface *intf = pipeline->intf;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	if (pipeline->r_mixer)
		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
			  MDP5_CTL_OP_PACK_3D(1);

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
	struct mdp5_interface *intf = pipeline->intf;

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, pipeline);

	return 0;
}

static bool start_signal_needed(struct mdp5_ctl *ctl,
				struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (!ctl->encoder_enabled)
		return false;

	switch (intf->type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}
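
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to
 * be executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */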
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
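
/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl:      the CTL instance
 * @pipeline: the encoder's INTF + MIXER configuration
 * @enabled:  true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path kickoff).
 */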
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
			       struct mdp5_pipeline *pipeline,
			       bool enabled)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->encoder_enabled = enabled;
	DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");

	if (start_signal_needed(ctl, pipeline))
		send_start_signal(ctl);

	return 0;
}
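
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with the corresponding flush mask)
 */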
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;

	if (WARN_ON(!mixer)) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
			      ctl->id);
		return -EINVAL;
	}

	if (pipeline->r_mixer) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}

static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	case SSPP_CURSOR0:
	case SSPP_CURSOR1:
	default: return 0;
	}
}
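
/*
 * Stages STAGE6 and above need a fourth stage bit, which only exists in the
 * LAYER_EXT register (the *_BIT3 fields); the CURSOR pipes are likewise only
 * describable there. Everything else is fully encoded in the base LAYER
 * register and needs no EXT bits.
 */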
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
	case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
	default: return 0;
	}
}

static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

#define PIPE_LEFT	0
#define PIPE_RIGHT	1
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
	    blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
		    r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}

u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
	case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0: return MDP5_CTL_FLUSH_LM0;
	case 1: return MDP5_CTL_FLUSH_LM1;
	case 2: return MDP5_CTL_FLUSH_LM2;
	case 3: return MDP5_CTL_FLUSH_LM3;
	case 4: return MDP5_CTL_FLUSH_LM4;
	case 5: return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
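
	/* for some targets, cursor bit is the same as LM bit */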
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

	return sw_mask;
}
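
/*
 * With single FLUSH, the FLUSH bits of two paired CTLs are accumulated in
 * single_flush_pending_mask and written out once, via the FLUSH register of
 * the lower CTL id, when both CTLs have a flush pending.
 */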
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
				 u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
			    *flush_id);
		}
	}
}
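
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * @ctl:        the CTL instance
 * @pipeline:   the encoder's INTF + MIXER configuration
 * @flush_mask: bitmask of display controller hw blocks to flush
 * @start:      if true, immediately update the flush registers and set the
 *              START bit; otherwise accumulate flush_mask bits until we are
 *              starting
 *
 * The flush register is used to indicate that several registers are all
 * programmed and safe to update to the back copy of the double buffered
 * registers.
 *
 * Some FLUSH bits are shared between hw blocks when the hardware lacks a
 * dedicated bit for them; handling those is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; when that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return: H/W flushed bit mask.
 */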
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask, bool start)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (!start) {
		ctl->flush_mask |= flush_mask;
		return curr_ctl_flush_mask;
	} else {
		flush_mask |= ctl->flush_mask;
		ctl->flush_mask = 0;
	}

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline))
		send_start_signal(ctl);

	return curr_ctl_flush_mask;
}

u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}
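
/*
 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 */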
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing silently if hw doesn't support */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
		   MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}
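
/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * Return: NULL if no CTL is available.
 */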
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category first */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		 "fall back to the other CTL category for INTF %d!\n", intf_num);

	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}

void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	unsigned dsi_cnt = 0;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
		DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
			      ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
	 * DSI interfaces to support the single FLUSH feature (flush CTL0 and
	 * CTL1 by writing only into CTL0's FLUSH register), which keeps the
	 * two DSI pipes in sync. Single FLUSH is supported from hw rev v3.0.
	 */
	for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
		if (hw_cfg->intf.connect[c] == INTF_DSI)
			dsi_cnt++;
	if ((rev >= 3) && (dsi_cnt > 1)) {
		ctl_mgr->single_flush_supported = true;
		/* if dual DSI, only CTL0 and CTL1 are booked */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}