// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't care what kind of output you're driving to.
 */
#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3

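/*
 * Private-object state for the one color transformation matrix (CTM)
 * the HVS supports. @fifo is the 1-based HVS FIFO the matrix currently
 * applies to, with 0 meaning the CTM is disabled; @ctm points at the
 * S31.32 sign-magnitude coefficients from the CRTC's CTM blob.
 */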
struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

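/*
 * Global state for the HVS channels (FIFOs): for each channel we track
 * whether a CRTC currently owns it and the last CRTC commit that used
 * it, so a later commit can wait for the channel's previous user to
 * finish before reprogramming the FIFO.
 */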
struct vc4_hvs_state {
	struct drm_private_state base;

	struct {
		unsigned in_use: 1;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static struct vc4_hvs_state *
to_vc4_hvs_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

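/*
 * Global state accumulating the memory-bus and HVS load contributions
 * of all active planes, used by the load tracker to reject
 * configurations the scanout hardware couldn't sustain.
 */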
struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

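/*
 * Returns the CTM private state, taking ctm_state_lock first so
 * concurrent commits that touch the shared CTM serialize against each
 * other. Can return an ERR_PTR, including -EDEADLK for lock backoff.
 */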
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

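/*
 * Converts a DRM S31.32 sign-magnitude value to the hardware's S0.9
 * format: bit 9 is the sign, bits 8:0 the fraction. Magnitudes of 1.0
 * or more saturate to the maximum fraction.
 *
 * For example, 0.5 (0x0000000080000000) maps to 0x100, while 1.0
 * (0x0000000100000000) has an integer bit set and saturates to 0x1FF,
 * and -0.5 (sign bit set) additionally gets BIT(9), giving 0x300.
 */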
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

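/*
 * Programs the CTM coefficients into the HVS OLED matrix registers.
 * SCALER_OLEDOFFS_DISPFIFO selects the FIFO the matrix applies to
 * (0 disables it), so the coefficients only need updating when a FIFO
 * is actually selected.
 */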
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

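/*
 * Accessors for the HVS channels private object state. The _new and
 * _old variants only look at states already part of this commit and
 * return ERR_PTR(-EINVAL) when the object isn't in it, while
 * vc4_hvs_get_global_state() pulls the private object into the commit,
 * taking its lock, and may thus fail with -EDEADLK for lock backoff.
 */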
static struct vc4_hvs_state *
vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

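/*
 * On the older (BCM283x) HVS, only the DSP3 output mux needs managing:
 * FIFO2 is either routed to the pixelvalve or handed over to the
 * transposer (TXP). The BCM2711 variant below has to drive the DSP2 to
 * DSP5 muxes instead.
 */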
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'. SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_state->feed_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

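/*
 * Commit implementation: mask underruns on the channels being
 * committed, raise the HVS core clock floor on BCM2711 for the
 * duration of the commit, wait for the previous commit still using one
 * of our FIFOs, then run the standard atomic-helper sequence with the
 * CTM and pixelvalve muxing updates applied between the modeset
 * disables and the plane updates.
 */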
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
	}

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 500000000);

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		unsigned int channel = vc4_crtc_state->assigned_channel;
		int ret;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->hvs->hvs5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 0);
}

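/*
 * Called by the atomic helpers once the CRTC commit objects exist:
 * store a reference to the commit of every CRTC owning a FIFO so that
 * vc4_atomic_commit_tail() of a subsequent commit can wait for the
 * FIFO's previous user to complete.
 */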
static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: the hardware only has one
 * matrix, so we can only enable it for one CRTC at a time, and the
 * coefficients must fit the S0.9 range checked below.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i, j;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the register values with
			 * the current CTM matrix. Use a dedicated index so we
			 * don't clobber the outer CRTC iterator.
			 */
			ctm = new_crtc_state->ctm->data;
			for (j = 0; j < ARRAY_SIZE(ctm->matrix); j++) {
				u64 val = ctm->matrix[j];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

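/*
 * Walks the planes in the commit, subtracting each plane's old-state
 * contribution from the global load counters and adding its new-state
 * one, then checks the totals against the memory-bus and HVS limits
 * below.
 */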
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	if (!vc4->load_tracker_available)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250Mhz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->load_tracker_available)
		return;

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	if (!vc4->load_tracker_available)
		return 0;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

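/*
 * Duplicating the HVS channels state must also grab a reference on
 * each pending CRTC commit, matched by the put in the destroy hook, so
 * the commits stay valid for as long as any state points at them.
 */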
static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;

		if (!old_state->fifo_state[i].pending_commit)
			continue;

		state->fifo_state[i].pending_commit =
			drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
	}

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The HVS has three FIFOs (channels) that can be routed to the
 * pixelvalves and, on BCM2711, to the TXP. Assigning a FIFO to a CRTC
 * is not a purely local decision: a commit may only touch one CRTC
 * while the other CRTCs keep their channel, and a FIFO must stay
 * assigned for as long as its CRTC remains enabled, since changing the
 * routing requires disabling and re-enabling the pixelvalve. We
 * therefore track channel usage in the global private-object state,
 * take channels out of the pool when a CRTC is enabled, and only
 * return them once that CRTC is disabled.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;
		unsigned int channel;

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable)
			continue;

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;
			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/*
		 * Each CRTC can only be fed by a subset of the HVS FIFOs,
		 * described by hvs_available_channels in its static data.
		 * Pick the first channel that is both still unassigned and
		 * reachable from this CRTC; if there is none, the requested
		 * configuration cannot be supported.
		 */
		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels)
			return -EINVAL;

		channel = ffs(matching_channels) - 1;
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	return 0;
}

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup = vc4_atomic_commit_setup,
	.atomic_commit_tail = vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
					      "brcm,bcm2711-vc5");
	int ret;

	if (!is_vc5) {
		vc4->load_tracker_available = true;

		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}