#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "dce_v8_0.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

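/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */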
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update the scanout pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

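/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */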
227static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
228 enum amdgpu_hpd_id hpd)
229{
230 bool connected = false;
231
232 if (hpd >= adev->mode_info.num_hpd)
233 return connected;
234
235 if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
236 DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
237 connected = true;
238
239 return connected;
240}
241
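/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */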
250static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
251 enum amdgpu_hpd_id hpd)
252{
253 u32 tmp;
254 bool connected = dce_v8_0_hpd_sense(adev, hpd);
255
256 if (hpd >= adev->mode_info.num_hpd)
257 return;
258
259 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
260 if (connected)
261 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
262 else
263 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
264 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
265}
266
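/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */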
275static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
276{
277 struct drm_device *dev = adev_to_drm(adev);
278 struct drm_connector *connector;
279 struct drm_connector_list_iter iter;
280 u32 tmp;
281
282 drm_connector_list_iter_begin(dev, &iter);
283 drm_for_each_connector_iter(connector, &iter) {
284 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
285
286 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
287 continue;
288
289 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
290 tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
291 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
292
293 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
294 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
295
296
297
298
299
300 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
301 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
302 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
303 continue;
304 }
305
306 dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
307 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
308 }
309 drm_connector_list_iter_end(&iter);
310}
311
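/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */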
320static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
321{
322 struct drm_device *dev = adev_to_drm(adev);
323 struct drm_connector *connector;
324 struct drm_connector_list_iter iter;
325 u32 tmp;
326
327 drm_connector_list_iter_begin(dev, &iter);
328 drm_for_each_connector_iter(connector, &iter) {
329 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
330
331 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
332 continue;
333
334 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
335 tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
336 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
337
338 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
339 }
340 drm_connector_list_iter_end(&iter);
341}
342
343static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
344{
345 return mmDC_GPIO_HPD_A;
346}
347
348static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
349{
350 u32 crtc_hung = 0;
351 u32 crtc_status[6];
352 u32 i, j, tmp;
353
354 for (i = 0; i < adev->mode_info.num_crtc; i++) {
355 if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
356 crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
357 crtc_hung |= (1 << i);
358 }
359 }
360
361 for (j = 0; j < 10; j++) {
362 for (i = 0; i < adev->mode_info.num_crtc; i++) {
363 if (crtc_hung & (1 << i)) {
364 tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
365 if (tmp != crtc_status[i])
366 crtc_hung &= ~(1 << i);
367 }
368 }
369 if (crtc_hung == 0)
370 return false;
371 udelay(100);
372 }
373
374 return true;
375}
376
377static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
378 bool render)
379{
380 u32 tmp;
381
382
383 tmp = RREG32(mmVGA_HDP_CONTROL);
384 if (render)
385 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
386 else
387 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
388 WREG32(mmVGA_HDP_CONTROL, tmp);
389
390
391 tmp = RREG32(mmVGA_RENDER_CONTROL);
392 if (render)
393 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
394 else
395 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
396 WREG32(mmVGA_RENDER_CONTROL, tmp);
397}
398
399static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
400{
401 int num_crtc = 0;
402
403 switch (adev->asic_type) {
404 case CHIP_BONAIRE:
405 case CHIP_HAWAII:
406 num_crtc = 6;
407 break;
408 case CHIP_KAVERI:
409 num_crtc = 4;
410 break;
411 case CHIP_KABINI:
412 case CHIP_MULLINS:
413 num_crtc = 2;
414 break;
415 default:
416 num_crtc = 0;
417 }
418 return num_crtc;
419}
420
421void dce_v8_0_disable_dce(struct amdgpu_device *adev)
422{
423
424 if (amdgpu_atombios_has_dce_engine_info(adev)) {
425 u32 tmp;
426 int crtc_enabled, i;
427
428 dce_v8_0_set_vga_render_state(adev, false);
429
430
431 for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
432 crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
433 CRTC_CONTROL, CRTC_MASTER_EN);
434 if (crtc_enabled) {
435 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
436 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
437 tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
438 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
439 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
440 }
441 }
442 }
443}
444
445static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
446{
447 struct drm_device *dev = encoder->dev;
448 struct amdgpu_device *adev = drm_to_adev(dev);
449 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
450 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
451 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
452 int bpc = 0;
453 u32 tmp = 0;
454 enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
455
456 if (connector) {
457 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
458 bpc = amdgpu_connector_get_monitor_bpc(connector);
459 dither = amdgpu_connector->dither;
460 }
461
462
463 if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
464 return;
465
466
467 if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
468 (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
469 return;
470
471 if (bpc == 0)
472 return;
473
474 switch (bpc) {
475 case 6:
476 if (dither == AMDGPU_FMT_DITHER_ENABLE)
477
478 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
479 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
480 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
481 (0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
482 else
483 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
484 (0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
485 break;
486 case 8:
487 if (dither == AMDGPU_FMT_DITHER_ENABLE)
488
489 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
490 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
491 FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
492 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
493 (1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
494 else
495 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
496 (1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
497 break;
498 case 10:
499 if (dither == AMDGPU_FMT_DITHER_ENABLE)
500
501 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
502 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
503 FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
504 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
505 (2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
506 else
507 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
508 (2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
509 break;
510 default:
511
512 break;
513 }
514
515 WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
516}
517
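/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Setup up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */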
532static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
533 struct amdgpu_crtc *amdgpu_crtc,
534 struct drm_display_mode *mode)
535{
536 u32 tmp, buffer_alloc, i;
537 u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
538
539
540
541
542
543
544
545
546 if (amdgpu_crtc->base.enabled && mode) {
547 if (mode->crtc_hdisplay < 1920) {
548 tmp = 1;
549 buffer_alloc = 2;
550 } else if (mode->crtc_hdisplay < 2560) {
551 tmp = 2;
552 buffer_alloc = 2;
553 } else if (mode->crtc_hdisplay < 4096) {
554 tmp = 0;
555 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
556 } else {
557 DRM_DEBUG_KMS("Mode too big for LB!\n");
558 tmp = 0;
559 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
560 }
561 } else {
562 tmp = 1;
563 buffer_alloc = 0;
564 }
565
566 WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
567 (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
568 (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
569
570 WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
571 (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
572 for (i = 0; i < adev->usec_timeout; i++) {
573 if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
574 PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
575 break;
576 udelay(1);
577 }
578
579 if (amdgpu_crtc->base.enabled && mode) {
580 switch (tmp) {
581 case 0:
582 default:
583 return 4096 * 2;
584 case 1:
585 return 1920 * 2;
586 case 2:
587 return 2560 * 2;
588 }
589 }
590
591
592 return 0;
593}
594
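/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns the number of dram channels.
 */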
604static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
605{
606 u32 tmp = RREG32(mmMC_SHARED_CHMAP);
607
608 switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
609 case 0:
610 default:
611 return 1;
612 case 1:
613 return 2;
614 case 2:
615 return 4;
616 case 3:
617 return 8;
618 case 4:
619 return 3;
620 case 5:
621 return 6;
622 case 6:
623 return 10;
624 case 7:
625 return 12;
626 case 8:
627 return 16;
628 }
629}
630
631struct dce8_wm_params {
632 u32 dram_channels;
633 u32 yclk;
634 u32 sclk;
635 u32 disp_clk;
636 u32 src_width;
637 u32 active_time;
638 u32 blank_time;
639 bool interlaced;
640 fixed20_12 vsc;
641 u32 num_heads;
642 u32 bytes_per_pixel;
643 u32 lb_size;
644 u32 vtaps;
645};
646
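/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns the dram bandwidth in MBytes/s.
 */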
656static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
657{
658
659 fixed20_12 dram_efficiency;
660 fixed20_12 yclk, dram_channels, bandwidth;
661 fixed20_12 a;
662
663 a.full = dfixed_const(1000);
664 yclk.full = dfixed_const(wm->yclk);
665 yclk.full = dfixed_div(yclk, a);
666 dram_channels.full = dfixed_const(wm->dram_channels * 4);
667 a.full = dfixed_const(10);
668 dram_efficiency.full = dfixed_const(7);
669 dram_efficiency.full = dfixed_div(dram_efficiency, a);
670 bandwidth.full = dfixed_mul(dram_channels, yclk);
671 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
672
673 return dfixed_trunc(bandwidth);
674}
675
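/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns the dram bandwidth for display in MBytes/s.
 */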
685static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
686{
687
688 fixed20_12 disp_dram_allocation;
689 fixed20_12 yclk, dram_channels, bandwidth;
690 fixed20_12 a;
691
692 a.full = dfixed_const(1000);
693 yclk.full = dfixed_const(wm->yclk);
694 yclk.full = dfixed_div(yclk, a);
695 dram_channels.full = dfixed_const(wm->dram_channels * 4);
696 a.full = dfixed_const(10);
697 disp_dram_allocation.full = dfixed_const(3);
698 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
699 bandwidth.full = dfixed_mul(dram_channels, yclk);
700 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
701
702 return dfixed_trunc(bandwidth);
703}
704
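/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns the data return bandwidth in MBytes/s.
 */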
714static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
715{
716
717 fixed20_12 return_efficiency;
718 fixed20_12 sclk, bandwidth;
719 fixed20_12 a;
720
721 a.full = dfixed_const(1000);
722 sclk.full = dfixed_const(wm->sclk);
723 sclk.full = dfixed_div(sclk, a);
724 a.full = dfixed_const(10);
725 return_efficiency.full = dfixed_const(8);
726 return_efficiency.full = dfixed_div(return_efficiency, a);
727 a.full = dfixed_const(32);
728 bandwidth.full = dfixed_mul(a, sclk);
729 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
730
731 return dfixed_trunc(bandwidth);
732}
733
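/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns the dmif bandwidth in MBytes/s.
 */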
743static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
744{
745
746 fixed20_12 disp_clk_request_efficiency;
747 fixed20_12 disp_clk, bandwidth;
748 fixed20_12 a, b;
749
750 a.full = dfixed_const(1000);
751 disp_clk.full = dfixed_const(wm->disp_clk);
752 disp_clk.full = dfixed_div(disp_clk, a);
753 a.full = dfixed_const(32);
754 b.full = dfixed_mul(a, disp_clk);
755
756 a.full = dfixed_const(10);
757 disp_clk_request_efficiency.full = dfixed_const(8);
758 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
759
760 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
761
762 return dfixed_trunc(bandwidth);
763}
764
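/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK):
 * the minimum of dram, data return and dmif request bandwidth.
 * Used for display watermark bandwidth calculations.
 * Returns the min available bandwidth in MBytes/s.
 */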
774static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
775{
776
777 u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
778 u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
779 u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
780
781 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
782}
783
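/**
 * dce_v8_0_average_bandwidth - get the average bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average bandwidth consumed by the current display
 * mode (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns the average bandwidth in MBytes/s.
 */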
793static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
794{
795
796
797
798
799 fixed20_12 bpp;
800 fixed20_12 line_time;
801 fixed20_12 src_width;
802 fixed20_12 bandwidth;
803 fixed20_12 a;
804
805 a.full = dfixed_const(1000);
806 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
807 line_time.full = dfixed_div(line_time, a);
808 bpp.full = dfixed_const(wm->bytes_per_pixel);
809 src_width.full = dfixed_const(wm->src_width);
810 bandwidth.full = dfixed_mul(src_width, bpp);
811 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
812 bandwidth.full = dfixed_div(bandwidth, line_time);
813
814 return dfixed_trunc(bandwidth);
815}
816
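/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns the latency watermark in ns.
 */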
826static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
827{
828
829 u32 mc_latency = 2000;
830 u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
831 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
832 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
833 u32 dc_latency = 40000000 / wm->disp_clk;
834 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
835 (wm->num_heads * cursor_line_pair_return_time);
836 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
837 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
838 u32 tmp, dmif_size = 12288;
839 fixed20_12 a, b, c;
840
841 if (wm->num_heads == 0)
842 return 0;
843
844 a.full = dfixed_const(2);
845 b.full = dfixed_const(1);
846 if ((wm->vsc.full > a.full) ||
847 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
848 (wm->vtaps >= 5) ||
849 ((wm->vsc.full >= a.full) && wm->interlaced))
850 max_src_lines_per_dst_line = 4;
851 else
852 max_src_lines_per_dst_line = 2;
853
854 a.full = dfixed_const(available_bandwidth);
855 b.full = dfixed_const(wm->num_heads);
856 a.full = dfixed_div(a, b);
857 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
858 tmp = min(dfixed_trunc(a), tmp);
859
860 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
861
862 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
863 b.full = dfixed_const(1000);
864 c.full = dfixed_const(lb_fill_bw);
865 b.full = dfixed_div(c, b);
866 a.full = dfixed_div(a, b);
867 line_fill_time = dfixed_trunc(a);
868
869 if (line_fill_time < wm->active_time)
870 return latency;
871 else
872 return latency + (line_fill_time - wm->active_time);
873
874}
875
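/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
 */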
887static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
888{
889 if (dce_v8_0_average_bandwidth(wm) <=
890 (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
891 return true;
892 else
893 return false;
894}
895
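/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
 */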
907static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
908{
909 if (dce_v8_0_average_bandwidth(wm) <=
910 (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
911 return true;
912 else
913 return false;
914}
915
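/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations.
 * Returns true if the display fits, false if not.
 */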
925static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
926{
927 u32 lb_partitions = wm->lb_size / wm->src_width;
928 u32 line_time = wm->active_time + wm->blank_time;
929 u32 latency_tolerant_lines;
930 u32 latency_hiding;
931 fixed20_12 a;
932
933 a.full = dfixed_const(1);
934 if (wm->vsc.full > a.full)
935 latency_tolerant_lines = 1;
936 else {
937 if (lb_partitions <= (wm->vtaps + 1))
938 latency_tolerant_lines = 1;
939 else
940 latency_tolerant_lines = 2;
941 }
942
943 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
944
945 if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
946 return true;
947 else
948 return false;
949}
950
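/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */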
962static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
963 struct amdgpu_crtc *amdgpu_crtc,
964 u32 lb_size, u32 num_heads)
965{
966 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
967 struct dce8_wm_params wm_low, wm_high;
968 u32 active_time;
969 u32 line_time = 0;
970 u32 latency_watermark_a = 0, latency_watermark_b = 0;
971 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
972
973 if (amdgpu_crtc->base.enabled && num_heads && mode) {
974 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
975 (u32)mode->clock);
976 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
977 (u32)mode->clock);
978 line_time = min(line_time, (u32)65535);
979
980
981 if (adev->pm.dpm_enabled) {
982 wm_high.yclk =
983 amdgpu_dpm_get_mclk(adev, false) * 10;
984 wm_high.sclk =
985 amdgpu_dpm_get_sclk(adev, false) * 10;
986 } else {
987 wm_high.yclk = adev->pm.current_mclk * 10;
988 wm_high.sclk = adev->pm.current_sclk * 10;
989 }
990
991 wm_high.disp_clk = mode->clock;
992 wm_high.src_width = mode->crtc_hdisplay;
993 wm_high.active_time = active_time;
994 wm_high.blank_time = line_time - wm_high.active_time;
995 wm_high.interlaced = false;
996 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
997 wm_high.interlaced = true;
998 wm_high.vsc = amdgpu_crtc->vsc;
999 wm_high.vtaps = 1;
1000 if (amdgpu_crtc->rmx_type != RMX_OFF)
1001 wm_high.vtaps = 2;
1002 wm_high.bytes_per_pixel = 4;
1003 wm_high.lb_size = lb_size;
1004 wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1005 wm_high.num_heads = num_heads;
1006
1007
1008 latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
1009
1010
1011
1012 if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1013 !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1014 !dce_v8_0_check_latency_hiding(&wm_high) ||
1015 (adev->mode_info.disp_priority == 2)) {
1016 DRM_DEBUG_KMS("force priority to high\n");
1017 }
1018
1019
1020 if (adev->pm.dpm_enabled) {
1021 wm_low.yclk =
1022 amdgpu_dpm_get_mclk(adev, true) * 10;
1023 wm_low.sclk =
1024 amdgpu_dpm_get_sclk(adev, true) * 10;
1025 } else {
1026 wm_low.yclk = adev->pm.current_mclk * 10;
1027 wm_low.sclk = adev->pm.current_sclk * 10;
1028 }
1029
1030 wm_low.disp_clk = mode->clock;
1031 wm_low.src_width = mode->crtc_hdisplay;
1032 wm_low.active_time = active_time;
1033 wm_low.blank_time = line_time - wm_low.active_time;
1034 wm_low.interlaced = false;
1035 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1036 wm_low.interlaced = true;
1037 wm_low.vsc = amdgpu_crtc->vsc;
1038 wm_low.vtaps = 1;
1039 if (amdgpu_crtc->rmx_type != RMX_OFF)
1040 wm_low.vtaps = 2;
1041 wm_low.bytes_per_pixel = 4;
1042 wm_low.lb_size = lb_size;
1043 wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1044 wm_low.num_heads = num_heads;
1045
1046
1047 latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
1048
1049
1050
1051 if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1052 !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1053 !dce_v8_0_check_latency_hiding(&wm_low) ||
1054 (adev->mode_info.disp_priority == 2)) {
1055 DRM_DEBUG_KMS("force priority to high\n");
1056 }
1057 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1058 }
1059
1060
1061 wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1062 tmp = wm_mask;
1063 tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1064 tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1065 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1066 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1067 ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1068 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1069
1070 tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1071 tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1072 tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1073 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1074 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1075 ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1076 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1077
1078 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1079
1080
1081 amdgpu_crtc->line_time = line_time;
1082 amdgpu_crtc->wm_high = latency_watermark_a;
1083 amdgpu_crtc->wm_low = latency_watermark_b;
1084
1085 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1086}
1087
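/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */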
1096static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1097{
1098 struct drm_display_mode *mode = NULL;
1099 u32 num_heads = 0, lb_size;
1100 int i;
1101
1102 amdgpu_display_update_priority(adev);
1103
1104 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1105 if (adev->mode_info.crtcs[i]->base.enabled)
1106 num_heads++;
1107 }
1108 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1109 mode = &adev->mode_info.crtcs[i]->base.mode;
1110 lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1111 dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1112 lb_size, num_heads);
1113 }
1114}
1115
1116static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1117{
1118 int i;
1119 u32 offset, tmp;
1120
1121 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1122 offset = adev->mode_info.audio.pin[i].offset;
1123 tmp = RREG32_AUDIO_ENDPT(offset,
1124 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1125 if (((tmp &
1126 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1127 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1128 adev->mode_info.audio.pin[i].connected = false;
1129 else
1130 adev->mode_info.audio.pin[i].connected = true;
1131 }
1132}
1133
1134static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1135{
1136 int i;
1137
1138 dce_v8_0_audio_get_connected_pins(adev);
1139
1140 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1141 if (adev->mode_info.audio.pin[i].connected)
1142 return &adev->mode_info.audio.pin[i];
1143 }
1144 DRM_ERROR("No connected audio pins found!\n");
1145 return NULL;
1146}
1147
1148static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1149{
1150 struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1151 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1152 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1153 u32 offset;
1154
1155 if (!dig || !dig->afmt || !dig->afmt->pin)
1156 return;
1157
1158 offset = dig->afmt->offset;
1159
1160 WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1161 (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1162}
1163
1164static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1165 struct drm_display_mode *mode)
1166{
1167 struct drm_device *dev = encoder->dev;
1168 struct amdgpu_device *adev = drm_to_adev(dev);
1169 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1170 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1171 struct drm_connector *connector;
1172 struct drm_connector_list_iter iter;
1173 struct amdgpu_connector *amdgpu_connector = NULL;
1174 u32 tmp = 0, offset;
1175
1176 if (!dig || !dig->afmt || !dig->afmt->pin)
1177 return;
1178
1179 offset = dig->afmt->pin->offset;
1180
1181 drm_connector_list_iter_begin(dev, &iter);
1182 drm_for_each_connector_iter(connector, &iter) {
1183 if (connector->encoder == encoder) {
1184 amdgpu_connector = to_amdgpu_connector(connector);
1185 break;
1186 }
1187 }
1188 drm_connector_list_iter_end(&iter);
1189
1190 if (!amdgpu_connector) {
1191 DRM_ERROR("Couldn't find encoder's connector\n");
1192 return;
1193 }
1194
1195 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1196 if (connector->latency_present[1])
1197 tmp =
1198 (connector->video_latency[1] <<
1199 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1200 (connector->audio_latency[1] <<
1201 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1202 else
1203 tmp =
1204 (0 <<
1205 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1206 (0 <<
1207 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1208 } else {
1209 if (connector->latency_present[0])
1210 tmp =
1211 (connector->video_latency[0] <<
1212 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1213 (connector->audio_latency[0] <<
1214 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1215 else
1216 tmp =
1217 (0 <<
1218 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1219 (0 <<
1220 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1221
1222 }
1223 WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1224}
1225
1226static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1227{
1228 struct drm_device *dev = encoder->dev;
1229 struct amdgpu_device *adev = drm_to_adev(dev);
1230 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1231 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1232 struct drm_connector *connector;
1233 struct drm_connector_list_iter iter;
1234 struct amdgpu_connector *amdgpu_connector = NULL;
1235 u32 offset, tmp;
1236 u8 *sadb = NULL;
1237 int sad_count;
1238
1239 if (!dig || !dig->afmt || !dig->afmt->pin)
1240 return;
1241
1242 offset = dig->afmt->pin->offset;
1243
1244 drm_connector_list_iter_begin(dev, &iter);
1245 drm_for_each_connector_iter(connector, &iter) {
1246 if (connector->encoder == encoder) {
1247 amdgpu_connector = to_amdgpu_connector(connector);
1248 break;
1249 }
1250 }
1251 drm_connector_list_iter_end(&iter);
1252
1253 if (!amdgpu_connector) {
1254 DRM_ERROR("Couldn't find encoder's connector\n");
1255 return;
1256 }
1257
1258 sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1259 if (sad_count < 0) {
1260 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1261 sad_count = 0;
1262 }
1263
1264
1265 tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1266 tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1267 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1268
1269 tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1270 if (sad_count)
1271 tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1272 else
1273 tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1274 WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1275
1276 kfree(sadb);
1277}
1278
1279static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1280{
1281 struct drm_device *dev = encoder->dev;
1282 struct amdgpu_device *adev = drm_to_adev(dev);
1283 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1284 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1285 u32 offset;
1286 struct drm_connector *connector;
1287 struct drm_connector_list_iter iter;
1288 struct amdgpu_connector *amdgpu_connector = NULL;
1289 struct cea_sad *sads;
1290 int i, sad_count;
1291
1292 static const u16 eld_reg_to_type[][2] = {
1293 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1294 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1295 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1296 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1297 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1298 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1299 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1300 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1301 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1302 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1303 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1304 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1305 };
1306
1307 if (!dig || !dig->afmt || !dig->afmt->pin)
1308 return;
1309
1310 offset = dig->afmt->pin->offset;
1311
1312 drm_connector_list_iter_begin(dev, &iter);
1313 drm_for_each_connector_iter(connector, &iter) {
1314 if (connector->encoder == encoder) {
1315 amdgpu_connector = to_amdgpu_connector(connector);
1316 break;
1317 }
1318 }
1319 drm_connector_list_iter_end(&iter);
1320
1321 if (!amdgpu_connector) {
1322 DRM_ERROR("Couldn't find encoder's connector\n");
1323 return;
1324 }
1325
1326 sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1327 if (sad_count < 0)
1328 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1329 if (sad_count <= 0)
1330 return;
1331 BUG_ON(!sads);
1332
1333 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1334 u32 value = 0;
1335 u8 stereo_freqs = 0;
1336 int max_channels = -1;
1337 int j;
1338
1339 for (j = 0; j < sad_count; j++) {
1340 struct cea_sad *sad = &sads[j];
1341
1342 if (sad->format == eld_reg_to_type[i][1]) {
1343 if (sad->channels > max_channels) {
1344 value = (sad->channels <<
1345 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1346 (sad->byte2 <<
1347 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1348 (sad->freq <<
1349 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1350 max_channels = sad->channels;
1351 }
1352
1353 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1354 stereo_freqs |= sad->freq;
1355 else
1356 break;
1357 }
1358 }
1359
1360 value |= (stereo_freqs <<
1361 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1362
1363 WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1364 }
1365
1366 kfree(sads);
1367}
1368
1369static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1370 struct amdgpu_audio_pin *pin,
1371 bool enable)
1372{
1373 if (!pin)
1374 return;
1375
1376 WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1377 enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1378}
1379
1380static const u32 pin_offsets[7] =
1381{
1382 (0x1780 - 0x1780),
1383 (0x1786 - 0x1780),
1384 (0x178c - 0x1780),
1385 (0x1792 - 0x1780),
1386 (0x1798 - 0x1780),
1387 (0x179d - 0x1780),
1388 (0x17a4 - 0x1780),
1389};
1390
1391static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1392{
1393 int i;
1394
1395 if (!amdgpu_audio)
1396 return 0;
1397
1398 adev->mode_info.audio.enabled = true;
1399
1400 if (adev->asic_type == CHIP_KAVERI)
1401 adev->mode_info.audio.num_pins = 7;
1402 else if ((adev->asic_type == CHIP_KABINI) ||
1403 (adev->asic_type == CHIP_MULLINS))
1404 adev->mode_info.audio.num_pins = 3;
1405 else if ((adev->asic_type == CHIP_BONAIRE) ||
1406 (adev->asic_type == CHIP_HAWAII))
1407 adev->mode_info.audio.num_pins = 7;
1408 else
1409 adev->mode_info.audio.num_pins = 3;
1410
1411 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1412 adev->mode_info.audio.pin[i].channels = -1;
1413 adev->mode_info.audio.pin[i].rate = -1;
1414 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1415 adev->mode_info.audio.pin[i].status_bits = 0;
1416 adev->mode_info.audio.pin[i].category_code = 0;
1417 adev->mode_info.audio.pin[i].connected = false;
1418 adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1419 adev->mode_info.audio.pin[i].id = i;
1420
1421
1422 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1423 }
1424
1425 return 0;
1426}
1427
1428static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1429{
1430 int i;
1431
1432 if (!amdgpu_audio)
1433 return;
1434
1435 if (!adev->mode_info.audio.enabled)
1436 return;
1437
1438 for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1439 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1440
1441 adev->mode_info.audio.enabled = false;
1442}
1443
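/*
 * update the HDMI ACR (N and CTS) parameters for the given pixel clock
 */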
1447static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1448{
1449 struct drm_device *dev = encoder->dev;
1450 struct amdgpu_device *adev = drm_to_adev(dev);
1451 struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1452 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1453 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1454 uint32_t offset = dig->afmt->offset;
1455
1456 WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1457 WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1458
1459 WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1460 WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1461
1462 WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1463 WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1464}
1465
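/*
 * write the packed AVI InfoFrame into the AFMT registers
 */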
1469static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1470 void *buffer, size_t size)
1471{
1472 struct drm_device *dev = encoder->dev;
1473 struct amdgpu_device *adev = drm_to_adev(dev);
1474 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1475 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1476 uint32_t offset = dig->afmt->offset;
1477 uint8_t *frame = buffer + 3;
1478 uint8_t *header = buffer;
1479
1480 WREG32(mmAFMT_AVI_INFO0 + offset,
1481 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1482 WREG32(mmAFMT_AVI_INFO1 + offset,
1483 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1484 WREG32(mmAFMT_AVI_INFO2 + offset,
1485 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1486 WREG32(mmAFMT_AVI_INFO3 + offset,
1487 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1488}
1489
1490static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1491{
1492 struct drm_device *dev = encoder->dev;
1493 struct amdgpu_device *adev = drm_to_adev(dev);
1494 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1495 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1496 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1497 u32 dto_phase = 24 * 1000;
1498 u32 dto_modulo = clock;
1499
1500 if (!dig || !dig->afmt)
1501 return;
1502
1503
1504
1505
1506
1507
1508 WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1509 WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1510 WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1511}
1512
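/*
 * update the info frames with the data from the current display mode
 */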
1516static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1517 struct drm_display_mode *mode)
1518{
1519 struct drm_device *dev = encoder->dev;
1520 struct amdgpu_device *adev = drm_to_adev(dev);
1521 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1522 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1523 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1524 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1525 struct hdmi_avi_infoframe frame;
1526 uint32_t offset, val;
1527 ssize_t err;
1528 int bpc = 8;
1529
1530 if (!dig || !dig->afmt)
1531 return;
1532
1533
1534 if (!dig->afmt->enabled)
1535 return;
1536
1537 offset = dig->afmt->offset;
1538
1539
1540 if (encoder->crtc) {
1541 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1542 bpc = amdgpu_crtc->bpc;
1543 }
1544
1545
1546 dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1547 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1548
1549 dce_v8_0_audio_set_dto(encoder, mode->clock);
1550
1551 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1552 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK);
1553
1554 WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1555
1556 val = RREG32(mmHDMI_CONTROL + offset);
1557 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1558 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1559
1560 switch (bpc) {
1561 case 0:
1562 case 6:
1563 case 8:
1564 case 16:
1565 default:
1566 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1567 connector->name, bpc);
1568 break;
1569 case 10:
1570 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1571 val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1572 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1573 connector->name);
1574 break;
1575 case 12:
1576 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1577 val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1578 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1579 connector->name);
1580 break;
1581 }
1582
1583 WREG32(mmHDMI_CONTROL + offset, val);
1584
1585 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1586 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK |
1587 HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK |
1588 HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK);
1589
1590 WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1591 HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK |
1592 HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK);
1593
1594 WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1595 AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK);
1596
1597 WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1598 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT));
1599
1600 WREG32(mmHDMI_GC + offset, 0);
1601
1602 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1603 (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) |
1604 (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT));
1605
1606 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1607 AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK);
1608
1609
1610
1611 if (bpc > 8)
1612 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1613 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK);
1614 else
1615 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1616 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK |
1617 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK);
1618
1619 dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1620
1621 WREG32(mmAFMT_60958_0 + offset,
1622 (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1623
1624 WREG32(mmAFMT_60958_1 + offset,
1625 (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1626
1627 WREG32(mmAFMT_60958_2 + offset,
1628 (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1629 (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1630 (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1631 (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1632 (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1633 (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1634
1635 dce_v8_0_audio_write_speaker_allocation(encoder);
1636
1637
1638 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1639 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1640
1641 dce_v8_0_afmt_audio_select_pin(encoder);
1642 dce_v8_0_audio_write_sad_regs(encoder);
1643 dce_v8_0_audio_write_latency_fields(encoder, mode);
1644
1645 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1646 if (err < 0) {
1647 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1648 return;
1649 }
1650
1651 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1652 if (err < 0) {
1653 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1654 return;
1655 }
1656
1657 dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1658
1659 WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1660 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK |
1661 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK);
1662
1663 WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1664 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT),
1665 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1666
1667 WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1668 AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK);
1669
1670 WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1671 WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1672 WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1673 WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1674
1675
1676 dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1677}
1678
1679static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1680{
1681 struct drm_device *dev = encoder->dev;
1682 struct amdgpu_device *adev = drm_to_adev(dev);
1683 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1684 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1685
1686 if (!dig || !dig->afmt)
1687 return;
1688
1689
1690 if (enable && dig->afmt->enabled)
1691 return;
1692 if (!enable && !dig->afmt->enabled)
1693 return;
1694
1695 if (!enable && dig->afmt->pin) {
1696 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1697 dig->afmt->pin = NULL;
1698 }
1699
1700 dig->afmt->enabled = enable;
1701
1702 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1703 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1704}
1705
1706static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1707{
1708 int i;
1709
1710 for (i = 0; i < adev->mode_info.num_dig; i++)
1711 adev->mode_info.afmt[i] = NULL;
1712
1713
1714 for (i = 0; i < adev->mode_info.num_dig; i++) {
1715 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1716 if (adev->mode_info.afmt[i]) {
1717 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1718 adev->mode_info.afmt[i]->id = i;
1719 } else {
1720 int j;
1721 for (j = 0; j < i; j++) {
1722 kfree(adev->mode_info.afmt[j]);
1723 adev->mode_info.afmt[j] = NULL;
1724 }
1725 return -ENOMEM;
1726 }
1727 }
1728 return 0;
1729}
1730
1731static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1732{
1733 int i;
1734
1735 for (i = 0; i < adev->mode_info.num_dig; i++) {
1736 kfree(adev->mode_info.afmt[i]);
1737 adev->mode_info.afmt[i] = NULL;
1738 }
1739}
1740
1741static const u32 vga_control_regs[6] =
1742{
1743 mmD1VGA_CONTROL,
1744 mmD2VGA_CONTROL,
1745 mmD3VGA_CONTROL,
1746 mmD4VGA_CONTROL,
1747 mmD5VGA_CONTROL,
1748 mmD6VGA_CONTROL,
1749};
1750
1751static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1752{
1753 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1754 struct drm_device *dev = crtc->dev;
1755 struct amdgpu_device *adev = drm_to_adev(dev);
1756 u32 vga_control;
1757
1758 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1759 if (enable)
1760 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1761 else
1762 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1763}
1764
1765static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1766{
1767 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1768 struct drm_device *dev = crtc->dev;
1769 struct amdgpu_device *adev = drm_to_adev(dev);
1770
1771 if (enable)
1772 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1773 else
1774 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1775}
1776
1777static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1778 struct drm_framebuffer *fb,
1779 int x, int y, int atomic)
1780{
1781 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1782 struct drm_device *dev = crtc->dev;
1783 struct amdgpu_device *adev = drm_to_adev(dev);
1784 struct drm_framebuffer *target_fb;
1785 struct drm_gem_object *obj;
1786 struct amdgpu_bo *abo;
1787 uint64_t fb_location, tiling_flags;
1788 uint32_t fb_format, fb_pitch_pixels;
1789 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1790 u32 pipe_config;
1791 u32 viewport_w, viewport_h;
1792 int r;
1793 bool bypass_lut = false;
1794 struct drm_format_name_buf format_name;
1795
1796
1797 if (!atomic && !crtc->primary->fb) {
1798 DRM_DEBUG_KMS("No FB bound\n");
1799 return 0;
1800 }
1801
1802 if (atomic)
1803 target_fb = fb;
1804 else
1805 target_fb = crtc->primary->fb;
1806
1807
1808
1809
1810 obj = target_fb->obj[0];
1811 abo = gem_to_amdgpu_bo(obj);
1812 r = amdgpu_bo_reserve(abo, false);
1813 if (unlikely(r != 0))
1814 return r;
1815
1816 if (!atomic) {
1817 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1818 if (unlikely(r != 0)) {
1819 amdgpu_bo_unreserve(abo);
1820 return -EINVAL;
1821 }
1822 }
1823 fb_location = amdgpu_bo_gpu_offset(abo);
1824
1825 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1826 amdgpu_bo_unreserve(abo);
1827
1828 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1829
1830 switch (target_fb->format->format) {
1831 case DRM_FORMAT_C8:
1832 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1833 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1834 break;
1835 case DRM_FORMAT_XRGB4444:
1836 case DRM_FORMAT_ARGB4444:
1837 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1838 (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1839#ifdef __BIG_ENDIAN
1840 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1841#endif
1842 break;
1843 case DRM_FORMAT_XRGB1555:
1844 case DRM_FORMAT_ARGB1555:
1845 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1846 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1847#ifdef __BIG_ENDIAN
1848 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1849#endif
1850 break;
1851 case DRM_FORMAT_BGRX5551:
1852 case DRM_FORMAT_BGRA5551:
1853 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1854 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1855#ifdef __BIG_ENDIAN
1856 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1857#endif
1858 break;
1859 case DRM_FORMAT_RGB565:
1860 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1861 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1862#ifdef __BIG_ENDIAN
1863 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1864#endif
1865 break;
1866 case DRM_FORMAT_XRGB8888:
1867 case DRM_FORMAT_ARGB8888:
1868 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1869 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1870#ifdef __BIG_ENDIAN
1871 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1872#endif
1873 break;
1874 case DRM_FORMAT_XRGB2101010:
1875 case DRM_FORMAT_ARGB2101010:
1876 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1877 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1878#ifdef __BIG_ENDIAN
1879 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1880#endif
1881
1882 bypass_lut = true;
1883 break;
1884 case DRM_FORMAT_BGRX1010102:
1885 case DRM_FORMAT_BGRA1010102:
1886 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1887 (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1888#ifdef __BIG_ENDIAN
1889 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1890#endif
1891
1892 bypass_lut = true;
1893 break;
1894 case DRM_FORMAT_XBGR8888:
1895 case DRM_FORMAT_ABGR8888:
1896 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1897 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1898 fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
1899 (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
1900#ifdef __BIG_ENDIAN
1901 fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1902#endif
1903 break;
1904 default:
1905 DRM_ERROR("Unsupported screen format %s\n",
1906 drm_get_format_name(target_fb->format->format, &format_name));
1907 return -EINVAL;
1908 }
1909
1910 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1911 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1912
1913 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1914 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1915 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1916 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1917 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1918
1919 fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
1920 fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1921 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
1922 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
1923 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
1924 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
1925 fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
1926 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1927 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1928 }
1929
1930 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
1931
1932 dce_v8_0_vga_enable(crtc, false);
1933
	/* make sure the surface address is latched during vertical blank
	 * rather than horizontal blank
	 */
1937 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1938
1939 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1940 upper_32_bits(fb_location));
1941 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1942 upper_32_bits(fb_location));
1943 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1944 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1945 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1946 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
1947 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1948 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1949
	/* the hardware LUT only has 256 entries; 10 bpc scanout bypasses it
	 * below so the extra precision is not truncated
	 */
1955 WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1956 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
1957 ~LUT_10BIT_BYPASS_EN);
1958
1959 if (bypass_lut)
1960 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1961
1962 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1963 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1964 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1965 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1966 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1967 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1968
1969 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1970 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1971
1972 dce_v8_0_grph_enable(crtc, true);
1973
1974 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1975 target_fb->height);
1976
1977 x &= ~3;
1978 y &= ~1;
1979 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1980 (x << 16) | y);
1981 viewport_w = crtc->mode.hdisplay;
1982 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1983 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1984 (viewport_w << 16) | viewport_h);
1985
	/* let surface updates take effect anywhere in the vblank interval */
1987 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1988
1989 if (!atomic && fb && fb != crtc->primary->fb) {
1990 abo = gem_to_amdgpu_bo(fb->obj[0]);
1991 r = amdgpu_bo_reserve(abo, true);
1992 if (unlikely(r != 0))
1993 return r;
1994 amdgpu_bo_unpin(abo);
1995 amdgpu_bo_unreserve(abo);
1996 }
1997
	/* bytes per pixel may have changed, recompute the display watermarks */
1999 dce_v8_0_bandwidth_update(adev);
2000
2001 return 0;
2002}
2003
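/*
 * dce_v8_0_set_interleave - set line buffer interleave for interlaced modes
 *
 * Writes LB_DATA_FORMAT for the crtc: interleaving is enabled when the mode
 * is interlaced and the register is cleared otherwise.
 */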
2004static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2005 struct drm_display_mode *mode)
2006{
2007 struct drm_device *dev = crtc->dev;
2008 struct amdgpu_device *adev = drm_to_adev(dev);
2009 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2010
2011 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2012 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2013 LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2014 else
2015 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2016}
2017
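/*
 * dce_v8_0_crtc_load_lut - program the legacy gamma LUT for a crtc
 *
 * Puts the input CSC, prescale, degamma, gamut remap, regamma and output CSC
 * blocks into bypass and loads the 256-entry hardware LUT from the crtc's
 * gamma_store, packing 10 bits per color component into DC_LUT_30_COLOR.
 */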
2018static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2019{
2020 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2021 struct drm_device *dev = crtc->dev;
2022 struct amdgpu_device *adev = drm_to_adev(dev);
2023 u16 *r, *g, *b;
2024 int i;
2025
2026 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2027
2028 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2029 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2030 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2031 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2032 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2033 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2034 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2035 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2036 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2037 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2038
2039 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2040
2041 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2042 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2043 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2044
2045 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2046 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2047 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2048
2049 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2050 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2051
2052 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2053 r = crtc->gamma_store;
2054 g = r + crtc->gamma_size;
2055 b = g + crtc->gamma_size;
2056 for (i = 0; i < 256; i++) {
2057 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2058 ((*r++ & 0xffc0) << 14) |
2059 ((*g++ & 0xffc0) << 4) |
2060 (*b++ >> 6));
2061 }
2062
2063 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2064 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2065 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2066 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2067 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2068 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2069 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2070 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2071 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2072 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2073 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2074 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2075 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2076
2077 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
	/* enable cursor alpha blending for this crtc */
2081 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2082 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2083}
2084
2085static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2086{
2087 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2088 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2089
2090 switch (amdgpu_encoder->encoder_id) {
2091 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2092 if (dig->linkb)
2093 return 1;
2094 else
2095 return 0;
2097 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2098 if (dig->linkb)
2099 return 3;
2100 else
2101 return 2;
2103 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2104 if (dig->linkb)
2105 return 5;
2106 else
2107 return 4;
2109 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2110 return 6;
2112 default:
2113 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2114 return 0;
2115 }
2116}
2117
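/*
 * dce_v8_0_pick_pll - allocate a PPLL for the crtc
 *
 * DP encoders driven from an external clock source need no PPLL, so
 * ATOM_PPLL_INVALID is returned for them.  Otherwise an already programmed
 * PPLL is shared where possible (any DP PPLL for DP, a PPLL running at the
 * same clock for non-DP).  Failing that a free PPLL is picked: KABINI and
 * MULLINS only have PPLL1 and PPLL2, the other DCE8 parts can use PPLL0 as
 * well.
 */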
2140static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2141{
2142 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2143 struct drm_device *dev = crtc->dev;
2144 struct amdgpu_device *adev = drm_to_adev(dev);
2145 u32 pll_in_use;
2146 int pll;
2147
2148 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2149 if (adev->clock.dp_extclk)
			/* external clock source, no PPLL programming needed */
2151 return ATOM_PPLL_INVALID;
2152 else {
			/* try to share a PPLL already used by another DP encoder */
2154 pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2155 if (pll != ATOM_PPLL_INVALID)
2156 return pll;
2157 }
2158 } else {
		/* share a PPLL already running at the same clock if possible */
2160 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2161 if (pll != ATOM_PPLL_INVALID)
2162 return pll;
2163 }
2164
2165 if ((adev->asic_type == CHIP_KABINI) ||
2166 (adev->asic_type == CHIP_MULLINS)) {
		/* KABINI and MULLINS only have PPLL1 and PPLL2 */
2168 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2169 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2170 return ATOM_PPLL2;
2171 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2172 return ATOM_PPLL1;
2173 DRM_ERROR("unable to allocate a PPLL\n");
2174 return ATOM_PPLL_INVALID;
2175 } else {
		/* the other DCE8 parts can also use PPLL0 */
2177 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2178 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2179 return ATOM_PPLL2;
2180 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2181 return ATOM_PPLL1;
2182 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2183 return ATOM_PPLL0;
2184 DRM_ERROR("unable to allocate a PPLL\n");
2185 return ATOM_PPLL_INVALID;
2186 }
2187 return ATOM_PPLL_INVALID;
2188}
2189
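/* Lock or unlock cursor register updates so that position, hot spot and size
 * changes take effect together.
 */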
2190static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2191{
2192 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2193 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2194 uint32_t cur_lock;
2195
2196 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2197 if (lock)
2198 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2199 else
2200 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2201 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2202}
2203
2204static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2205{
2206 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2207 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2208
2209 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2210 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2211 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2212}
2213
2214static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2215{
2216 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2217 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2218
2219 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2220 upper_32_bits(amdgpu_crtc->cursor_addr));
2221 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2222 lower_32_bits(amdgpu_crtc->cursor_addr));
2223
2224 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2225 CUR_CONTROL__CURSOR_EN_MASK |
2226 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2227 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2228}
2229
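/* Program the cursor position with the cursor registers already locked by the
 * caller.  Negative coordinates are clamped to zero and compensated for by
 * moving the hot spot.
 */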
2230static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2231 int x, int y)
2232{
2233 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2234 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2235 int xorigin = 0, yorigin = 0;
2236
2237 amdgpu_crtc->cursor_x = x;
2238 amdgpu_crtc->cursor_y = y;
2239
	/* the cursor position is relative to the whole surface, add the crtc offset */
2241 x += crtc->x;
2242 y += crtc->y;
2243 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2244
2245 if (x < 0) {
2246 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2247 x = 0;
2248 }
2249 if (y < 0) {
2250 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2251 y = 0;
2252 }
2253
2254 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2255 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2256 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2257 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2258
2259 return 0;
2260}
2261
2262static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2263 int x, int y)
2264{
2265 int ret;
2266
2267 dce_v8_0_lock_cursor(crtc, true);
2268 ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2269 dce_v8_0_lock_cursor(crtc, false);
2270
2271 return ret;
2272}
2273
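/* Legacy cursor_set2 hook: pin the new cursor BO in VRAM, program its GPU
 * address, hot spot and size, then unpin and drop the reference on the
 * previous cursor BO.
 */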
2274static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2275 struct drm_file *file_priv,
2276 uint32_t handle,
2277 uint32_t width,
2278 uint32_t height,
2279 int32_t hot_x,
2280 int32_t hot_y)
2281{
2282 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2283 struct drm_gem_object *obj;
2284 struct amdgpu_bo *aobj;
2285 int ret;
2286
2287 if (!handle) {
		/* turn off the cursor */
2289 dce_v8_0_hide_cursor(crtc);
2290 obj = NULL;
2291 goto unpin;
2292 }
2293
2294 if ((width > amdgpu_crtc->max_cursor_width) ||
2295 (height > amdgpu_crtc->max_cursor_height)) {
2296 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2297 return -EINVAL;
2298 }
2299
2300 obj = drm_gem_object_lookup(file_priv, handle);
2301 if (!obj) {
2302 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2303 return -ENOENT;
2304 }
2305
2306 aobj = gem_to_amdgpu_bo(obj);
2307 ret = amdgpu_bo_reserve(aobj, false);
2308 if (ret != 0) {
2309 drm_gem_object_put(obj);
2310 return ret;
2311 }
2312
2313 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2314 amdgpu_bo_unreserve(aobj);
2315 if (ret) {
2316 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2317 drm_gem_object_put(obj);
2318 return ret;
2319 }
2320 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2321
2322 dce_v8_0_lock_cursor(crtc, true);
2323
2324 if (width != amdgpu_crtc->cursor_width ||
2325 height != amdgpu_crtc->cursor_height ||
2326 hot_x != amdgpu_crtc->cursor_hot_x ||
2327 hot_y != amdgpu_crtc->cursor_hot_y) {
2328 int x, y;
2329
2330 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2331 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2332
2333 dce_v8_0_cursor_move_locked(crtc, x, y);
2334
2335 amdgpu_crtc->cursor_width = width;
2336 amdgpu_crtc->cursor_height = height;
2337 amdgpu_crtc->cursor_hot_x = hot_x;
2338 amdgpu_crtc->cursor_hot_y = hot_y;
2339 }
2340
2341 dce_v8_0_show_cursor(crtc);
2342 dce_v8_0_lock_cursor(crtc, false);
2343
2344unpin:
2345 if (amdgpu_crtc->cursor_bo) {
2346 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2347 ret = amdgpu_bo_reserve(aobj, true);
2348 if (likely(ret == 0)) {
2349 amdgpu_bo_unpin(aobj);
2350 amdgpu_bo_unreserve(aobj);
2351 }
2352 drm_gem_object_put(amdgpu_crtc->cursor_bo);
2353 }
2354
2355 amdgpu_crtc->cursor_bo = obj;
2356 return 0;
2357}
2358
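/* Re-program the cursor position and re-enable the cursor after a mode set. */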
2359static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2360{
2361 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2362
2363 if (amdgpu_crtc->cursor_bo) {
2364 dce_v8_0_lock_cursor(crtc, true);
2365
2366 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2367 amdgpu_crtc->cursor_y);
2368
2369 dce_v8_0_show_cursor(crtc);
2370
2371 dce_v8_0_lock_cursor(crtc, false);
2372 }
2373}
2374
2375static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2376 u16 *blue, uint32_t size,
2377 struct drm_modeset_acquire_ctx *ctx)
2378{
2379 dce_v8_0_crtc_load_lut(crtc);
2380
2381 return 0;
2382}
2383
2384static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2385{
2386 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2387
2388 drm_crtc_cleanup(crtc);
2389 kfree(amdgpu_crtc);
2390}
2391
2392static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2393 .cursor_set2 = dce_v8_0_crtc_cursor_set2,
2394 .cursor_move = dce_v8_0_crtc_cursor_move,
2395 .gamma_set = dce_v8_0_crtc_gamma_set,
2396 .set_config = amdgpu_display_crtc_set_config,
2397 .destroy = dce_v8_0_crtc_destroy,
2398 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2399 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
2400 .enable_vblank = amdgpu_enable_vblank_kms,
2401 .disable_vblank = amdgpu_disable_vblank_kms,
2402 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2403};
2404
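/* Legacy DPMS hook: enables or blanks the crtc via atombios, keeps the vblank
 * and pageflip interrupt state in sync and reloads the LUT on power up.
 */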
2405static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2406{
2407 struct drm_device *dev = crtc->dev;
2408 struct amdgpu_device *adev = drm_to_adev(dev);
2409 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2410 unsigned type;
2411
2412 switch (mode) {
2413 case DRM_MODE_DPMS_ON:
2414 amdgpu_crtc->enabled = true;
2415 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2416 dce_v8_0_vga_enable(crtc, true);
2417 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2418 dce_v8_0_vga_enable(crtc, false);
		/* make sure the vblank and pageflip interrupts are enabled */
2420 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2421 amdgpu_crtc->crtc_id);
2422 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2423 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2424 drm_crtc_vblank_on(crtc);
2425 dce_v8_0_crtc_load_lut(crtc);
2426 break;
2427 case DRM_MODE_DPMS_STANDBY:
2428 case DRM_MODE_DPMS_SUSPEND:
2429 case DRM_MODE_DPMS_OFF:
2430 drm_crtc_vblank_off(crtc);
2431 if (amdgpu_crtc->enabled) {
2432 dce_v8_0_vga_enable(crtc, true);
2433 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2434 dce_v8_0_vga_enable(crtc, false);
2435 }
2436 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2437 amdgpu_crtc->enabled = false;
2438 break;
2439 }
2440
2441 amdgpu_pm_compute_clocks(adev);
2442}
2443
2444static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2445{
	/* disable crtc power gating before programming */
2447 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2448 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2449 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2450}
2451
2452static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2453{
2454 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2455 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2456}
2457
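/* Fully disable the crtc: unpin the scanout buffer, power gate the crtc and
 * tear down its PPLL unless another enabled crtc is still using it.
 */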
2458static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2459{
2460 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2461 struct drm_device *dev = crtc->dev;
2462 struct amdgpu_device *adev = drm_to_adev(dev);
2463 struct amdgpu_atom_ss ss;
2464 int i;
2465
2466 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2467 if (crtc->primary->fb) {
2468 int r;
2469 struct amdgpu_bo *abo;
2470
2471 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2472 r = amdgpu_bo_reserve(abo, true);
2473 if (unlikely(r))
2474 DRM_ERROR("failed to reserve abo before unpin\n");
2475 else {
2476 amdgpu_bo_unpin(abo);
2477 amdgpu_bo_unreserve(abo);
2478 }
2479 }
2480
2481 dce_v8_0_grph_enable(crtc, false);
2482
2483 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2484
2485 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2486 if (adev->mode_info.crtcs[i] &&
2487 adev->mode_info.crtcs[i]->enabled &&
2488 i != amdgpu_crtc->crtc_id &&
2489 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* another enabled crtc is still using this pll,
			 * so don't turn it off
			 */
2493 goto done;
2494 }
2495 }
2496
2497 switch (amdgpu_crtc->pll_id) {
2498 case ATOM_PPLL1:
2499 case ATOM_PPLL2:
		/* disable the ppll */
2501 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2502 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2503 break;
2504 case ATOM_PPLL0:
		/* disable the ppll */
2506 if ((adev->asic_type == CHIP_KAVERI) ||
2507 (adev->asic_type == CHIP_BONAIRE) ||
2508 (adev->asic_type == CHIP_HAWAII))
2509 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2510 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2511 break;
2512 default:
2513 break;
2514 }
2515done:
2516 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2517 amdgpu_crtc->adjusted_clock = 0;
2518 amdgpu_crtc->encoder = NULL;
2519 amdgpu_crtc->connector = NULL;
2520}
2521
2522static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2523 struct drm_display_mode *mode,
2524 struct drm_display_mode *adjusted_mode,
2525 int x, int y, struct drm_framebuffer *old_fb)
2526{
2527 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2528
2529 if (!amdgpu_crtc->adjusted_clock)
2530 return -EINVAL;
2531
2532 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2533 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2534 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2535 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2536 amdgpu_atombios_crtc_scaler_setup(crtc);
2537 dce_v8_0_cursor_reset(crtc);
2538
2539 amdgpu_crtc->hw_mode = *adjusted_mode;
2540
2541 return 0;
2542}
2543
2544static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2545 const struct drm_display_mode *mode,
2546 struct drm_display_mode *adjusted_mode)
2547{
2548 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2549 struct drm_device *dev = crtc->dev;
2550 struct drm_encoder *encoder;
2551
	/* find the encoder and connector currently driving this crtc */
2553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2554 if (encoder->crtc == crtc) {
2555 amdgpu_crtc->encoder = encoder;
2556 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2557 break;
2558 }
2559 }
2560 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2561 amdgpu_crtc->encoder = NULL;
2562 amdgpu_crtc->connector = NULL;
2563 return false;
2564 }
2565 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2566 return false;
2567 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2568 return false;
	/* pick a pll for the crtc */
2570 amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
	/* fail if a non-DP encoder could not get a PPLL */
2572 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2573 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2574 return false;
2575
2576 return true;
2577}
2578
2579static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2580 struct drm_framebuffer *old_fb)
2581{
2582 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2583}
2584
2585static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2586 struct drm_framebuffer *fb,
2587 int x, int y, enum mode_set_atomic state)
2588{
2589 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2590}
2591
2592static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2593 .dpms = dce_v8_0_crtc_dpms,
2594 .mode_fixup = dce_v8_0_crtc_mode_fixup,
2595 .mode_set = dce_v8_0_crtc_mode_set,
2596 .mode_set_base = dce_v8_0_crtc_set_base,
2597 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2598 .prepare = dce_v8_0_crtc_prepare,
2599 .commit = dce_v8_0_crtc_commit,
2600 .disable = dce_v8_0_crtc_disable,
2601 .get_scanout_position = amdgpu_crtc_get_scanout_position,
2602};
2603
2604static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2605{
2606 struct amdgpu_crtc *amdgpu_crtc;
2607
2608 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2609 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2610 if (amdgpu_crtc == NULL)
2611 return -ENOMEM;
2612
2613 drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2614
2615 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2616 amdgpu_crtc->crtc_id = index;
2617 adev->mode_info.crtcs[index] = amdgpu_crtc;
2618
2619 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2620 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2621 adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2622 adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2623
2624 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2625
2626 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2627 amdgpu_crtc->adjusted_clock = 0;
2628 amdgpu_crtc->encoder = NULL;
2629 amdgpu_crtc->connector = NULL;
2630 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2631
2632 return 0;
2633}
2634
2635static int dce_v8_0_early_init(void *handle)
2636{
2637 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2638
2639 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2640 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2641
2642 dce_v8_0_set_display_funcs(adev);
2643
2644 adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2645
2646 switch (adev->asic_type) {
2647 case CHIP_BONAIRE:
2648 case CHIP_HAWAII:
2649 adev->mode_info.num_hpd = 6;
2650 adev->mode_info.num_dig = 6;
2651 break;
2652 case CHIP_KAVERI:
2653 adev->mode_info.num_hpd = 6;
2654 adev->mode_info.num_dig = 7;
2655 break;
2656 case CHIP_KABINI:
2657 case CHIP_MULLINS:
2658 adev->mode_info.num_hpd = 6;
2659 adev->mode_info.num_dig = 6;
2660 break;
2661 default:
		/* unsupported asic */
2663 return -EINVAL;
2664 }
2665
2666 dce_v8_0_set_irq_funcs(adev);
2667
2668 return 0;
2669}
2670
2671static int dce_v8_0_sw_init(void *handle)
2672{
2673 int r, i;
2674 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2675
2676 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2677 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2678 if (r)
2679 return r;
2680 }
2681
2682 for (i = 8; i < 20; i += 2) {
2683 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2684 if (r)
2685 return r;
2686 }
2687
	/* HPD hotplug interrupt */
2689 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2690 if (r)
2691 return r;
2692
2693 adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2694
2695 adev_to_drm(adev)->mode_config.async_page_flip = true;
2696
2697 adev_to_drm(adev)->mode_config.max_width = 16384;
2698 adev_to_drm(adev)->mode_config.max_height = 16384;
2699
2700 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2701 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2702
2703 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2704
2705 r = amdgpu_display_modeset_create_props(adev);
2706 if (r)
2707 return r;
2708
2709 adev_to_drm(adev)->mode_config.max_width = 16384;
2710 adev_to_drm(adev)->mode_config.max_height = 16384;
2711
	/* allocate crtcs */
2713 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2714 r = dce_v8_0_crtc_init(adev, i);
2715 if (r)
2716 return r;
2717 }
2718
2719 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2720 amdgpu_display_print_display_setup(adev_to_drm(adev));
2721 else
2722 return -EINVAL;
2723
	/* setup afmt */
2725 r = dce_v8_0_afmt_init(adev);
2726 if (r)
2727 return r;
2728
2729 r = dce_v8_0_audio_init(adev);
2730 if (r)
2731 return r;
2732
2733 drm_kms_helper_poll_init(adev_to_drm(adev));
2734
2735 adev->mode_info.mode_config_initialized = true;
2736 return 0;
2737}
2738
2739static int dce_v8_0_sw_fini(void *handle)
2740{
2741 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2742
2743 kfree(adev->mode_info.bios_hardcoded_edid);
2744
2745 drm_kms_helper_poll_fini(adev_to_drm(adev));
2746
2747 dce_v8_0_audio_fini(adev);
2748
2749 dce_v8_0_afmt_fini(adev);
2750
2751 drm_mode_config_cleanup(adev_to_drm(adev));
2752 adev->mode_info.mode_config_initialized = false;
2753
2754 return 0;
2755}
2756
2757static int dce_v8_0_hw_init(void *handle)
2758{
2759 int i;
2760 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2761
	/* disable vga render */
2763 dce_v8_0_set_vga_render_state(adev, false);
2764
2765 amdgpu_atombios_encoder_init_dig(adev);
2766 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2767
	/* initialize hpd */
2769 dce_v8_0_hpd_init(adev);
2770
2771 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2772 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2773 }
2774
2775 dce_v8_0_pageflip_interrupt_init(adev);
2776
2777 return 0;
2778}
2779
2780static int dce_v8_0_hw_fini(void *handle)
2781{
2782 int i;
2783 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2784
2785 dce_v8_0_hpd_fini(adev);
2786
2787 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2788 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2789 }
2790
2791 dce_v8_0_pageflip_interrupt_fini(adev);
2792
2793 return 0;
2794}
2795
2796static int dce_v8_0_suspend(void *handle)
2797{
2798 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2799 int r;
2800
2801 r = amdgpu_display_suspend_helper(adev);
2802 if (r)
2803 return r;
2804
2805 adev->mode_info.bl_level =
2806 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2807
2808 return dce_v8_0_hw_fini(handle);
2809}
2810
2811static int dce_v8_0_resume(void *handle)
2812{
2813 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2814 int ret;
2815
2816 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2817 adev->mode_info.bl_level);
2818
2819 ret = dce_v8_0_hw_init(handle);
2820
	/* turn the backlight back on */
2822 if (adev->mode_info.bl_encoder) {
2823 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2824 adev->mode_info.bl_encoder);
2825 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2826 bl_level);
2827 }
2828 if (ret)
2829 return ret;
2830
2831 return amdgpu_display_resume_helper(adev);
2832}
2833
2834static bool dce_v8_0_is_idle(void *handle)
2835{
2836 return true;
2837}
2838
2839static int dce_v8_0_wait_for_idle(void *handle)
2840{
2841 return 0;
2842}
2843
2844static int dce_v8_0_soft_reset(void *handle)
2845{
2846 u32 srbm_soft_reset = 0, tmp;
2847 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2848
2849 if (dce_v8_0_is_display_hung(adev))
2850 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2851
2852 if (srbm_soft_reset) {
2853 tmp = RREG32(mmSRBM_SOFT_RESET);
2854 tmp |= srbm_soft_reset;
2855 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2856 WREG32(mmSRBM_SOFT_RESET, tmp);
2857 tmp = RREG32(mmSRBM_SOFT_RESET);
2858
2859 udelay(50);
2860
2861 tmp &= ~srbm_soft_reset;
2862 WREG32(mmSRBM_SOFT_RESET, tmp);
2863 tmp = RREG32(mmSRBM_SOFT_RESET);
2864
		/* wait a little for things to settle down */
2866 udelay(50);
2867 }
2868 return 0;
2869}
2870
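/* Enable or disable the vblank interrupt in the line buffer interrupt mask
 * register of the given crtc.
 */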
2871static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2872 int crtc,
2873 enum amdgpu_interrupt_state state)
2874{
2875 u32 reg_block, lb_interrupt_mask;
2876
2877 if (crtc >= adev->mode_info.num_crtc) {
2878 DRM_DEBUG("invalid crtc %d\n", crtc);
2879 return;
2880 }
2881
2882 switch (crtc) {
2883 case 0:
2884 reg_block = CRTC0_REGISTER_OFFSET;
2885 break;
2886 case 1:
2887 reg_block = CRTC1_REGISTER_OFFSET;
2888 break;
2889 case 2:
2890 reg_block = CRTC2_REGISTER_OFFSET;
2891 break;
2892 case 3:
2893 reg_block = CRTC3_REGISTER_OFFSET;
2894 break;
2895 case 4:
2896 reg_block = CRTC4_REGISTER_OFFSET;
2897 break;
2898 case 5:
2899 reg_block = CRTC5_REGISTER_OFFSET;
2900 break;
2901 default:
2902 DRM_DEBUG("invalid crtc %d\n", crtc);
2903 return;
2904 }
2905
2906 switch (state) {
2907 case AMDGPU_IRQ_STATE_DISABLE:
2908 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2909 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2910 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2911 break;
2912 case AMDGPU_IRQ_STATE_ENABLE:
2913 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2914 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2915 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2916 break;
2917 default:
2918 break;
2919 }
2920}
2921
2922static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2923 int crtc,
2924 enum amdgpu_interrupt_state state)
2925{
2926 u32 reg_block, lb_interrupt_mask;
2927
2928 if (crtc >= adev->mode_info.num_crtc) {
2929 DRM_DEBUG("invalid crtc %d\n", crtc);
2930 return;
2931 }
2932
2933 switch (crtc) {
2934 case 0:
2935 reg_block = CRTC0_REGISTER_OFFSET;
2936 break;
2937 case 1:
2938 reg_block = CRTC1_REGISTER_OFFSET;
2939 break;
2940 case 2:
2941 reg_block = CRTC2_REGISTER_OFFSET;
2942 break;
2943 case 3:
2944 reg_block = CRTC3_REGISTER_OFFSET;
2945 break;
2946 case 4:
2947 reg_block = CRTC4_REGISTER_OFFSET;
2948 break;
2949 case 5:
2950 reg_block = CRTC5_REGISTER_OFFSET;
2951 break;
2952 default:
2953 DRM_DEBUG("invalid crtc %d\n", crtc);
2954 return;
2955 }
2956
2957 switch (state) {
2958 case AMDGPU_IRQ_STATE_DISABLE:
2959 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2960 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2961 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2962 break;
2963 case AMDGPU_IRQ_STATE_ENABLE:
2964 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2965 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2966 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2967 break;
2968 default:
2969 break;
2970 }
2971}
2972
2973static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2974 struct amdgpu_irq_src *src,
2975 unsigned type,
2976 enum amdgpu_interrupt_state state)
2977{
2978 u32 dc_hpd_int_cntl;
2979
2980 if (type >= adev->mode_info.num_hpd) {
2981 DRM_DEBUG("invalid hdp %d\n", type);
2982 return 0;
2983 }
2984
2985 switch (state) {
2986 case AMDGPU_IRQ_STATE_DISABLE:
2987 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2988 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2989 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2990 break;
2991 case AMDGPU_IRQ_STATE_ENABLE:
2992 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2993 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2994 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2995 break;
2996 default:
2997 break;
2998 }
2999
3000 return 0;
3001}
3002
3003static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3004 struct amdgpu_irq_src *src,
3005 unsigned type,
3006 enum amdgpu_interrupt_state state)
3007{
3008 switch (type) {
3009 case AMDGPU_CRTC_IRQ_VBLANK1:
3010 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3011 break;
3012 case AMDGPU_CRTC_IRQ_VBLANK2:
3013 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3014 break;
3015 case AMDGPU_CRTC_IRQ_VBLANK3:
3016 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3017 break;
3018 case AMDGPU_CRTC_IRQ_VBLANK4:
3019 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3020 break;
3021 case AMDGPU_CRTC_IRQ_VBLANK5:
3022 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3023 break;
3024 case AMDGPU_CRTC_IRQ_VBLANK6:
3025 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3026 break;
3027 case AMDGPU_CRTC_IRQ_VLINE1:
3028 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3029 break;
3030 case AMDGPU_CRTC_IRQ_VLINE2:
3031 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3032 break;
3033 case AMDGPU_CRTC_IRQ_VLINE3:
3034 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3035 break;
3036 case AMDGPU_CRTC_IRQ_VLINE4:
3037 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3038 break;
3039 case AMDGPU_CRTC_IRQ_VLINE5:
3040 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3041 break;
3042 case AMDGPU_CRTC_IRQ_VLINE6:
3043 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3044 break;
3045 default:
3046 break;
3047 }
3048 return 0;
3049}
3050
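/* crtc interrupt handler: acks the vblank/vline status bits and forwards
 * vblank events to DRM.
 */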
3051static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3052 struct amdgpu_irq_src *source,
3053 struct amdgpu_iv_entry *entry)
3054{
3055 unsigned crtc = entry->src_id - 1;
3056 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3057 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3058 crtc);
3059
3060 switch (entry->src_data[0]) {
	case 0: /* vblank */
3062 if (disp_int & interrupt_status_offsets[crtc].vblank)
3063 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3064 else
3065 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3066
3067 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3068 drm_handle_vblank(adev_to_drm(adev), crtc);
3069 }
3070 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3071 break;
	case 1: /* vline */
3073 if (disp_int & interrupt_status_offsets[crtc].vline)
3074 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3075 else
3076 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3077
3078 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3079 break;
3080 default:
3081 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3082 break;
3083 }
3084
3085 return 0;
3086}
3087
3088static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3089 struct amdgpu_irq_src *src,
3090 unsigned type,
3091 enum amdgpu_interrupt_state state)
3092{
3093 u32 reg;
3094
3095 if (type >= adev->mode_info.num_crtc) {
3096 DRM_ERROR("invalid pageflip crtc %d\n", type);
3097 return -EINVAL;
3098 }
3099
3100 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3101 if (state == AMDGPU_IRQ_STATE_DISABLE)
3102 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3103 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3104 else
3105 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3106 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3107
3108 return 0;
3109}
3110
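/* Pageflip interrupt handler: acks the flip-completed status, sends the
 * pending vblank event and schedules the unpin work for the old buffer.
 */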
3111static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3112 struct amdgpu_irq_src *source,
3113 struct amdgpu_iv_entry *entry)
3114{
3115 unsigned long flags;
3116 unsigned crtc_id;
3117 struct amdgpu_crtc *amdgpu_crtc;
3118 struct amdgpu_flip_work *works;
3119
3120 crtc_id = (entry->src_id - 8) >> 1;
3121 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3122
3123 if (crtc_id >= adev->mode_info.num_crtc) {
3124 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3125 return -EINVAL;
3126 }
3127
3128 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3129 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3130 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3131 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3132
	/* an interrupt can fire before the crtc has been fully initialized */
3134 if (amdgpu_crtc == NULL)
3135 return 0;
3136
3137 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3138 works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3140 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3141 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3142 amdgpu_crtc->pflip_status,
3143 AMDGPU_FLIP_SUBMITTED);
3144 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3145 return 0;
3146 }
3147
	/* page flip completed, clean up */
3149 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3150 amdgpu_crtc->pflip_works = NULL;
3151
	/* send the vblank event for the completed flip */
3153 if (works->event)
3154 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3155
3156 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3157
3158 drm_crtc_vblank_put(&amdgpu_crtc->base);
3159 schedule_work(&works->unpin_work);
3160
3161 return 0;
3162}
3163
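/* Hotplug interrupt handler: acks the HPD interrupt and schedules the
 * hotplug handling work.
 */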
3164static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3165 struct amdgpu_irq_src *source,
3166 struct amdgpu_iv_entry *entry)
3167{
3168 uint32_t disp_int, mask, tmp;
3169 unsigned hpd;
3170
3171 if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3172 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3173 return 0;
3174 }
3175
3176 hpd = entry->src_data[0];
3177 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3178 mask = interrupt_status_offsets[hpd].hpd;
3179
3180 if (disp_int & mask) {
3181 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3182 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3183 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3184 schedule_work(&adev->hotplug_work);
3185 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3186 }
3187
3188 return 0;
3190}
3191
3192static int dce_v8_0_set_clockgating_state(void *handle,
3193 enum amd_clockgating_state state)
3194{
3195 return 0;
3196}
3197
3198static int dce_v8_0_set_powergating_state(void *handle,
3199 enum amd_powergating_state state)
3200{
3201 return 0;
3202}
3203
3204static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3205 .name = "dce_v8_0",
3206 .early_init = dce_v8_0_early_init,
3207 .late_init = NULL,
3208 .sw_init = dce_v8_0_sw_init,
3209 .sw_fini = dce_v8_0_sw_fini,
3210 .hw_init = dce_v8_0_hw_init,
3211 .hw_fini = dce_v8_0_hw_fini,
3212 .suspend = dce_v8_0_suspend,
3213 .resume = dce_v8_0_resume,
3214 .is_idle = dce_v8_0_is_idle,
3215 .wait_for_idle = dce_v8_0_wait_for_idle,
3216 .soft_reset = dce_v8_0_soft_reset,
3217 .set_clockgating_state = dce_v8_0_set_clockgating_state,
3218 .set_powergating_state = dce_v8_0_set_powergating_state,
3219};
3220
3221static void
3222dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3223 struct drm_display_mode *mode,
3224 struct drm_display_mode *adjusted_mode)
3225{
3226 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3227
3228 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3229
	/* turn the encoder off while the mode is reprogrammed */
3231 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3232
	/* program the line buffer interleave for interlaced modes */
3234 dce_v8_0_set_interleave(encoder->crtc, mode);
3235
3236 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3237 dce_v8_0_afmt_enable(encoder, true);
3238 dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3239 }
3240}
3241
3242static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3243{
3244 struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3245 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3246 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3247
3248 if ((amdgpu_encoder->active_device &
3249 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3250 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3251 ENCODER_OBJECT_ID_NONE)) {
3252 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3253 if (dig) {
3254 dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3255 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3256 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3257 }
3258 }
3259
3260 amdgpu_atombios_scratch_regs_lock(adev, true);
3261
3262 if (connector) {
3263 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3264
		/* select the clock/data port if the connector uses an i2c router */
3266 if (amdgpu_connector->router.cd_valid)
3267 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3268
		/* power on the eDP panel before the mode set */
3270 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3271 amdgpu_atombios_encoder_set_edp_panel_power(connector,
3272 ATOM_TRANSMITTER_ACTION_POWER_ON);
3273 }
3274
	/* tell the hardware which crtc feeds this encoder */
3276 amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT block for the output */
3278 dce_v8_0_program_fmt(encoder);
3279}
3280
3281static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3282{
3283 struct drm_device *dev = encoder->dev;
3284 struct amdgpu_device *adev = drm_to_adev(dev);
3285
	/* turn the encoder back on now that the crtc is programmed */
3287 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3288 amdgpu_atombios_scratch_regs_lock(adev, false);
3289}
3290
3291static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3292{
3293 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3294 struct amdgpu_encoder_atom_dig *dig;
3295
3296 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3297
3298 if (amdgpu_atombios_encoder_is_digital(encoder)) {
3299 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3300 dce_v8_0_afmt_enable(encoder, false);
3301 dig = amdgpu_encoder->enc_priv;
3302 dig->dig_encoder = -1;
3303 }
3304 amdgpu_encoder->active_device = 0;
3305}
3306
/* external bridge encoders are handled through the primary DIG encoders */
3308static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3309{
3310
3311}
3312
3313static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3314{
3315
3316}
3317
3318static void
3319dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3320 struct drm_display_mode *mode,
3321 struct drm_display_mode *adjusted_mode)
3322{
3323
3324}
3325
3326static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3327{
3328
3329}
3330
3331static void
3332dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3333{
3334
3335}
3336
3337static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3338 .dpms = dce_v8_0_ext_dpms,
3339 .prepare = dce_v8_0_ext_prepare,
3340 .mode_set = dce_v8_0_ext_mode_set,
3341 .commit = dce_v8_0_ext_commit,
3342 .disable = dce_v8_0_ext_disable,
	/* no detect callback for TMDS/LVDS here */
3344};
3345
3346static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3347 .dpms = amdgpu_atombios_encoder_dpms,
3348 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3349 .prepare = dce_v8_0_encoder_prepare,
3350 .mode_set = dce_v8_0_encoder_mode_set,
3351 .commit = dce_v8_0_encoder_commit,
3352 .disable = dce_v8_0_encoder_disable,
3353 .detect = amdgpu_atombios_encoder_dig_detect,
3354};
3355
3356static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3357 .dpms = amdgpu_atombios_encoder_dpms,
3358 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3359 .prepare = dce_v8_0_encoder_prepare,
3360 .mode_set = dce_v8_0_encoder_mode_set,
3361 .commit = dce_v8_0_encoder_commit,
3362 .detect = amdgpu_atombios_encoder_dac_detect,
3363};
3364
3365static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3366{
3367 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3368 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3369 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3370 kfree(amdgpu_encoder->enc_priv);
3371 drm_encoder_cleanup(encoder);
3372 kfree(amdgpu_encoder);
3373}
3374
3375static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3376 .destroy = dce_v8_0_encoder_destroy,
3377};
3378
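/* Create a drm encoder for an object table entry.  If an encoder with the
 * same enum already exists only its supported device bits are updated;
 * otherwise a new amdgpu_encoder is allocated and hooked up to the DAC, DIG
 * or external encoder helpers based on the encoder object id.
 */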
3379static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3380 uint32_t encoder_enum,
3381 uint32_t supported_device,
3382 u16 caps)
3383{
3384 struct drm_device *dev = adev_to_drm(adev);
3385 struct drm_encoder *encoder;
3386 struct amdgpu_encoder *amdgpu_encoder;
3387
	/* see if the encoder has already been added */
3389 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3390 amdgpu_encoder = to_amdgpu_encoder(encoder);
3391 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3392 amdgpu_encoder->devices |= supported_device;
3393 return;
3394 }
3395
3396 }
3397
	/* add a new one */
3399 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3400 if (!amdgpu_encoder)
3401 return;
3402
3403 encoder = &amdgpu_encoder->base;
3404 switch (adev->mode_info.num_crtc) {
3405 case 1:
3406 encoder->possible_crtcs = 0x1;
3407 break;
3408 case 2:
3409 default:
3410 encoder->possible_crtcs = 0x3;
3411 break;
3412 case 4:
3413 encoder->possible_crtcs = 0xf;
3414 break;
3415 case 6:
3416 encoder->possible_crtcs = 0x3f;
3417 break;
3418 }
3419
3420 amdgpu_encoder->enc_priv = NULL;
3421
3422 amdgpu_encoder->encoder_enum = encoder_enum;
3423 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3424 amdgpu_encoder->devices = supported_device;
3425 amdgpu_encoder->rmx_type = RMX_OFF;
3426 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3427 amdgpu_encoder->is_ext_encoder = false;
3428 amdgpu_encoder->caps = caps;
3429
3430 switch (amdgpu_encoder->encoder_id) {
3431 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3432 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3433 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3434 DRM_MODE_ENCODER_DAC, NULL);
3435 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3436 break;
3437 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3438 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3439 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3440 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3441 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3442 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3443 amdgpu_encoder->rmx_type = RMX_FULL;
3444 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3445 DRM_MODE_ENCODER_LVDS, NULL);
3446 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3447 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3448 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3449 DRM_MODE_ENCODER_DAC, NULL);
3450 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3451 } else {
3452 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3453 DRM_MODE_ENCODER_TMDS, NULL);
3454 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3455 }
3456 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3457 break;
3458 case ENCODER_OBJECT_ID_SI170B:
3459 case ENCODER_OBJECT_ID_CH7303:
3460 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3461 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3462 case ENCODER_OBJECT_ID_TITFP513:
3463 case ENCODER_OBJECT_ID_VT1623:
3464 case ENCODER_OBJECT_ID_HDMI_SI1930:
3465 case ENCODER_OBJECT_ID_TRAVIS:
3466 case ENCODER_OBJECT_ID_NUTMEG:
		/* external bridges are driven through the primary DIG encoders */
3468 amdgpu_encoder->is_ext_encoder = true;
3469 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3470 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3471 DRM_MODE_ENCODER_LVDS, NULL);
3472 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3473 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3474 DRM_MODE_ENCODER_DAC, NULL);
3475 else
3476 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3477 DRM_MODE_ENCODER_TMDS, NULL);
3478 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3479 break;
3480 }
3481}
3482
3483static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3484 .bandwidth_update = &dce_v8_0_bandwidth_update,
3485 .vblank_get_counter = &dce_v8_0_vblank_get_counter,
3486 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3487 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3488 .hpd_sense = &dce_v8_0_hpd_sense,
3489 .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3490 .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3491 .page_flip = &dce_v8_0_page_flip,
3492 .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3493 .add_encoder = &dce_v8_0_encoder_add,
3494 .add_connector = &amdgpu_connector_add,
3495};
3496
3497static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3498{
3499 adev->mode_info.funcs = &dce_v8_0_display_funcs;
3500}
3501
3502static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3503 .set = dce_v8_0_set_crtc_interrupt_state,
3504 .process = dce_v8_0_crtc_irq,
3505};
3506
3507static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3508 .set = dce_v8_0_set_pageflip_interrupt_state,
3509 .process = dce_v8_0_pageflip_irq,
3510};
3511
3512static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3513 .set = dce_v8_0_set_hpd_interrupt_state,
3514 .process = dce_v8_0_hpd_irq,
3515};
3516
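/* Hook up the crtc, pageflip and hpd interrupt source callbacks and set how
 * many interrupt types each source exposes.
 */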
3517static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3518{
3519 if (adev->mode_info.num_crtc > 0)
3520 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3521 else
3522 adev->crtc_irq.num_types = 0;
3523 adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3524
3525 adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3526 adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3527
3528 adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3529 adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3530}
3531
3532const struct amdgpu_ip_block_version dce_v8_0_ip_block =
3533{
3534 .type = AMD_IP_BLOCK_TYPE_DCE,
3535 .major = 8,
3536 .minor = 0,
3537 .rev = 0,
3538 .funcs = &dce_v8_0_ip_funcs,
3539};
3540
3541const struct amdgpu_ip_block_version dce_v8_1_ip_block =
3542{
3543 .type = AMD_IP_BLOCK_TYPE_DCE,
3544 .major = 8,
3545 .minor = 1,
3546 .rev = 0,
3547 .funcs = &dce_v8_0_ip_funcs,
3548};
3549
3550const struct amdgpu_ip_block_version dce_v8_2_ip_block =
3551{
3552 .type = AMD_IP_BLOCK_TYPE_DCE,
3553 .major = 8,
3554 .minor = 2,
3555 .rev = 0,
3556 .funcs = &dce_v8_0_ip_funcs,
3557};
3558
3559const struct amdgpu_ip_block_version dce_v8_3_ip_block =
3560{
3561 .type = AMD_IP_BLOCK_TYPE_DCE,
3562 .major = 8,
3563 .minor = 3,
3564 .rev = 0,
3565 .funcs = &dce_v8_0_ip_funcs,
3566};
3567
3568const struct amdgpu_ip_block_version dce_v8_5_ip_block =
3569{
3570 .type = AMD_IP_BLOCK_TYPE_DCE,
3571 .major = 8,
3572 .minor = 5,
3573 .rev = 0,
3574 .funcs = &dce_v8_0_ip_funcs,
3575};
3576