/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "dce_v8_0.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
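/*
 * interrupt_status_offsets is indexed by CRTC id: each entry names the
 * DISP_INTERRUPT_STATUS* register carrying that pipe's status bits (pipe 0
 * in DISP_INTERRUPT_STATUS, pipes 1-5 in the successive *_CONTINUE*
 * registers) along with the vblank, vline and hpd bit masks to test in it.
 */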
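/*
 * The two helpers below implement indirect register access for the AZALIA
 * (HD audio) endpoint blocks: the target register index is written to
 * mmAZALIA_F0_CODEC_ENDPOINT_INDEX and the value is then transferred
 * through mmAZALIA_F0_CODEC_ENDPOINT_DATA, with audio_endpt_idx_lock
 * keeping the two-step sequence atomic.  They are reached via the
 * RREG32_AUDIO_ENDPT()/WREG32_AUDIO_ENDPT() macros used later in this file.
 */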
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		/* clear only the enable bit, preserving the other control fields */
		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

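/*
 * Hang-check heuristic, as implemented below: any CRTC with its master
 * enable set should see its HV counter (current horizontal/vertical
 * position) keep moving while scanning out.  The function samples
 * mmCRTC_STATUS_HV_COUNT for every enabled CRTC, then re-reads it up to
 * ten times at 100us intervals, clearing a CRTC from the hung set as soon
 * as its count advances; it reports a hang only if some enabled CRTC never
 * advances across the whole window.
 */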
static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		num_crtc = 6;
		break;
	case CHIP_KAVERI:
		num_crtc = 4;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}

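/*
 * dce_v8_0_disable_dce() below is deliberately non-static: callers outside
 * this file use it to quiesce the display engine.  It disables VGA
 * rendering and then clears CRTC_MASTER_EN on every enabled CRTC, taking
 * CRTC_UPDATE_LOCK around each disable so the double-buffered registers
 * latch consistently; the whole sequence is skipped when the vbios reports
 * no DCE engine info (boards without display hardware).
 */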
void dce_v8_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v8_0_set_vga_render_state(adev, false);

		/* disable crtcs */
		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
						     CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Setup up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;

	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels;
	u32 yclk;
	u32 sclk;
	u32 disp_clk;
	u32 src_width;
	u32 active_time;
	u32 blank_time;
	bool interlaced;
	fixed20_12 vsc;
	u32 num_heads;
	u32 bytes_per_pixel;
	u32 lb_size;
	u32 vtaps;
};
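/*
 * Units for dce8_wm_params, inferred from how the fields are filled in and
 * consumed below (an annotation, not hardware documentation): yclk, sclk
 * and disp_clk are in kHz (each is divided by 1000 in fixed point to get
 * MHz), active_time and blank_time are in ns, src_width and lb_size are in
 * pixels, and the bandwidth helpers all return MBytes/s.  Worked example
 * for the dram path: a 2-channel board with an effective 1 GHz memory
 * clock (yclk = 1000000) yields 1000 MHz * (2 channels * 4 bytes) * 0.7
 * efficiency = 5600 MBytes/s from dce_v8_0_dram_bandwidth().
 */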

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.5 usually */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns for vram */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
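/*
 * Rough worked example for dce_v8_0_latency_watermark() with made-up but
 * representative numbers: available_bandwidth = 5600 MB/s, disp_clk =
 * 300000 kHz, num_heads = 2.  Then mc_latency = 2000 ns, dc_latency =
 * 40000000 / 300000 = 133 ns, worst_chunk_return_time = (512 * 8 * 1000) /
 * 5600 = 731 ns, cursor_line_pair_return_time = (128 * 4 * 1000) / 5600 =
 * 91 ns, other_heads_data_return_time = 3 * 731 + 2 * 91 = 2375 ns, for a
 * base latency of about 4508 ns.  The line-fill term is only added on top
 * when the line buffer cannot be refilled within the active part of a
 * scanline.
 */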

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
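/*
 * Note on the A/B selection dance above: the URGENCY_WATERMARK_MASK field
 * of DPG_WATERMARK_MASK_CONTROL steers which watermark set the following
 * DPG_PIPE_URGENCY_CONTROL write lands in, so set A (high clocks) is
 * programmed with the mask at 1 and set B (low clocks) with the mask at 2,
 * after which the original selection is restored.  line_time, wm_high and
 * wm_low are stashed on the crtc so DPM can consult them when switching
 * clock levels.
 */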

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

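/*
 * Azalia pin widgets report their connectivity in the
 * PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT register; a PORT_CONNECTIVITY
 * field of 1 means "no physical connection", which is why the helper below
 * treats exactly that value as disconnected and anything else as a usable
 * pin.
 */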
static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		      AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		     AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo speakers */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};
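/*
 * pin_offsets[] above expresses each audio pin's register block relative
 * to the first AZALIA endpoint block at 0x1780 (hence entry 0 being
 * written as 0x1780 - 0x1780); the audio endpoint accessors add the offset
 * back on top of the shared index/data pair.
 */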

static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio.  it will be set up later */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}
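/*
 * Background for the ACR values programmed above (HDMI audio clock
 * regeneration): the sink reconstructs the audio clock from
 * 128 * audio_sample_rate = pixel_clock * N / CTS, so amdgpu_afmt_acr()
 * derives an (N, CTS) pair per base rate (32, 44.1 and 48 kHz) for the
 * given pixel clock, and the pairs land in the HDMI_ACR_*_0/1 registers.
 */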

/*
 * build a HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (coefficient of two integer numbers)
	 */
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */

	if (bpc > 8)
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
	else
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */

	dce_v8_0_afmt_update_ACR(encoder, mode->clock);

	WREG32(mmAFMT_60958_0 + offset,
	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));

	WREG32(mmAFMT_60958_1 + offset,
	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));

	WREG32(mmAFMT_60958_2 + offset,
	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));

	dce_v8_0_audio_write_speaker_allocation(encoder);

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v8_0_afmt_audio_select_pin(encoder);
	dce_v8_0_audio_write_sad_regs(encoder);
	dce_v8_0_audio_write_latency_fields(encoder, mode);

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);

	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */

	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);

	/* enable audio after setting up hw */
	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}

static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE8 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			int j;
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}

static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}

static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};
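/*
 * Each display head has its own D#VGA_CONTROL register; judging by the
 * masking in dce_v8_0_vga_enable() below, bit 0 is the VGA-mode enable for
 * the head, and the helper sets or clears just that bit while leaving the
 * rest of the register untouched.
 */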

static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	if (enable)
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
	else
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
}

static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (enable)
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
	else
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
}

static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels;
	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
	u32 pipe_config;
	u32 viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;
	struct drm_format_name_buf format_name;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic)
		target_fb = fb;
	else
		target_fb = crtc->primary->fb;

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = target_fb->obj[0];
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (!atomic) {
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			return -EINVAL;
		}
	}
	fb_location = amdgpu_bo_gpu_offset(abo);

	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	switch (target_fb->format->format) {
	case DRM_FORMAT_C8:
		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
		fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
			   (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->format->format, &format_name));
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
	}
1910
1911 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
1912
1913 dce_v8_0_vga_enable(crtc, false);
1914
1915
1916
1917
1918 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1919
1920 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1921 upper_32_bits(fb_location));
1922 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1923 upper_32_bits(fb_location));
1924 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1925 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1926 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1927 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
1928 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1929 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the
	 * LUT for > 8 bpc scanout to avoid truncation of the fb indices to
	 * the 8 msb.
	 */
1936 WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1937 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
1938 ~LUT_10BIT_BYPASS_EN);
1939
1940 if (bypass_lut)
1941 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1942
1943 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1944 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1945 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1946 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1947 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1948 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1949
1950 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1951 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1952
1953 dce_v8_0_grph_enable(crtc, true);
1954
1955 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1956 target_fb->height);
1957
1958 x &= ~3;
1959 y &= ~1;
1960 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1961 (x << 16) | y);
1962 viewport_w = crtc->mode.hdisplay;
1963 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1964 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1965 (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in the vblank interval */
1968 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1969
1970 if (!atomic && fb && fb != crtc->primary->fb) {
1971 abo = gem_to_amdgpu_bo(fb->obj[0]);
1972 r = amdgpu_bo_reserve(abo, true);
1973 if (unlikely(r != 0))
1974 return r;
1975 amdgpu_bo_unpin(abo);
1976 amdgpu_bo_unreserve(abo);
1977 }

	/* Bytes per pixel may have changed */
1980 dce_v8_0_bandwidth_update(adev);
1981
1982 return 0;
1983}
1984
1985static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
1986 struct drm_display_mode *mode)
1987{
1988 struct drm_device *dev = crtc->dev;
1989 struct amdgpu_device *adev = dev->dev_private;
1990 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1991
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
		       LB_DATA_FORMAT__INTERLEAVE_EN_MASK);
	else
		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1997}
1998
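/* Program the legacy 256-entry hardware LUT from crtc->gamma_store and put
 * the input/output CSC, prescale, degamma, gamut remap and regamma blocks
 * into bypass, so the LUT is the only color transform applied to scanout.
 */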
1999static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2000{
2001 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2002 struct drm_device *dev = crtc->dev;
2003 struct amdgpu_device *adev = dev->dev_private;
2004 u16 *r, *g, *b;
2005 int i;
2006
2007 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2008
2009 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2010 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2011 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2012 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2013 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2014 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2015 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2016 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2017 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2018 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2019
2020 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2021
2022 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2023 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2024 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2025
2026 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2027 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2028 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2029
2030 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2031 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2032
2033 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2034 r = crtc->gamma_store;
2035 g = r + crtc->gamma_size;
2036 b = g + crtc->gamma_size;
2037 for (i = 0; i < 256; i++) {
2038 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2039 ((*r++ & 0xffc0) << 14) |
2040 ((*g++ & 0xffc0) << 4) |
2041 (*b++ >> 6));
2042 }
2043
2044 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2045 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2046 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2047 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2048 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2049 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2050 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2051 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2052 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2053 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2054 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2055 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2056 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2057
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);

	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
2062 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2063 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2064}
2065
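/* Map an encoder object to a DIG block: UNIPHY0/1/2 each provide two links
 * (A and B), UNIPHY3 provides the seventh DIG.
 */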
2066static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2067{
2068 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2069 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2070
	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		if (dig->linkb)
			return 1;
		else
			return 0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		if (dig->linkb)
			return 3;
		else
			return 2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		if (dig->linkb)
			return 5;
		else
			return 4;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		return 6;
	default:
		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
		return 0;
	}
2097}
2098

/**
 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
 * monitors a dedicated PPLL must be used.  If a particular board has
 * an external DP PLL, PLL programming is skipped entirely.
 *
 * KB/ML:
 * - PPLL1 and PPLL2 are available for all UNIPHY (both DP and non-DP)
 *
 * CI (Bonaire/Hawaii/Kaveri):
 * - PPLL0, PPLL1 and PPLL2 are available for all UNIPHY (both DP and
 *   non-DP) and DAC
 */
2121static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2122{
2123 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2124 struct drm_device *dev = crtc->dev;
2125 struct amdgpu_device *adev = dev->dev_private;
2126 u32 pll_in_use;
2127 int pll;
2128
2129 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2130 if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
2132 return ATOM_PPLL_INVALID;
2133 else {
			/* use the same PPLL for all DP monitors */
2135 pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2136 if (pll != ATOM_PPLL_INVALID)
2137 return pll;
2138 }
2139 } else {
		/* use the same PPLL for all monitors with the same clock */
2141 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2142 if (pll != ATOM_PPLL_INVALID)
2143 return pll;
2144 }
2145
2146 if ((adev->asic_type == CHIP_KABINI) ||
2147 (adev->asic_type == CHIP_MULLINS)) {
		/* KB/ML has PPLL1 and PPLL2 */
2149 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2150 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2151 return ATOM_PPLL2;
2152 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2153 return ATOM_PPLL1;
2154 DRM_ERROR("unable to allocate a PPLL\n");
2155 return ATOM_PPLL_INVALID;
2156 } else {
		/* CI has PPLL0, PPLL1 and PPLL2 */
2158 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2159 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2160 return ATOM_PPLL2;
2161 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2162 return ATOM_PPLL1;
2163 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2164 return ATOM_PPLL0;
2165 DRM_ERROR("unable to allocate a PPLL\n");
2166 return ATOM_PPLL_INVALID;
2167 }
2169}
2170
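/* Lock cursor register updates so that position, hot spot and size are
 * latched to the hardware atomically.
 */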
2171static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2172{
2173 struct amdgpu_device *adev = crtc->dev->dev_private;
2174 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2175 uint32_t cur_lock;
2176
2177 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2178 if (lock)
2179 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2180 else
2181 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2182 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2183}
2184
2185static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2186{
2187 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2188 struct amdgpu_device *adev = crtc->dev->dev_private;
2189
2190 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2191 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2192 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2193}
2194
2195static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2196{
2197 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2198 struct amdgpu_device *adev = crtc->dev->dev_private;
2199
2200 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2201 upper_32_bits(amdgpu_crtc->cursor_addr));
2202 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2203 lower_32_bits(amdgpu_crtc->cursor_addr));
2204
2205 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2206 CUR_CONTROL__CURSOR_EN_MASK |
2207 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2208 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2209}
2210
2211static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2212 int x, int y)
2213{
2214 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2215 struct amdgpu_device *adev = crtc->dev->dev_private;
2216 int xorigin = 0, yorigin = 0;
2217
2218 amdgpu_crtc->cursor_x = x;
2219 amdgpu_crtc->cursor_y = y;
2220
	/* avivo cursors are offset into the total surface */
2222 x += crtc->x;
2223 y += crtc->y;
2224 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2225
2226 if (x < 0) {
2227 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2228 x = 0;
2229 }
2230 if (y < 0) {
2231 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2232 y = 0;
2233 }
2234
2235 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2236 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2237 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2238 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2239
2240 return 0;
2241}
2242
2243static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2244 int x, int y)
2245{
2246 int ret;
2247
2248 dce_v8_0_lock_cursor(crtc, true);
2249 ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2250 dce_v8_0_lock_cursor(crtc, false);
2251
2252 return ret;
2253}
2254
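/* Legacy cursor_set2 hook: pin the new cursor BO in VRAM, program the
 * cursor address/size/hot spot, then unpin and drop the old cursor BO.
 */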
2255static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2256 struct drm_file *file_priv,
2257 uint32_t handle,
2258 uint32_t width,
2259 uint32_t height,
2260 int32_t hot_x,
2261 int32_t hot_y)
2262{
2263 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2264 struct drm_gem_object *obj;
2265 struct amdgpu_bo *aobj;
2266 int ret;
2267
2268 if (!handle) {
		/* turn off cursor */
2270 dce_v8_0_hide_cursor(crtc);
2271 obj = NULL;
2272 goto unpin;
2273 }
2274
2275 if ((width > amdgpu_crtc->max_cursor_width) ||
2276 (height > amdgpu_crtc->max_cursor_height)) {
2277 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2278 return -EINVAL;
2279 }
2280
2281 obj = drm_gem_object_lookup(file_priv, handle);
2282 if (!obj) {
2283 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2284 return -ENOENT;
2285 }
2286
2287 aobj = gem_to_amdgpu_bo(obj);
2288 ret = amdgpu_bo_reserve(aobj, false);
2289 if (ret != 0) {
2290 drm_gem_object_put_unlocked(obj);
2291 return ret;
2292 }
2293
2294 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2295 amdgpu_bo_unreserve(aobj);
2296 if (ret) {
2297 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2298 drm_gem_object_put_unlocked(obj);
2299 return ret;
2300 }
2301 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2302
2303 dce_v8_0_lock_cursor(crtc, true);
2304
2305 if (width != amdgpu_crtc->cursor_width ||
2306 height != amdgpu_crtc->cursor_height ||
2307 hot_x != amdgpu_crtc->cursor_hot_x ||
2308 hot_y != amdgpu_crtc->cursor_hot_y) {
2309 int x, y;
2310
2311 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2312 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2313
2314 dce_v8_0_cursor_move_locked(crtc, x, y);
2315
2316 amdgpu_crtc->cursor_width = width;
2317 amdgpu_crtc->cursor_height = height;
2318 amdgpu_crtc->cursor_hot_x = hot_x;
2319 amdgpu_crtc->cursor_hot_y = hot_y;
2320 }
2321
2322 dce_v8_0_show_cursor(crtc);
2323 dce_v8_0_lock_cursor(crtc, false);
2324
2325unpin:
2326 if (amdgpu_crtc->cursor_bo) {
2327 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2328 ret = amdgpu_bo_reserve(aobj, true);
2329 if (likely(ret == 0)) {
2330 amdgpu_bo_unpin(aobj);
2331 amdgpu_bo_unreserve(aobj);
2332 }
2333 drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
2334 }
2335
2336 amdgpu_crtc->cursor_bo = obj;
2337 return 0;
2338}
2339
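/* Restore the cached cursor state (position and visibility) after a mode
 * set.
 */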
2340static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2341{
2342 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2343
2344 if (amdgpu_crtc->cursor_bo) {
2345 dce_v8_0_lock_cursor(crtc, true);
2346
2347 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2348 amdgpu_crtc->cursor_y);
2349
2350 dce_v8_0_show_cursor(crtc);
2351
2352 dce_v8_0_lock_cursor(crtc, false);
2353 }
2354}
2355
2356static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2357 u16 *blue, uint32_t size,
2358 struct drm_modeset_acquire_ctx *ctx)
2359{
2360 dce_v8_0_crtc_load_lut(crtc);
2361
2362 return 0;
2363}
2364
2365static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2366{
2367 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2368
2369 drm_crtc_cleanup(crtc);
2370 kfree(amdgpu_crtc);
2371}
2372
2373static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2374 .cursor_set2 = dce_v8_0_crtc_cursor_set2,
2375 .cursor_move = dce_v8_0_crtc_cursor_move,
2376 .gamma_set = dce_v8_0_crtc_gamma_set,
2377 .set_config = amdgpu_display_crtc_set_config,
2378 .destroy = dce_v8_0_crtc_destroy,
2379 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2380};
2381
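/* DPMS: enable or blank the CRTC via atombios, keep the VBLANK/PFLIP
 * interrupt state in sync with the new power state and notify the power
 * management code.
 */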
2382static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2383{
2384 struct drm_device *dev = crtc->dev;
2385 struct amdgpu_device *adev = dev->dev_private;
2386 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2387 unsigned type;
2388
2389 switch (mode) {
2390 case DRM_MODE_DPMS_ON:
2391 amdgpu_crtc->enabled = true;
2392 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2393 dce_v8_0_vga_enable(crtc, true);
2394 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2395 dce_v8_0_vga_enable(crtc, false);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2397 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2398 amdgpu_crtc->crtc_id);
2399 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2400 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2401 drm_crtc_vblank_on(crtc);
2402 dce_v8_0_crtc_load_lut(crtc);
2403 break;
2404 case DRM_MODE_DPMS_STANDBY:
2405 case DRM_MODE_DPMS_SUSPEND:
2406 case DRM_MODE_DPMS_OFF:
2407 drm_crtc_vblank_off(crtc);
2408 if (amdgpu_crtc->enabled) {
2409 dce_v8_0_vga_enable(crtc, true);
2410 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2411 dce_v8_0_vga_enable(crtc, false);
2412 }
2413 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2414 amdgpu_crtc->enabled = false;
2415 break;
2416 }
2417
2418 amdgpu_pm_compute_clocks(adev);
2419}
2420
2421static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2422{
	/* disable crtc pair power gating before programming */
2424 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2425 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2426 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2427}
2428
2429static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2430{
2431 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2432 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2433}
2434
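/* Full CRTC teardown: blank the CRTC, unpin the scanout buffer, power gate
 * the crtc pair and disable the PPLL if no other enabled CRTC shares it.
 */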
2435static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2436{
2437 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2438 struct drm_device *dev = crtc->dev;
2439 struct amdgpu_device *adev = dev->dev_private;
2440 struct amdgpu_atom_ss ss;
2441 int i;
2442
2443 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2444 if (crtc->primary->fb) {
2445 int r;
2446 struct amdgpu_bo *abo;
2447
2448 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2449 r = amdgpu_bo_reserve(abo, true);
2450 if (unlikely(r))
2451 DRM_ERROR("failed to reserve abo before unpin\n");
2452 else {
2453 amdgpu_bo_unpin(abo);
2454 amdgpu_bo_unreserve(abo);
2455 }
2456 }
2457
2458 dce_v8_0_grph_enable(crtc, false);
2459
2460 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2461
2462 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2463 if (adev->mode_info.crtcs[i] &&
2464 adev->mode_info.crtcs[i]->enabled &&
2465 i != amdgpu_crtc->crtc_id &&
2466 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
2470 goto done;
2471 }
2472 }
2473
2474 switch (amdgpu_crtc->pll_id) {
2475 case ATOM_PPLL1:
2476 case ATOM_PPLL2:
		/* disable the ppll */
2478 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2479 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2480 break;
2481 case ATOM_PPLL0:
		/* disable the ppll */
2483 if ((adev->asic_type == CHIP_KAVERI) ||
2484 (adev->asic_type == CHIP_BONAIRE) ||
2485 (adev->asic_type == CHIP_HAWAII))
2486 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2487 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2488 break;
2489 default:
2490 break;
2491 }
2492done:
2493 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2494 amdgpu_crtc->adjusted_clock = 0;
2495 amdgpu_crtc->encoder = NULL;
2496 amdgpu_crtc->connector = NULL;
2497}
2498
2499static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2500 struct drm_display_mode *mode,
2501 struct drm_display_mode *adjusted_mode,
2502 int x, int y, struct drm_framebuffer *old_fb)
2503{
2504 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2505
2506 if (!amdgpu_crtc->adjusted_clock)
2507 return -EINVAL;
2508
2509 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2510 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2511 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2512 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2513 amdgpu_atombios_crtc_scaler_setup(crtc);
2514 dce_v8_0_cursor_reset(crtc);

	/* update the hw mode for dpm */
2516 amdgpu_crtc->hw_mode = *adjusted_mode;
2517
2518 return 0;
2519}
2520
2521static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2522 const struct drm_display_mode *mode,
2523 struct drm_display_mode *adjusted_mode)
2524{
2525 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2526 struct drm_device *dev = crtc->dev;
2527 struct drm_encoder *encoder;
2528
	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2530 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2531 if (encoder->crtc == crtc) {
2532 amdgpu_crtc->encoder = encoder;
2533 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2534 break;
2535 }
2536 }
2537 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2538 amdgpu_crtc->encoder = NULL;
2539 amdgpu_crtc->connector = NULL;
2540 return false;
2541 }
2542 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2543 return false;
2544 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2545 return false;

	/* pick pll */
	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
2549 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2550 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2551 return false;
2552
2553 return true;
2554}
2555
2556static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2557 struct drm_framebuffer *old_fb)
2558{
2559 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2560}
2561
2562static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2563 struct drm_framebuffer *fb,
2564 int x, int y, enum mode_set_atomic state)
2565{
2566 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2567}
2568
2569static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2570 .dpms = dce_v8_0_crtc_dpms,
2571 .mode_fixup = dce_v8_0_crtc_mode_fixup,
2572 .mode_set = dce_v8_0_crtc_mode_set,
2573 .mode_set_base = dce_v8_0_crtc_set_base,
2574 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2575 .prepare = dce_v8_0_crtc_prepare,
2576 .commit = dce_v8_0_crtc_commit,
2577 .disable = dce_v8_0_crtc_disable,
2578};
2579
2580static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2581{
2582 struct amdgpu_crtc *amdgpu_crtc;
2583
2584 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2585 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2586 if (amdgpu_crtc == NULL)
2587 return -ENOMEM;
2588
2589 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2590
2591 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2592 amdgpu_crtc->crtc_id = index;
2593 adev->mode_info.crtcs[index] = amdgpu_crtc;
2594
2595 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2596 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2597 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2598 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2599
2600 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2601
2602 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2603 amdgpu_crtc->adjusted_clock = 0;
2604 amdgpu_crtc->encoder = NULL;
2605 amdgpu_crtc->connector = NULL;
2606 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2607
2608 return 0;
2609}
2610
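/* early_init: set up register accessors, display function pointers and the
 * per-ASIC CRTC/HPD/DIG counts before any hardware is touched.
 */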
2611static int dce_v8_0_early_init(void *handle)
2612{
2613 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2614
2615 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2616 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2617
2618 dce_v8_0_set_display_funcs(adev);
2619
2620 adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2621
2622 switch (adev->asic_type) {
2623 case CHIP_BONAIRE:
2624 case CHIP_HAWAII:
2625 adev->mode_info.num_hpd = 6;
2626 adev->mode_info.num_dig = 6;
2627 break;
2628 case CHIP_KAVERI:
2629 adev->mode_info.num_hpd = 6;
2630 adev->mode_info.num_dig = 7;
2631 break;
2632 case CHIP_KABINI:
2633 case CHIP_MULLINS:
2634 adev->mode_info.num_hpd = 6;
2635 adev->mode_info.num_dig = 6;
2636 break;
2637 default:
		/* FIXME: not supported yet */
2639 return -EINVAL;
2640 }
2641
2642 dce_v8_0_set_irq_funcs(adev);
2643
2644 return 0;
2645}
2646
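/* sw_init: register the vblank/vline, pageflip and HPD interrupt sources
 * (legacy client IDs: crtcs at 1..num_crtc, pageflips at 8,10,...,18, HPD
 * at 42), then create the mode config, CRTCs, connectors and audio state.
 */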
2647static int dce_v8_0_sw_init(void *handle)
2648{
2649 int r, i;
2650 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2651
2652 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2653 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2654 if (r)
2655 return r;
2656 }
2657
2658 for (i = 8; i < 20; i += 2) {
2659 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2660 if (r)
2661 return r;
2662 }
2663
	/* HPD hotplug */
2665 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2666 if (r)
2667 return r;
2668
2669 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2670
2671 adev->ddev->mode_config.async_page_flip = true;
2672
2673 adev->ddev->mode_config.max_width = 16384;
2674 adev->ddev->mode_config.max_height = 16384;
2675
2676 adev->ddev->mode_config.preferred_depth = 24;
2677 adev->ddev->mode_config.prefer_shadow = 1;
2678
2679 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2680
2681 r = amdgpu_display_modeset_create_props(adev);
2682 if (r)
2683 return r;
2684
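	/* allocate crtcs */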
2689 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2690 r = dce_v8_0_crtc_init(adev, i);
2691 if (r)
2692 return r;
2693 }
2694
2695 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2696 amdgpu_display_print_display_setup(adev->ddev);
2697 else
2698 return -EINVAL;
2699
	/* setup afmt */
2701 r = dce_v8_0_afmt_init(adev);
2702 if (r)
2703 return r;
2704
2705 r = dce_v8_0_audio_init(adev);
2706 if (r)
2707 return r;
2708
2709 drm_kms_helper_poll_init(adev->ddev);
2710
2711 adev->mode_info.mode_config_initialized = true;
2712 return 0;
2713}
2714
2715static int dce_v8_0_sw_fini(void *handle)
2716{
2717 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2718
2719 kfree(adev->mode_info.bios_hardcoded_edid);
2720
2721 drm_kms_helper_poll_fini(adev->ddev);
2722
2723 dce_v8_0_audio_fini(adev);
2724
2725 dce_v8_0_afmt_fini(adev);
2726
2727 drm_mode_config_cleanup(adev->ddev);
2728 adev->mode_info.mode_config_initialized = false;
2729
2730 return 0;
2731}
2732
2733static int dce_v8_0_hw_init(void *handle)
2734{
2735 int i;
2736 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2737
	/* disable vga render */
2739 dce_v8_0_set_vga_render_state(adev, false);
	/* init dig PHYs, disp eng pll */
2741 amdgpu_atombios_encoder_init_dig(adev);
2742 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2743
	/* initialize hpd */
2745 dce_v8_0_hpd_init(adev);
2746
2747 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2748 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2749 }
2750
2751 dce_v8_0_pageflip_interrupt_init(adev);
2752
2753 return 0;
2754}
2755
2756static int dce_v8_0_hw_fini(void *handle)
2757{
2758 int i;
2759 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2760
2761 dce_v8_0_hpd_fini(adev);
2762
2763 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2764 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2765 }
2766
2767 dce_v8_0_pageflip_interrupt_fini(adev);
2768
2769 return 0;
2770}
2771
2772static int dce_v8_0_suspend(void *handle)
2773{
2774 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2775
2776 adev->mode_info.bl_level =
2777 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2778
2779 return dce_v8_0_hw_fini(handle);
2780}
2781
2782static int dce_v8_0_resume(void *handle)
2783{
2784 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2785 int ret;
2786
2787 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2788 adev->mode_info.bl_level);
2789
2790 ret = dce_v8_0_hw_init(handle);
2791
	/* turn on the BL */
2793 if (adev->mode_info.bl_encoder) {
2794 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2795 adev->mode_info.bl_encoder);
2796 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2797 bl_level);
2798 }
2799
2800 return ret;
2801}
2802
2803static bool dce_v8_0_is_idle(void *handle)
2804{
2805 return true;
2806}
2807
2808static int dce_v8_0_wait_for_idle(void *handle)
2809{
2810 return 0;
2811}
2812
2813static int dce_v8_0_soft_reset(void *handle)
2814{
2815 u32 srbm_soft_reset = 0, tmp;
2816 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2817
2818 if (dce_v8_0_is_display_hung(adev))
2819 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2820
2821 if (srbm_soft_reset) {
2822 tmp = RREG32(mmSRBM_SOFT_RESET);
2823 tmp |= srbm_soft_reset;
2824 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2825 WREG32(mmSRBM_SOFT_RESET, tmp);
2826 tmp = RREG32(mmSRBM_SOFT_RESET);
2827
2828 udelay(50);
2829
2830 tmp &= ~srbm_soft_reset;
2831 WREG32(mmSRBM_SOFT_RESET, tmp);
2832 tmp = RREG32(mmSRBM_SOFT_RESET);
2833
		/* Wait a little for things to settle down */
2835 udelay(50);
2836 }
2837 return 0;
2838}
2839
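/* Toggle the per-CRTC VBLANK interrupt mask bit in the LB_INTERRUPT_MASK
 * register of the given CRTC's register block.
 */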
2840static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2841 int crtc,
2842 enum amdgpu_interrupt_state state)
2843{
2844 u32 reg_block, lb_interrupt_mask;
2845
2846 if (crtc >= adev->mode_info.num_crtc) {
2847 DRM_DEBUG("invalid crtc %d\n", crtc);
2848 return;
2849 }
2850
2851 switch (crtc) {
2852 case 0:
2853 reg_block = CRTC0_REGISTER_OFFSET;
2854 break;
2855 case 1:
2856 reg_block = CRTC1_REGISTER_OFFSET;
2857 break;
2858 case 2:
2859 reg_block = CRTC2_REGISTER_OFFSET;
2860 break;
2861 case 3:
2862 reg_block = CRTC3_REGISTER_OFFSET;
2863 break;
2864 case 4:
2865 reg_block = CRTC4_REGISTER_OFFSET;
2866 break;
2867 case 5:
2868 reg_block = CRTC5_REGISTER_OFFSET;
2869 break;
2870 default:
2871 DRM_DEBUG("invalid crtc %d\n", crtc);
2872 return;
2873 }
2874
2875 switch (state) {
2876 case AMDGPU_IRQ_STATE_DISABLE:
2877 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2878 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2879 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2880 break;
2881 case AMDGPU_IRQ_STATE_ENABLE:
2882 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2883 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2884 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2885 break;
2886 default:
2887 break;
2888 }
2889}
2890
2891static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2892 int crtc,
2893 enum amdgpu_interrupt_state state)
2894{
2895 u32 reg_block, lb_interrupt_mask;
2896
2897 if (crtc >= adev->mode_info.num_crtc) {
2898 DRM_DEBUG("invalid crtc %d\n", crtc);
2899 return;
2900 }
2901
2902 switch (crtc) {
2903 case 0:
2904 reg_block = CRTC0_REGISTER_OFFSET;
2905 break;
2906 case 1:
2907 reg_block = CRTC1_REGISTER_OFFSET;
2908 break;
2909 case 2:
2910 reg_block = CRTC2_REGISTER_OFFSET;
2911 break;
2912 case 3:
2913 reg_block = CRTC3_REGISTER_OFFSET;
2914 break;
2915 case 4:
2916 reg_block = CRTC4_REGISTER_OFFSET;
2917 break;
2918 case 5:
2919 reg_block = CRTC5_REGISTER_OFFSET;
2920 break;
2921 default:
2922 DRM_DEBUG("invalid crtc %d\n", crtc);
2923 return;
2924 }
2925
2926 switch (state) {
2927 case AMDGPU_IRQ_STATE_DISABLE:
2928 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2929 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2930 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2931 break;
2932 case AMDGPU_IRQ_STATE_ENABLE:
2933 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2934 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2935 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2936 break;
2937 default:
2938 break;
2939 }
2940}
2941
2942static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2943 struct amdgpu_irq_src *src,
2944 unsigned type,
2945 enum amdgpu_interrupt_state state)
2946{
2947 u32 dc_hpd_int_cntl;
2948
2949 if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
2951 return 0;
2952 }
2953
2954 switch (state) {
2955 case AMDGPU_IRQ_STATE_DISABLE:
2956 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2957 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2958 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2959 break;
2960 case AMDGPU_IRQ_STATE_ENABLE:
2961 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2962 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
2963 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2964 break;
2965 default:
2966 break;
2967 }
2968
2969 return 0;
2970}
2971
2972static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2973 struct amdgpu_irq_src *src,
2974 unsigned type,
2975 enum amdgpu_interrupt_state state)
2976{
2977 switch (type) {
2978 case AMDGPU_CRTC_IRQ_VBLANK1:
2979 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2980 break;
2981 case AMDGPU_CRTC_IRQ_VBLANK2:
2982 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2983 break;
2984 case AMDGPU_CRTC_IRQ_VBLANK3:
2985 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2986 break;
2987 case AMDGPU_CRTC_IRQ_VBLANK4:
2988 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2989 break;
2990 case AMDGPU_CRTC_IRQ_VBLANK5:
2991 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2992 break;
2993 case AMDGPU_CRTC_IRQ_VBLANK6:
2994 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2995 break;
2996 case AMDGPU_CRTC_IRQ_VLINE1:
2997 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
2998 break;
2999 case AMDGPU_CRTC_IRQ_VLINE2:
3000 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3001 break;
3002 case AMDGPU_CRTC_IRQ_VLINE3:
3003 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3004 break;
3005 case AMDGPU_CRTC_IRQ_VLINE4:
3006 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3007 break;
3008 case AMDGPU_CRTC_IRQ_VLINE5:
3009 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3010 break;
3011 case AMDGPU_CRTC_IRQ_VLINE6:
3012 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3013 break;
3014 default:
3015 break;
3016 }
3017 return 0;
3018}
3019
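/* IH handler for CRTC interrupts: src_data[0] selects vblank (0) or
 * vline (1); ack the status bit and forward vblank events to DRM.
 */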
3020static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3021 struct amdgpu_irq_src *source,
3022 struct amdgpu_iv_entry *entry)
3023{
3024 unsigned crtc = entry->src_id - 1;
3025 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3026 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3027 crtc);
3028
3029 switch (entry->src_data[0]) {
	case 0: /* vblank */
3031 if (disp_int & interrupt_status_offsets[crtc].vblank)
3032 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3033 else
3034 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3035
		if (amdgpu_irq_enabled(adev, source, irq_type))
			drm_handle_vblank(adev->ddev, crtc);
3039 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3040 break;
	case 1: /* vline */
3042 if (disp_int & interrupt_status_offsets[crtc].vline)
3043 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3044 else
3045 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3046
3047 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3048 break;
3049 default:
3050 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3051 break;
3052 }
3053
3054 return 0;
3055}
3056
3057static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3058 struct amdgpu_irq_src *src,
3059 unsigned type,
3060 enum amdgpu_interrupt_state state)
3061{
3062 u32 reg;
3063
3064 if (type >= adev->mode_info.num_crtc) {
3065 DRM_ERROR("invalid pageflip crtc %d\n", type);
3066 return -EINVAL;
3067 }
3068
3069 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3070 if (state == AMDGPU_IRQ_STATE_DISABLE)
3071 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3072 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3073 else
3074 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3075 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3076
3077 return 0;
3078}
3079
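/* IH handler for pageflip interrupts: clear the pending status, complete
 * the flip work and deliver the vblank event for the flipped CRTC.
 */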
3080static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3081 struct amdgpu_irq_src *source,
3082 struct amdgpu_iv_entry *entry)
3083{
3084 unsigned long flags;
3085 unsigned crtc_id;
3086 struct amdgpu_crtc *amdgpu_crtc;
3087 struct amdgpu_flip_work *works;
3088
3089 crtc_id = (entry->src_id - 8) >> 1;
3090 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3091
3092 if (crtc_id >= adev->mode_info.num_crtc) {
3093 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3094 return -EINVAL;
3095 }
3096
3097 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3098 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3099 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3100 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3101
	/* IRQ could occur when in initial stage */
3103 if (amdgpu_crtc == NULL)
3104 return 0;
3105
3106 spin_lock_irqsave(&adev->ddev->event_lock, flags);
3107 works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3109 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3110 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3111 amdgpu_crtc->pflip_status,
3112 AMDGPU_FLIP_SUBMITTED);
3113 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3114 return 0;
3115 }
3116
	/* page flip completed. clean up */
3118 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3119 amdgpu_crtc->pflip_works = NULL;
3120
	/* wake up userspace */
3122 if (works->event)
3123 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3124
3125 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3126
3127 drm_crtc_vblank_put(&amdgpu_crtc->base);
3128 schedule_work(&works->unpin_work);
3129
3130 return 0;
3131}
3132
3133static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3134 struct amdgpu_irq_src *source,
3135 struct amdgpu_iv_entry *entry)
3136{
3137 uint32_t disp_int, mask, tmp;
3138 unsigned hpd;
3139
3140 if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3141 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3142 return 0;
3143 }
3144
3145 hpd = entry->src_data[0];
3146 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3147 mask = interrupt_status_offsets[hpd].hpd;
3148
3149 if (disp_int & mask) {
3150 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3151 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3152 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3153 schedule_work(&adev->hotplug_work);
3154 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3155 }
3156
3157 return 0;
}
3160
3161static int dce_v8_0_set_clockgating_state(void *handle,
3162 enum amd_clockgating_state state)
3163{
3164 return 0;
3165}
3166
3167static int dce_v8_0_set_powergating_state(void *handle,
3168 enum amd_powergating_state state)
3169{
3170 return 0;
3171}
3172
3173static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3174 .name = "dce_v8_0",
3175 .early_init = dce_v8_0_early_init,
3176 .late_init = NULL,
3177 .sw_init = dce_v8_0_sw_init,
3178 .sw_fini = dce_v8_0_sw_fini,
3179 .hw_init = dce_v8_0_hw_init,
3180 .hw_fini = dce_v8_0_hw_fini,
3181 .suspend = dce_v8_0_suspend,
3182 .resume = dce_v8_0_resume,
3183 .is_idle = dce_v8_0_is_idle,
3184 .wait_for_idle = dce_v8_0_wait_for_idle,
3185 .soft_reset = dce_v8_0_soft_reset,
3186 .set_clockgating_state = dce_v8_0_set_clockgating_state,
3187 .set_powergating_state = dce_v8_0_set_powergating_state,
3188};
3189
3190static void
3191dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3192 struct drm_display_mode *mode,
3193 struct drm_display_mode *adjusted_mode)
3194{
3195 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3196
3197 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3198
	/* need to call this here rather than in prepare() since we need some crtc info */
3200 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3201
	/* set scaler clears this on some chips */
3203 dce_v8_0_set_interleave(encoder->crtc, mode);
3204
3205 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3206 dce_v8_0_afmt_enable(encoder, true);
3207 dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3208 }
3209}
3210
3211static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3212{
3213 struct amdgpu_device *adev = encoder->dev->dev_private;
3214 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3215 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3216
3217 if ((amdgpu_encoder->active_device &
3218 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3219 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3220 ENCODER_OBJECT_ID_NONE)) {
3221 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3222 if (dig) {
3223 dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3224 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3225 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3226 }
3227 }
3228
3229 amdgpu_atombios_scratch_regs_lock(adev, true);
3230
3231 if (connector) {
3232 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3233
		/* select the clock/data port if it uses a router */
3235 if (amdgpu_connector->router.cd_valid)
3236 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3237
		/* turn eDP panel on for mode set */
3239 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3240 amdgpu_atombios_encoder_set_edp_panel_power(connector,
3241 ATOM_TRANSMITTER_ACTION_POWER_ON);
3242 }
3243
	/* this is needed for the pll/ss setup to work correctly in some cases */
3245 amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
3247 dce_v8_0_program_fmt(encoder);
3248}
3249
3250static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3251{
3252 struct drm_device *dev = encoder->dev;
3253 struct amdgpu_device *adev = dev->dev_private;
3254
	/* need to call this here as we need the crtc set up */
3256 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3257 amdgpu_atombios_scratch_regs_lock(adev, false);
3258}
3259
3260static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3261{
3262 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3263 struct amdgpu_encoder_atom_dig *dig;
3264
3265 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3266
3267 if (amdgpu_atombios_encoder_is_digital(encoder)) {
3268 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3269 dce_v8_0_afmt_enable(encoder, false);
3270 dig = amdgpu_encoder->enc_priv;
3271 dig->dig_encoder = -1;
3272 }
3273 amdgpu_encoder->active_device = 0;
3274}
3275
/* these are handled by the primary encoders */
3277static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3278{
3279
3280}
3281
3282static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3283{
3284
3285}
3286
3287static void
3288dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3289 struct drm_display_mode *mode,
3290 struct drm_display_mode *adjusted_mode)
3291{
3292
3293}
3294
3295static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3296{
3297
3298}
3299
3300static void
3301dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3302{
3303
3304}
3305
3306static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3307 .dpms = dce_v8_0_ext_dpms,
3308 .prepare = dce_v8_0_ext_prepare,
3309 .mode_set = dce_v8_0_ext_mode_set,
3310 .commit = dce_v8_0_ext_commit,
3311 .disable = dce_v8_0_ext_disable,
};
3314
3315static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3316 .dpms = amdgpu_atombios_encoder_dpms,
3317 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3318 .prepare = dce_v8_0_encoder_prepare,
3319 .mode_set = dce_v8_0_encoder_mode_set,
3320 .commit = dce_v8_0_encoder_commit,
3321 .disable = dce_v8_0_encoder_disable,
3322 .detect = amdgpu_atombios_encoder_dig_detect,
3323};
3324
3325static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3326 .dpms = amdgpu_atombios_encoder_dpms,
3327 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3328 .prepare = dce_v8_0_encoder_prepare,
3329 .mode_set = dce_v8_0_encoder_mode_set,
3330 .commit = dce_v8_0_encoder_commit,
3331 .detect = amdgpu_atombios_encoder_dac_detect,
3332};
3333
3334static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3335{
3336 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3337 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3338 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3339 kfree(amdgpu_encoder->enc_priv);
3340 drm_encoder_cleanup(encoder);
3341 kfree(amdgpu_encoder);
3342}
3343
3344static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3345 .destroy = dce_v8_0_encoder_destroy,
3346};
3347
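/* Add an encoder found in the BIOS object tables: if one with the same
 * encoder_enum already exists just OR in the new supported devices,
 * otherwise allocate it and attach the helper funcs for its object type.
 */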
3348static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3349 uint32_t encoder_enum,
3350 uint32_t supported_device,
3351 u16 caps)
3352{
3353 struct drm_device *dev = adev->ddev;
3354 struct drm_encoder *encoder;
3355 struct amdgpu_encoder *amdgpu_encoder;
3356
	/* see if we already added it */
3358 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3359 amdgpu_encoder = to_amdgpu_encoder(encoder);
3360 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3361 amdgpu_encoder->devices |= supported_device;
3362 return;
3363 }
	}
3366
	/* add a new one */
3368 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3369 if (!amdgpu_encoder)
3370 return;
3371
3372 encoder = &amdgpu_encoder->base;
3373 switch (adev->mode_info.num_crtc) {
3374 case 1:
3375 encoder->possible_crtcs = 0x1;
3376 break;
3377 case 2:
3378 default:
3379 encoder->possible_crtcs = 0x3;
3380 break;
3381 case 4:
3382 encoder->possible_crtcs = 0xf;
3383 break;
3384 case 6:
3385 encoder->possible_crtcs = 0x3f;
3386 break;
3387 }
3388
3389 amdgpu_encoder->enc_priv = NULL;
3390
3391 amdgpu_encoder->encoder_enum = encoder_enum;
3392 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3393 amdgpu_encoder->devices = supported_device;
3394 amdgpu_encoder->rmx_type = RMX_OFF;
3395 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3396 amdgpu_encoder->is_ext_encoder = false;
3397 amdgpu_encoder->caps = caps;
3398
3399 switch (amdgpu_encoder->encoder_id) {
3400 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3401 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3402 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3403 DRM_MODE_ENCODER_DAC, NULL);
3404 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3405 break;
3406 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3407 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3408 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3409 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3410 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3411 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3412 amdgpu_encoder->rmx_type = RMX_FULL;
3413 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3414 DRM_MODE_ENCODER_LVDS, NULL);
3415 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3416 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3417 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3418 DRM_MODE_ENCODER_DAC, NULL);
3419 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3420 } else {
3421 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3422 DRM_MODE_ENCODER_TMDS, NULL);
3423 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3424 }
3425 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3426 break;
3427 case ENCODER_OBJECT_ID_SI170B:
3428 case ENCODER_OBJECT_ID_CH7303:
3429 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3430 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3431 case ENCODER_OBJECT_ID_TITFP513:
3432 case ENCODER_OBJECT_ID_VT1623:
3433 case ENCODER_OBJECT_ID_HDMI_SI1930:
3434 case ENCODER_OBJECT_ID_TRAVIS:
3435 case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
3437 amdgpu_encoder->is_ext_encoder = true;
3438 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3439 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3440 DRM_MODE_ENCODER_LVDS, NULL);
3441 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3442 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3443 DRM_MODE_ENCODER_DAC, NULL);
3444 else
3445 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3446 DRM_MODE_ENCODER_TMDS, NULL);
3447 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3448 break;
3449 }
3450}
3451
3452static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3453 .bandwidth_update = &dce_v8_0_bandwidth_update,
3454 .vblank_get_counter = &dce_v8_0_vblank_get_counter,
3455 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3456 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3457 .hpd_sense = &dce_v8_0_hpd_sense,
3458 .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3459 .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3460 .page_flip = &dce_v8_0_page_flip,
3461 .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3462 .add_encoder = &dce_v8_0_encoder_add,
3463 .add_connector = &amdgpu_connector_add,
3464};
3465
3466static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3467{
3468 adev->mode_info.funcs = &dce_v8_0_display_funcs;
3469}
3470
3471static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3472 .set = dce_v8_0_set_crtc_interrupt_state,
3473 .process = dce_v8_0_crtc_irq,
3474};
3475
3476static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3477 .set = dce_v8_0_set_pageflip_interrupt_state,
3478 .process = dce_v8_0_pageflip_irq,
3479};
3480
3481static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3482 .set = dce_v8_0_set_hpd_interrupt_state,
3483 .process = dce_v8_0_hpd_irq,
3484};
3485
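/* Hook up the interrupt sources; vline irq types follow the vblank types,
 * hence AMDGPU_CRTC_IRQ_VLINE1 + num_crtc total CRTC irq types.
 */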
3486static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3487{
3488 if (adev->mode_info.num_crtc > 0)
3489 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3490 else
3491 adev->crtc_irq.num_types = 0;
3492 adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3493
3494 adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3495 adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3496
3497 adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3498 adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3499}
3500
3501const struct amdgpu_ip_block_version dce_v8_0_ip_block =
3502{
3503 .type = AMD_IP_BLOCK_TYPE_DCE,
3504 .major = 8,
3505 .minor = 0,
3506 .rev = 0,
3507 .funcs = &dce_v8_0_ip_funcs,
3508};
3509
3510const struct amdgpu_ip_block_version dce_v8_1_ip_block =
3511{
3512 .type = AMD_IP_BLOCK_TYPE_DCE,
3513 .major = 8,
3514 .minor = 1,
3515 .rev = 0,
3516 .funcs = &dce_v8_0_ip_funcs,
3517};
3518
3519const struct amdgpu_ip_block_version dce_v8_2_ip_block =
3520{
3521 .type = AMD_IP_BLOCK_TYPE_DCE,
3522 .major = 8,
3523 .minor = 2,
3524 .rev = 0,
3525 .funcs = &dce_v8_0_ip_funcs,
3526};
3527
3528const struct amdgpu_ip_block_version dce_v8_3_ip_block =
3529{
3530 .type = AMD_IP_BLOCK_TYPE_DCE,
3531 .major = 8,
3532 .minor = 3,
3533 .rev = 0,
3534 .funcs = &dce_v8_0_ip_funcs,
3535};
3536
3537const struct amdgpu_ip_block_version dce_v8_5_ip_block =
3538{
3539 .type = AMD_IP_BLOCK_TYPE_DCE,
3540 .major = 8,
3541 .minor = 5,
3542 .rev = 0,
3543 .funcs = &dce_v8_0_ip_funcs,
3544};
3545