1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <drm/amdgpu_drm.h>
28#include "amdgpu.h"
29#include "amdgpu_i2c.h"
30#include "atom.h"
31#include "amdgpu_connectors.h"
32#include "amdgpu_display.h"
33#include <asm/div64.h>
34
35#include <linux/pci.h>
36#include <linux/pm_runtime.h>
37#include <drm/drm_crtc_helper.h>
38#include <drm/drm_edid.h>
39#include <drm/drm_gem_framebuffer_helper.h>
40#include <drm/drm_fb_helper.h>
41#include <drm/drm_fourcc.h>
42#include <drm/drm_vblank.h>
43
44static void amdgpu_display_flip_callback(struct dma_fence *f,
45 struct dma_fence_cb *cb)
46{
47 struct amdgpu_flip_work *work =
48 container_of(cb, struct amdgpu_flip_work, cb);
49
50 dma_fence_put(f);
51 schedule_work(&work->flip_work.work);
52}
53
54static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
55 struct dma_fence **f)
56{
57 struct dma_fence *fence= *f;
58
59 if (fence == NULL)
60 return false;
61
62 *f = NULL;
63
64 if (!dma_fence_add_callback(fence, &work->cb,
65 amdgpu_display_flip_callback))
66 return true;
67
68 dma_fence_put(fence);
69 return false;
70}
71
/*
 * Deferred page-flip worker: waits for all shared fences, then waits for
 * the right point relative to the target vblank, and finally programs the
 * flip via the per-ASIC page_flip hook.
 */
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

	/* Each call either installs a callback that re-schedules this work
	 * (return true -> bail out and wait) or consumes an already-signaled
	 * fence (return false -> check the next one). */
	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* If the CRTC is scanning out inside a vblank that precedes the
	 * target vblank, flipping now would be too early: retry in ~1 ms.
	 * The unsigned-subtract-then-cast handles counter wraparound. */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting pflip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

}
120
121
122
123
124static void amdgpu_display_unpin_work_func(struct work_struct *__work)
125{
126 struct amdgpu_flip_work *work =
127 container_of(__work, struct amdgpu_flip_work, unpin_work);
128 int r;
129
130
131 r = amdgpu_bo_reserve(work->old_abo, true);
132 if (likely(r == 0)) {
133 amdgpu_bo_unpin(work->old_abo);
134 amdgpu_bo_unreserve(work->old_abo);
135 } else
136 DRM_ERROR("failed to reserve buffer after flip\n");
137
138 amdgpu_bo_unref(&work->old_abo);
139 kfree(work->shared);
140 kfree(work);
141}
142
/*
 * amdgpu_display_crtc_page_flip_target - queue a page flip for @target vblank
 *
 * Pins the new framebuffer BO, collects its shared fences, and hands the
 * flip off to the deferred worker.  Returns 0 on success or a negative
 * errno; on failure all references and pins taken here are rolled back
 * via the goto ladder below.
 */
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	/* Collect all fences the flip must wait on before programming hw. */
	r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
				&work->shared_count, &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	/* Virtual display has no real scanout address to program. */
	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_wrok */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	/* Re-reserve so the unpin/unreserve labels below are valid. */
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}
260
/*
 * amdgpu_display_crtc_set_config - legacy modeset entry point
 *
 * Wraps drm_crtc_helper_set_config() with runtime-PM bookkeeping: the
 * device holds one extra power reference (have_disp_power_ref) as long as
 * at least one CRTC is enabled, so the GPU cannot runtime-suspend while
 * a display is active.
 */
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	/* pm_runtime_get_sync() bumps the usage count even on failure, so
	 * the error path must still drop the reference via "out". */
	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one (i.e. return without dropping it) */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
306
/* Human-readable names for the ATOM encoder object IDs, indexed by
 * amdgpu_encoder->encoder_id; used only for the debug printout below. */
static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};
350
/* Names for the hot-plug-detect pins, indexed by amdgpu_connector->hpd.hpd. */
static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};
359
/*
 * amdgpu_display_print_display_setup - dump connector/encoder topology
 *
 * Logs, for every connector: its name, HPD pin, DDC register set (or a
 * warning when a digital connector has no DDC bus), any DDC/clock routers,
 * and each encoder attached to it broken down by supported device bits.
 * Debug/bring-up aid only; no side effects.
 */
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			/* A digital connector without a DDC bus usually
			 * indicates a broken BIOS connector table. */
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}
438
/*
 * amdgpu_display_ddc_probe - check whether a sink responds on the DDC bus
 *
 * Performs a two-message I2C transaction at DDC_ADDR (write offset 0, then
 * read 8 bytes) over either the DP AUX channel or the native I2C adapter,
 * and validates the bytes against the EDID header pattern.
 *
 * Returns true only if the transfer succeeded and at least 6 of the 8
 * EDID header bytes match.
 */
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux) {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	} else {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
	}

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Tolerate a couple of corrupted header bytes: require at least 6
	 * of the 8 EDID header bytes to be valid before claiming a sink
	 * is present. */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector */
		return false;
	}
	return true;
}
485
/* Framebuffer vtable: amdgpu uses the generic GEM-backed helpers. */
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
490
/*
 * amdgpu_display_supported_domains - memory domains a scanout BO may live in
 *
 * VRAM is always allowed.  GTT is additionally allowed only on select APUs
 * running the DC display stack, and only for BOs created with the USWC flag
 * (write-combined CPU mapping) that the board actually supports.
 */
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * GTT scanout is restricted to APUs where it is known safe:
	 * it requires USWC mappings (both requested via bo_flags and
	 * supported by the board, per amdgpu_bo_support_uswc()) and DC
	 * support for the ASIC.  NOTE(review): the exact per-ASIC
	 * rationale (scatter/gather display support) is not visible
	 * here -- confirm against the display code before extending
	 * the list.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type)) {
		switch (adev->asic_type) {
		case CHIP_CARRIZO:
		case CHIP_STONEY:
			domain |= AMDGPU_GEM_DOMAIN_GTT;
			break;
		case CHIP_RAVEN:
			/* only Raven2 and Picasso variants, not original Raven */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				domain |= AMDGPU_GEM_DOMAIN_GTT;
			break;
		case CHIP_RENOIR:
		case CHIP_VANGOGH:
		case CHIP_YELLOW_CARP:
			domain |= AMDGPU_GEM_DOMAIN_GTT;
			break;

		default:
			break;
		}
	}
#endif

	return domain;
}
533
/* Format descriptions for framebuffers with one extra DCC metadata plane
 * (plane 1); selected by amdgpu_lookup_format_info() for DCC modifiers. */
static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
561
/* Format descriptions for framebuffers with two extra DCC planes
 * (displayable DCC in plane 1, retiled render DCC in plane 2); selected
 * for modifiers with the DCC_RETILE bit set. */
static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
589
590static const struct drm_format_info *
591lookup_format_info(const struct drm_format_info formats[],
592 int num_formats, u32 format)
593{
594 int i;
595
596 for (i = 0; i < num_formats; i++) {
597 if (formats[i].format == format)
598 return &formats[i];
599 }
600
601 return NULL;
602}
603
/*
 * amdgpu_lookup_format_info - format info override for AMD modifiers
 *
 * DCC modifiers carry compression metadata in extra planes, so the
 * generic single-plane format info is wrong for them.  Returns the
 * matching multi-plane description, or NULL to keep the default.
 */
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return NULL;

	/* DCC_RETILE must be checked before DCC: retiled buffers carry
	 * both bits but need the 3-plane layout. */
	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
		return lookup_format_info(dcc_retile_formats,
					  ARRAY_SIZE(dcc_retile_formats),
					  format);

	if (AMD_FMT_MOD_GET(DCC, modifier))
		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
					  format);

	/* returning NULL will cause the default format structs to be used */
	return NULL;
}
622
623
624
625
626
627
/*
 * Extract the renderable DCC offset from the BO's opaque metadata blob
 * (written by the userspace driver at export time).  Needed because the
 * render engines and the display engine may use different DCC surfaces.
 *
 * Returns 0 and stores the byte offset in *offset on success, -EINVAL
 * when the metadata is missing/too small or not version 1, or a reserve
 * error.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at
	 * least the rest + 2 additional words.  NOTE(review): the bit
	 * layouts below mirror the UMD descriptor words -- confirm against
	 * the userspace driver before changing.
	 */
	if (size < 40  || metadata[0] != 1)
		return -EINVAL;

	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}
673
/*
 * Derive a DRM format modifier for a framebuffer from the legacy GFX9+
 * tiling flags, for userspace that did not pass an explicit modifier.
 *
 * On success fills in afb->base.modifier (and, for DCC, the extra plane
 * offsets/pitches and a multi-plane format info) and sets
 * DRM_MODE_FB_MODIFIERS.  Returns -EINVAL for swizzle modes that cannot
 * be expressed as a modifier.
 */
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;

	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		/* Swizzle modes >= 16 are the *_X/*_T variants that mix in
		 * pipe/bank XOR bits. */
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		/* High bits of the swizzle mode select the block size. */
		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64KiB _T */
		case 6: /* 64KiB _X */
			block_size_bits = 16;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (adev->family == AMDGPU_FAMILY_NV)
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		/* Low bits select the micro-tile ordering; some orderings
		 * only exist pre-GFX10, so downgrade the version there. */
		switch (swizzle & 3) {
		case 0: /* Linear */
			return -EINVAL;
		case 1: /* S (standard) */
			if (!has_xor)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 2: /* D (display) */
			if (!has_xor && afb->base.format->cpp[0] != 4)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 3: /* R (rotated) */
			break;
		}

		if (has_xor) {
			/* How many XOR bits fit depends on the block size and
			 * the chip's pipe/bank/packer configuration. */
			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode = adev->asic_type > CHIP_RAVEN ||
						   (adev->asic_type == CHIP_RAVEN &&
						    adev->external_rev_id >= 0x81);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			/* DCC_OFFSET_256B stores the plane-1 offset in 256B units. */
			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

			/*
			 * If the userspace driver uses retiling the render
			 * DCC surface differs from the displayable one; pick
			 * it up from the BO metadata and expose it as plane 2.
			 * Failure to extract it is silently treated as
			 * "no retile".
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;  /* log2 of DCC block coverage in bytes */

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}
835
836
837static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
838{
839 u64 micro_tile_mode;
840
841
842 if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
843 return 0;
844
845 micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
846 switch (micro_tile_mode) {
847 case 0:
848 case 3:
849 return 0;
850 default:
851 drm_dbg_kms(afb->base.dev,
852 "Micro tile mode %llu not supported for scanout\n",
853 micro_tile_mode);
854 return -EINVAL;
855 }
856}
857
/*
 * Convert a block size (log2 bytes) and bytes-per-pixel into the block's
 * dimensions in pixels.  The pixel count is split as evenly as possible
 * between width and height, with width taking the extra bit when the
 * count of pixel bits is odd.
 */
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int pixel_log2 = block_log2 - ilog2(cpp);
	unsigned int width_log2 = (pixel_log2 + 1) / 2;

	*width = 1u << width_log2;
	*height = 1u << (pixel_log2 - width_log2);
}
869
/*
 * Return the log2 byte size of one DCC metadata block for the given
 * modifier, or 0 for unknown tile versions.  Alignment requirements grow
 * with RB count (GFX9) or pipe count (GFX10+) when the surface must be
 * RB- or pipe-aligned.
 */
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	switch (ver) {
	case AMD_FMT_MOD_TILE_VER_GFX9: {
		/*
		 * TODO: for pipe aligned we may need to check the alignment
		 * of the total size of the surface, which may need to be
		 * bigger than the natural alignment due to some HW
		 * workarounds.
		 */
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}
	case AMD_FMT_MOD_TILE_VER_GFX10:
	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		/* RB+ chips with as many packers as pipes use one extra bit. */
		if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}
	default:
		return 0;
	}
}
898
/*
 * Validate one framebuffer plane against its block geometry: the pitch
 * must be a block-pitch multiple and at least the minimum for the plane
 * width, the offset must be block-size aligned, and the backing BO must
 * be large enough to hold the plane.  Returns 0 or -EINVAL.
 */
static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
				       const struct drm_format_info *format,
				       unsigned int block_width, unsigned int block_height,
				       unsigned int block_size_log2)
{
	/* Only subsampled planes within the format description use hsub/vsub;
	 * plane 0 and any extra (e.g. DCC) planes are full resolution. */
	unsigned int width = rfb->base.width /
		((plane && plane < format->num_planes) ? format->hsub : 1);
	unsigned int height = rfb->base.height /
		((plane && plane < format->num_planes) ? format->vsub : 1);
	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
	unsigned int block_size = 1 << block_size_log2;
	uint64_t size;

	if (rfb->base.pitches[plane] % block_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
			    rfb->base.pitches[plane], plane, block_pitch);
		return -EINVAL;
	}
	if (rfb->base.pitches[plane] < min_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is less than minimum pitch %d\n",
			    rfb->base.pitches[plane], plane, min_pitch);
		return -EINVAL;
	}

	/* offset must be block-size aligned */
	if (rfb->base.offsets[plane] % block_size) {
		drm_dbg_kms(rfb->base.dev,
			    "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
			    rfb->base.offsets[plane], plane, block_size);
		return -EINVAL;
	}

	/* Required size: offset + (rows of blocks) * (blocks per row) * block size. */
	size = rfb->base.offsets[plane] +
		(uint64_t)rfb->base.pitches[plane] / block_pitch *
		block_size * DIV_ROUND_UP(height, block_height);

	if (rfb->base.obj[0]->size < size) {
		drm_dbg_kms(rfb->base.dev,
			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
			    rfb->base.obj[0]->size, size, plane);
		return -EINVAL;
	}

	return 0;
}
948
949
/*
 * Verify all planes of a framebuffer against its modifier's block layout:
 * the color planes first, then (for DCC modifiers) the extra metadata
 * plane(s).  Skipped entirely when modifiers are not enabled.
 */
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (!rfb->base.dev->mode_config.allow_fb_modifiers)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			/* Linear scanout requires 256-byte pitch alignment. */
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	/* NOTE: `i` carries over from the loop above, indexing the first
	 * plane after the color planes. */
	if (AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			/* Retile: verify the displayable DCC plane first... */
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			/* ...then fall through to the render DCC plane. */
			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}
1025
1026static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1027 uint64_t *tiling_flags, bool *tmz_surface)
1028{
1029 struct amdgpu_bo *rbo;
1030 int r;
1031
1032 if (!amdgpu_fb) {
1033 *tiling_flags = 0;
1034 *tmz_surface = false;
1035 return 0;
1036 }
1037
1038 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
1039 r = amdgpu_bo_reserve(rbo, false);
1040
1041 if (unlikely(r)) {
1042
1043 if (r != -ERESTARTSYS)
1044 DRM_ERROR("Unable to reserve buffer: %d\n", r);
1045 return r;
1046 }
1047
1048 if (tiling_flags)
1049 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1050
1051 if (tmz_surface)
1052 *tmz_surface = amdgpu_bo_encrypted(rbo);
1053
1054 amdgpu_bo_unreserve(rbo);
1055
1056 return r;
1057}
1058
1059int amdgpu_display_gem_fb_init(struct drm_device *dev,
1060 struct amdgpu_framebuffer *rfb,
1061 const struct drm_mode_fb_cmd2 *mode_cmd,
1062 struct drm_gem_object *obj)
1063{
1064 int ret;
1065
1066 rfb->base.obj[0] = obj;
1067 drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
1068
1069 ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
1070 if (ret)
1071 goto err;
1072
1073 ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
1074 if (ret)
1075 goto err;
1076
1077 return 0;
1078err:
1079 drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
1080 rfb->base.obj[0] = NULL;
1081 return ret;
1082}
1083
/*
 * amdgpu_display_gem_fb_verify_and_init - validated framebuffer creation
 *
 * Like amdgpu_display_gem_fb_init() but first rejects pixel-format /
 * modifier combinations that no plane on this device can scan out.
 * On failure obj[0] is cleared; the caller keeps its GEM reference.
 */
int amdgpu_display_gem_fb_verify_and_init(
	struct drm_device *dev, struct amdgpu_framebuffer *rfb,
	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
	struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	/* Verify that the modifier is supported. */
	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(dev,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);

		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}
1118
/*
 * amdgpu_display_framebuffer_init - amdgpu-specific framebuffer setup
 *
 * Validates that all planes share one BO, captures tiling/TMZ state,
 * converts legacy tiling flags to a modifier when needed, verifies plane
 * sizes, and duplicates the GEM reference into every plane slot.
 */
int amdgpu_display_framebuffer_init(struct drm_device *dev,
				    struct amdgpu_framebuffer *rfb,
				    const struct drm_mode_fb_cmd2 *mode_cmd,
				    struct drm_gem_object *obj)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int ret, i;

	/*
	 * This needs to happen before modifier conversion as that might
	 * change the number of planes.
	 */
	for (i = 1; i < rfb->base.format->num_planes; ++i) {
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
				    i, mode_cmd->handles[0], mode_cmd->handles[i]);
			ret = -EINVAL;
			return ret;
		}
	}

	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
	if (ret)
		return ret;

	/* Pre-modifier path: only legacy GFX6-style tiling can be checked. */
	if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) {
		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
			      "GFX9+ requires FB check based on format modifier\n");
		ret = check_tiling_flags_gfx6(rfb);
		if (ret)
			return ret;
	}

	/* Synthesize a modifier when userspace didn't supply one. */
	if (dev->mode_config.allow_fb_modifiers &&
	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
		ret = convert_tiling_flags_to_modifier(rfb);
		if (ret) {
			drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
				    rfb->tiling_flags);
			return ret;
		}
	}

	ret = amdgpu_display_verify_sizes(rfb);
	if (ret)
		return ret;

	/* All planes share obj[0]; take one reference per plane slot. */
	for (i = 0; i < rfb->base.format->num_planes; ++i) {
		drm_gem_object_get(rfb->base.obj[0]);
		rfb->base.obj[i] = rfb->base.obj[0];
	}

	return 0;
}
1173
1174struct drm_framebuffer *
1175amdgpu_display_user_framebuffer_create(struct drm_device *dev,
1176 struct drm_file *file_priv,
1177 const struct drm_mode_fb_cmd2 *mode_cmd)
1178{
1179 struct amdgpu_framebuffer *amdgpu_fb;
1180 struct drm_gem_object *obj;
1181 struct amdgpu_bo *bo;
1182 uint32_t domains;
1183 int ret;
1184
1185 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
1186 if (obj == NULL) {
1187 drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
1188 "can't create framebuffer\n", mode_cmd->handles[0]);
1189 return ERR_PTR(-ENOENT);
1190 }
1191
1192
1193 bo = gem_to_amdgpu_bo(obj);
1194 domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
1195 if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
1196 drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
1197 drm_gem_object_put(obj);
1198 return ERR_PTR(-EINVAL);
1199 }
1200
1201 amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
1202 if (amdgpu_fb == NULL) {
1203 drm_gem_object_put(obj);
1204 return ERR_PTR(-ENOMEM);
1205 }
1206
1207 ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
1208 mode_cmd, obj);
1209 if (ret) {
1210 kfree(amdgpu_fb);
1211 drm_gem_object_put(obj);
1212 return ERR_PTR(ret);
1213 }
1214
1215 drm_gem_object_put(obj);
1216 return &amdgpu_fb->base;
1217}
1218
/* Mode-config hooks: user framebuffer creation and fbdev poll notification. */
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};
1223
/* Value names for the "underscan" connector property. */
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};
1229
/* Value names for the "audio" connector property. */
static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};
1235
1236
/* Value names for the "dither" connector property. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
1241
1242int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
1243{
1244 int sz;
1245
1246 adev->mode_info.coherent_mode_property =
1247 drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
1248 if (!adev->mode_info.coherent_mode_property)
1249 return -ENOMEM;
1250
1251 adev->mode_info.load_detect_property =
1252 drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
1253 if (!adev->mode_info.load_detect_property)
1254 return -ENOMEM;
1255
1256 drm_mode_create_scaling_mode_property(adev_to_drm(adev));
1257
1258 sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
1259 adev->mode_info.underscan_property =
1260 drm_property_create_enum(adev_to_drm(adev), 0,
1261 "underscan",
1262 amdgpu_underscan_enum_list, sz);
1263
1264 adev->mode_info.underscan_hborder_property =
1265 drm_property_create_range(adev_to_drm(adev), 0,
1266 "underscan hborder", 0, 128);
1267 if (!adev->mode_info.underscan_hborder_property)
1268 return -ENOMEM;
1269
1270 adev->mode_info.underscan_vborder_property =
1271 drm_property_create_range(adev_to_drm(adev), 0,
1272 "underscan vborder", 0, 128);
1273 if (!adev->mode_info.underscan_vborder_property)
1274 return -ENOMEM;
1275
1276 sz = ARRAY_SIZE(amdgpu_audio_enum_list);
1277 adev->mode_info.audio_property =
1278 drm_property_create_enum(adev_to_drm(adev), 0,
1279 "audio",
1280 amdgpu_audio_enum_list, sz);
1281
1282 sz = ARRAY_SIZE(amdgpu_dither_enum_list);
1283 adev->mode_info.dither_property =
1284 drm_property_create_enum(adev_to_drm(adev), 0,
1285 "dither",
1286 amdgpu_dither_enum_list, sz);
1287
1288 if (amdgpu_device_has_dc_support(adev)) {
1289 adev->mode_info.abm_level_property =
1290 drm_property_create_range(adev_to_drm(adev), 0,
1291 "abm level", 0, 4);
1292 if (!adev->mode_info.abm_level_property)
1293 return -ENOMEM;
1294 }
1295
1296 return 0;
1297}
1298
1299void amdgpu_display_update_priority(struct amdgpu_device *adev)
1300{
1301
1302 if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
1303 adev->mode_info.disp_priority = 0;
1304 else
1305 adev->mode_info.disp_priority = amdgpu_disp_priority;
1306
1307}
1308
1309static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
1310{
1311
1312 if ((mode->vdisplay == 480 && mode->hdisplay == 720) ||
1313 (mode->vdisplay == 576) ||
1314 (mode->vdisplay == 720) ||
1315 (mode->vdisplay == 1080))
1316 return true;
1317 else
1318 return false;
1319}
1320
/*
 * CRTC mode_fixup helper: decide the scaler (RMX) mode and compute the
 * vertical/horizontal scaling factors for the CRTC, including optional
 * underscan borders for HDMI TVs.  Always returns true (the mode is
 * never rejected here).
 */
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		/* only look at encoders driven by this CRTC */
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/*
		 * Scale only when the requested mode is smaller than the
		 * encoder's native mode; otherwise leave the scaler off.
		 */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy the encoder's native mode; it is the scaling target */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/*
		 * Underscan for HDMI TVs: shrink the active area by a border
		 * (explicit, or 1/32 of the dimension + 16 lines/pixels by
		 * default) and scale the full mode into the reduced area.
		 * Skipped for interlaced modes.
		 */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	/* fixed-point src/dst ratios for the scaler; 1.0 when scaling is off */
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
/*
 * Query the current scanout position and vblank status of a CRTC.
 *
 * Returns a mask of DRM_SCANOUTPOS_* flags (VALID when the hardware query
 * succeeded, ACCURATE when the hardware also reported the vblank window,
 * IN_VBLANK when the position is inside vblank).  *vpos/*hpos receive the
 * vertical/horizontal scanout position; with GET_DISTANCE_TO_VBLANKSTART in
 * @flags they instead become distances (in lines) to the start of vblank.
 * Optional *stime/*etime are sampled immediately before/after the register
 * read so callers can bound the sampling time.
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = drm_to_adev(dev);

	/* optional timestamp taken right before the register read */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* optional timestamp taken right after the register read */
	if (etime)
		*etime = ktime_get();

	/* decode packed position: low 13 bits = line, bits 16.. = pixel */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* hardware-reported vblank window, same packing as position */
	if (vbl > 0) {
		/* hardware gave us the real vblank boundaries */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	}
	else {
		/* no hw data: assume vblank spans end-of-active to line 0 */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* caller wants distances to vblank start: hpos = lines to vbl_start */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		*hpos = *vpos - vbl_start;
	}

	/*
	 * Unless the caller asked for the real hardware vblank start, treat
	 * vblank as beginning lb_vblank_lead_lines earlier (line-buffer
	 * lead time kept in mode_info.crtcs[pipe]).
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* inside the active area [vbl_end, vbl_start) => not in vblank */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* distance mode: vpos relative to vblank start, done */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		*vpos -= vbl_start;
		return ret;
	}

	/*
	 * In vblank past the start of vblank: express the position as a
	 * negative offset from the next frame's start (wrap via vtotal).
	 */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* make vpos relative to the end of vblank (start of active area) */
	*vpos = *vpos - vbl_end;

	return ret;
}
1531
1532int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
1533{
1534 if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
1535 return AMDGPU_CRTC_IRQ_NONE;
1536
1537 switch (crtc) {
1538 case 0:
1539 return AMDGPU_CRTC_IRQ_VBLANK1;
1540 case 1:
1541 return AMDGPU_CRTC_IRQ_VBLANK2;
1542 case 2:
1543 return AMDGPU_CRTC_IRQ_VBLANK3;
1544 case 3:
1545 return AMDGPU_CRTC_IRQ_VBLANK4;
1546 case 4:
1547 return AMDGPU_CRTC_IRQ_VBLANK5;
1548 case 5:
1549 return AMDGPU_CRTC_IRQ_VBLANK6;
1550 default:
1551 return AMDGPU_CRTC_IRQ_NONE;
1552 }
1553}
1554
1555bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
1556 bool in_vblank_irq, int *vpos,
1557 int *hpos, ktime_t *stime, ktime_t *etime,
1558 const struct drm_display_mode *mode)
1559{
1560 struct drm_device *dev = crtc->dev;
1561 unsigned int pipe = crtc->index;
1562
1563 return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
1564 stime, etime, mode);
1565}
1566
/*
 * Prepare the display side for suspend: turn every connector off via DPMS,
 * then unpin each CRTC's cursor BO and primary framebuffer BO so they can
 * be evicted.  Reserve failures are deliberately ignored (best effort);
 * always returns 0.
 */
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_OFF);
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock_all(dev);

	/* unpin cursors and front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct amdgpu_bo *robj;

		/* virtual display has no real cursor BO to unpin */
		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		/* CRTC without a bound framebuffer: nothing to unpin */
		if (fb == NULL || fb->obj[0] == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(fb->obj[0]);
		r = amdgpu_bo_reserve(robj, true);
		if (r == 0) {
			amdgpu_bo_unpin(robj);
			amdgpu_bo_unreserve(robj);
		}
	}
	return 0;
}
1610
/*
 * Counterpart of amdgpu_display_suspend_helper(): re-pin the cursor BOs,
 * restore the previous modes and turn every connector back on via DPMS.
 * Always returns 0; cursor pin failures are only logged.
 */
int amdgpu_display_resume_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	int r;

	/* pin cursors back into VRAM */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
				if (r != 0)
					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
				/* NOTE(review): cursor_addr is refreshed even
				 * when the pin above failed — confirm this is
				 * intentional. */
				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	drm_helper_resume_force_mode(dev);

	/* turn on display hw */
	drm_modeset_lock_all(dev);

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_ON);
	drm_connector_list_iter_end(&iter);

	drm_modeset_unlock_all(dev);

	return 0;
}
1651
1652