/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

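/* Fence callback for a pending page flip: drop the fence reference and
 * kick the flip work off again once this fence has signaled.
 */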
static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

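/* Consume one fence of the flip work: returns true if the flip was
 * deferred behind the fence callback, false if the fence was NULL or
 * had already signaled (in which case its reference is dropped here).
 */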
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

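/* Deferred work that performs the actual flip: waits for all shared
 * fences, then programs the new base address via the mmio page_flip hook
 * once the CRTC is past the vblank targeted by the flip.
 */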
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(work->old_abo);
		amdgpu_bo_unreserve(work->old_abo);
	} else {
		DRM_ERROR("failed to reserve buffer after flip\n");
	}

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

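/* Queue a page flip that completes no earlier than the given target
 * vblank count: pins the new framebuffer BO, collects its fences and
 * hands the actual flip off to amdgpu_display_flip_work_func().
 */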
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

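/* Legacy (non-atomic) modeset entry point: wraps the CRTC helper and
 * manages a runtime PM reference that is held while any CRTC is enabled.
 */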
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

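/* Dump the connector/encoder routing parsed from the VBIOS to the kernel
 * log, including DDC line registers and HPD pin assignments.
 */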
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}

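/* Probe for a sink on the connector's DDC (or DP AUX) channel by reading
 * the first 8 EDID bytes and validating the EDID header.
 */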
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for valid EDID header: we can't detect monitors with a
	 * switched-off output here, so only sanity-check the header bytes.
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this connector */
		return false;
	}
	return true;
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

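/* Return the memory domains a buffer with the given creation flags may be
 * scanned out from: VRAM always, plus GTT when the ASIC/DC stack supports
 * USWC GTT scanout.
 */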
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * If amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
	 * to avoid hangs caused by placement of a scanout BO in GTT on certain
	 * APUs. So force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type) &&
	    adev->mode_info.gpu_vm_support)
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}

static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

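/* Map a format/modifier pair to the format info amdgpu uses internally;
 * DCC modifiers carry extra metadata planes, so they need format structs
 * with a higher plane count than the core defaults.
 */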
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return NULL;

	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
		return lookup_format_info(dcc_retile_formats,
					  ARRAY_SIZE(dcc_retile_formats),
					  format);

	if (AMD_FMT_MOD_GET(DCC, modifier))
		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
					  format);

	/* returning NULL will cause the default format structs to be used. */
	return NULL;
}

/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* version + PCI IDs + an 8-dword descriptor */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at
	 * least the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40 || metadata[0] != 1)
		return -EINVAL;

	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}

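/* Derive a DRM format modifier from the legacy GFX9+ tiling flags so that
 * userspace that only speaks modifiers (getfb2 etc.) sees consistent
 * framebuffer metadata.
 */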
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;

	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64KiB */
		case 6: /* 64KiB _X */
			block_size_bits = 16;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		switch (swizzle & 3) {
		case 0: /* Z microtiling */
			return -EINVAL;
		case 1: /* S microtiling */
			if (!has_xor)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 2:
			if (!has_xor && afb->base.format->cpp[0] != 4)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 3:
			break;
		}

		if (has_xor) {
			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode = adev->asic_type > CHIP_RAVEN ||
						   (adev->asic_type == CHIP_RAVEN &&
						    adev->external_rev_id >= 0x81);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

			/*
			 * If the userspace driver uses retiling the tiling flags do not contain
			 * info on the renderable DCC buffer. Luckily the opaque metadata contains
			 * the info so we can try to extract it. The kernel does not use this info
			 * but we should convert it to a modifier plane for getfb2, so the
			 * userspace driver that gets it doesn't have to juggle around another DCC
			 * plane internally.
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}

/* Mirrors the is_displayable check in radeonsi's gfx6 support */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
	u64 micro_tile_mode;

	/* Zero swizzle mode means linear */
	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
		return 0;

	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
	switch (micro_tile_mode) {
	case 0: /* DISPLAY */
	case 3: /* RENDER */
		return 0;
	default:
		drm_dbg_kms(afb->base.dev,
			    "Micro tile mode %llu not supported for scanout\n",
			    micro_tile_mode);
		return -EINVAL;
	}
}

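/* Compute the width/height (in pixels) of a tiling block of the given
 * total size (log2) for the given bytes-per-pixel.
 */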
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = ilog2(cpp);
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int width_log2 = (pixel_log2 + 1) / 2;
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1 << width_log2;
	*height = 1 << height_log2;
}

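/* Return the log2 of the number of base-surface bytes covered by one DCC
 * metadata block, which depends on the tiling version and the RB/pipe
 * alignment encoded in the modifier.
 */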
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	switch (ver) {
	case AMD_FMT_MOD_TILE_VER_GFX9: {
		/*
		 * TODO: for pipe aligned we may need to check the alignment of the
		 * total size of the surface, which may need to be bigger than the
		 * natural alignment due to some HW workarounds
		 */
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}
	case AMD_FMT_MOD_TILE_VER_GFX10:
	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}
	default:
		return 0;
	}
}

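/* Validate one framebuffer plane against the block geometry implied by
 * the modifier: pitch alignment, minimum pitch, offset alignment, and
 * that the backing BO is large enough to hold the plane.
 */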
static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
				       const struct drm_format_info *format,
				       unsigned int block_width, unsigned int block_height,
				       unsigned int block_size_log2)
{
	unsigned int width = rfb->base.width /
		((plane && plane < format->num_planes) ? format->hsub : 1);
	unsigned int height = rfb->base.height /
		((plane && plane < format->num_planes) ? format->vsub : 1);
	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
	unsigned int block_size = 1 << block_size_log2;
	uint64_t size;

	if (rfb->base.pitches[plane] % block_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
			    rfb->base.pitches[plane], plane, block_pitch);
		return -EINVAL;
	}
	if (rfb->base.pitches[plane] < min_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is less than minimum pitch %d\n",
			    rfb->base.pitches[plane], plane, min_pitch);
		return -EINVAL;
	}

	/* Force at least natural alignment. */
	if (rfb->base.offsets[plane] % block_size) {
		drm_dbg_kms(rfb->base.dev,
			    "offset 0x%x for plane %d is not a multiple of block size 0x%x\n",
			    rfb->base.offsets[plane], plane, block_size);
		return -EINVAL;
	}

	size = rfb->base.offsets[plane] +
		(uint64_t)rfb->base.pitches[plane] / block_pitch *
		block_size * DIV_ROUND_UP(height, block_height);

	if (rfb->base.obj[0]->size < size) {
		drm_dbg_kms(rfb->base.dev,
			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
			    rfb->base.obj[0]->size, size, plane);
		return -EINVAL;
	}

	return 0;
}

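/* Validate all planes of a framebuffer, including the extra DCC metadata
 * planes implied by AMD modifiers.
 */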
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	if (AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}

static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

int amdgpu_display_gem_fb_init(struct drm_device *dev,
			       struct amdgpu_framebuffer *rfb,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}

int amdgpu_display_gem_fb_verify_and_init(
	struct drm_device *dev, struct amdgpu_framebuffer *rfb,
	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
	struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);

	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(dev,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);

		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}

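/* Fill in the amdgpu-specific framebuffer state: checks that all planes
 * share one BO, reads tiling/TMZ info, converts legacy tiling flags to a
 * modifier when needed, and verifies plane sizes.
 */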
int amdgpu_display_framebuffer_init(struct drm_device *dev,
				    struct amdgpu_framebuffer *rfb,
				    const struct drm_mode_fb_cmd2 *mode_cmd,
				    struct drm_gem_object *obj)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int ret, i;

	/*
	 * This needs to happen before modifier conversion as that might change
	 * the number of planes.
	 */
	for (i = 1; i < rfb->base.format->num_planes; ++i) {
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
				    i, mode_cmd->handles[0], mode_cmd->handles[i]);
			return -EINVAL;
		}
	}

	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
	if (ret)
		return ret;

	if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
			      "GFX9+ requires FB check based on format modifier\n");
		ret = check_tiling_flags_gfx6(rfb);
		if (ret)
			return ret;
	}

	if (!dev->mode_config.fb_modifiers_not_supported &&
	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
		ret = convert_tiling_flags_to_modifier(rfb);
		if (ret) {
			drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
				    rfb->tiling_flags);
			return ret;
		}
	}

	ret = amdgpu_display_verify_sizes(rfb);
	if (ret)
		return ret;

	for (i = 0; i < rfb->base.format->num_planes; ++i) {
		drm_gem_object_get(rfb->base.obj[0]);
		rfb->base.obj[i] = rfb->base.obj[0];
	}

	return 0;
}

struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint32_t domains;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		drm_dbg_kms(dev,
			    "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
			    mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	bo = gem_to_amdgpu_bo(obj);
	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
		drm_gem_object_put(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
						    mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put(obj);
		return ERR_PTR(ret);
	}

	drm_gem_object_put(obj);
	return &amdgpu_fb->base;
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev_to_drm(adev));

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	if (amdgpu_device_has_dc_support(adev)) {
		adev->mode_info.abm_level_property =
			drm_property_create_range(adev_to_drm(adev), 0,
						  "abm level", 0, 4);
		if (!adev->mode_info.abm_level_property)
			return -ENOMEM;
	}

	return 0;
}

void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080i/p */
		return true;
	else
		return false;
}

bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					    const struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

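/**
 * amdgpu_display_get_crtc_scanoutpos - return the current scanout position
 * @dev: drm device
 * @pipe: crtc to query
 * @flags: 0, or USE_REAL_VBLANKSTART / GET_DISTANCE_TO_VBLANKSTART for
 *         driver-internal callers
 * @vpos: location for the vertical scanout position
 * @hpos: location for the horizontal scanout position
 * @stime: optional timestamp taken immediately before the position query
 * @etime: optional timestamp taken immediately after the position query
 * @mode: display mode the CRTC is currently scanning out
 *
 * Returns vpos as a positive number while in active scanout area and as a
 * negative number inside vblank, counting the number of scanlines to go
 * until end of vblank, e.g. -1 means "one scanline until start of active
 * scanout / end of vblank."
 *
 * Returns DRM_SCANOUTPOS_VALID, DRM_SCANOUTPOS_ACCURATE and
 * DRM_SCANOUTPOS_IN_VBLANK or'ed together, as applicable.
 */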
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = drm_to_adev(dev);

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >= 0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}

bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
				      bool in_vblank_irq, int *vpos,
				      int *hpos, ktime_t *stime, ktime_t *etime,
				      const struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;

	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
						  stime, etime, mode);
}

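/* Display-side suspend helper: turns off all connectors and unpins the
 * cursor and framebuffer BOs so VRAM contents can be evicted.
 */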
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_OFF);
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL)
			continue;

		robj = gem_to_amdgpu_bo(fb->obj[0]);
		r = amdgpu_bo_reserve(robj, true);
		if (r == 0) {
			amdgpu_bo_unpin(robj);
			amdgpu_bo_unreserve(robj);
		}
	}
	return 0;
}

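/* Display-side resume helper: re-pins cursor BOs, restores modes and
 * turns connectors back on.
 */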
int amdgpu_display_resume_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	int r;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
				if (r != 0)
					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	drm_helper_resume_force_mode(dev);

	/* turn on display hw */
	drm_modeset_lock_all(dev);

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_ON);
	drm_connector_list_iter_end(&iter);

	drm_modeset_unlock_all(dev);

	return 0;
}