/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj);

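/* Fence callback for a pending page flip: drop the fence reference and
 * kick off the deferred flip work.
 */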
static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

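/* Returns true if a callback was installed on the fence, meaning the flip
 * work will be re-kicked once the fence signals; false if the fence was
 * NULL or already signaled and the caller can proceed immediately.
 */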
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

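/* Deferred page flip work: waits for all shared fences, re-schedules itself
 * while the CRTC is still inside the vblank period before the one targeted
 * by the flip, and finally programs the flip through the mmio page_flip hook.
 */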
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
		    amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(work->old_abo);
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

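/* Page flip with a target vblank: pins the new framebuffer's BO, collects
 * its write fences and queues amdgpu_flip_work that performs the flip once
 * the target vblank is reached. This is typically wired up as the
 * .page_flip_target hook of the legacy (non-DC) DCE crtc funcs.
 */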
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

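/* Wraps drm_crtc_helper_set_config() with runtime PM bookkeeping: keeps a
 * power reference while any CRTC is enabled and drops it once all are off.
 */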
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

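/* Dump the connector/encoder topology (HPD pins, DDC registers, routers)
 * to the kernel log; mainly useful when debugging display bring-up.
 */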
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}

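/* Probe the DDC lines of a connector: a 1-byte write followed by an 8-byte
 * read at DDC_ADDR must succeed and return a plausible EDID header.
 */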
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_header_is_valid() can fix the last 2 bytes
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this connector */
		return false;
	}
	return true;
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * If amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
	 * to avoid hangs caused by placement of scanout BO in GTT on certain
	 * APUs. So force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type) &&
	    adev->mode_info.gpu_vm_support)
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}

static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

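/* AMD modifiers with DCC carry extra metadata planes, so the generic format
 * info has to be overridden with the 2- or 3-plane variants defined above.
 */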
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return NULL;

	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
		return lookup_format_info(dcc_retile_formats,
					  ARRAY_SIZE(dcc_retile_formats),
					  format);

	if (AMD_FMT_MOD_GET(DCC, modifier))
		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
					  format);

	/* returning NULL will cause the default format structs to be used. */
	return NULL;
}

/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at
	 * least the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40 || metadata[0] != 1)
		return -EINVAL;

	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* combine the meta data address from descriptor words 8 and 9 */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* combine the meta data address from descriptor words 7 and 9 */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}

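/* Translate the legacy tiling flags stored in the BO metadata into a DRM
 * format modifier, so that pre-modifier userspace keeps working on GFX9+.
 */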
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;

	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64KiB _T */
		case 6: /* 64KiB _X */
			block_size_bits = 16;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		switch (swizzle & 3) {
		case 0: /* linear */
			return -EINVAL;
		case 1: /* S swizzle */
			if (!has_xor)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 2: /* D swizzle */
			if (!has_xor && afb->base.format->cpp[0] != 4)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 3: /* R swizzle */
			break;
		}

		if (has_xor) {
			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* DCC constant encode is only supported on Raven2 and newer. */
			bool dcc_constant_encode = adev->asic_type > CHIP_RAVEN ||
						   (adev->asic_type == CHIP_RAVEN &&
						    adev->external_rev_id >= 0x81);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

			/*
			 * If the userspace driver uses retiling, the tiling flags do not
			 * contain the offset of the displayable DCC buffer; look it up
			 * from the BO metadata instead. A zero offset, or one equal to
			 * offsets[1], means no retiling is in use.
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}

static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
	u64 micro_tile_mode;

	/* Zero swizzle mode means linear */
	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
		return 0;

	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
	switch (micro_tile_mode) {
	case 0: /* DISPLAY */
	case 3: /* RENDER */
		return 0;
	default:
		drm_dbg_kms(afb->base.dev,
			    "Micro tile mode %llu not supported for scanout\n",
			    micro_tile_mode);
		return -EINVAL;
	}
}

static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = ilog2(cpp);
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int width_log2 = (pixel_log2 + 1) / 2;
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1 << width_log2;
	*height = 1 << height_log2;
}

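/* Returns the log2 of the surface area (in bytes) that one DCC block covers
 * for the given modifier, clamped to at least 4KiB (2^12).
 */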
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	switch (ver) {
	case AMD_FMT_MOD_TILE_VER_GFX9: {
		/*
		 * pipe_aligned is not used on GFX9: the RB count alone
		 * determines the block size, with a 4KiB minimum.
		 */
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}
	case AMD_FMT_MOD_TILE_VER_GFX10:
	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}
	default:
		return 0;
	}
}

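/* Validate one framebuffer plane: pitch alignment, offset alignment and
 * that the backing BO is large enough to hold the whole plane.
 */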
static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
				       const struct drm_format_info *format,
				       unsigned int block_width, unsigned int block_height,
				       unsigned int block_size_log2)
{
	unsigned int width = rfb->base.width /
		((plane && plane < format->num_planes) ? format->hsub : 1);
	unsigned int height = rfb->base.height /
		((plane && plane < format->num_planes) ? format->vsub : 1);
	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
	unsigned int block_size = 1 << block_size_log2;
	uint64_t size;

	if (rfb->base.pitches[plane] % block_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
			    rfb->base.pitches[plane], plane, block_pitch);
		return -EINVAL;
	}
	if (rfb->base.pitches[plane] < min_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is less than minimum pitch %d\n",
			    rfb->base.pitches[plane], plane, min_pitch);
		return -EINVAL;
	}

	/* Force at least natural alignment. */
	if (rfb->base.offsets[plane] % block_size) {
		drm_dbg_kms(rfb->base.dev,
			    "offset 0x%x for plane %d is not a multiple of block size 0x%x\n",
			    rfb->base.offsets[plane], plane, block_size);
		return -EINVAL;
	}

	size = rfb->base.offsets[plane] +
		(uint64_t)rfb->base.pitches[plane] / block_pitch *
		block_size * DIV_ROUND_UP(height, block_height);

	if (rfb->base.obj[0]->size < size) {
		drm_dbg_kms(rfb->base.dev,
			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
			    rfb->base.obj[0]->size, size, plane);
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	if (AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}

static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
						 struct amdgpu_framebuffer *rfb,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd,
						 struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);

	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(dev,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);

		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int ret, i;

	/*
	 * This needs to happen before modifier conversion as that might change
	 * the number of planes.
	 */
	for (i = 1; i < rfb->base.format->num_planes; ++i) {
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
				    i, mode_cmd->handles[0], mode_cmd->handles[i]);
			ret = -EINVAL;
			return ret;
		}
	}

	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
	if (ret)
		return ret;

	if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
			      "GFX9+ requires FB check based on format modifier\n");
		ret = check_tiling_flags_gfx6(rfb);
		if (ret)
			return ret;
	}

	if (!dev->mode_config.fb_modifiers_not_supported &&
	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
		ret = convert_tiling_flags_to_modifier(rfb);
		if (ret) {
			drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
				    rfb->tiling_flags);
			return ret;
		}
	}

	ret = amdgpu_display_verify_sizes(rfb);
	if (ret)
		return ret;

	for (i = 0; i < rfb->base.format->num_planes; ++i) {
		drm_gem_object_get(rfb->base.obj[0]);
		rfb->base.obj[i] = rfb->base.obj[0];
	}

	return 0;
}

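/* .fb_create implementation (see amdgpu_mode_funcs below): looks up the GEM
 * object for the first handle, rejects imported dma-bufs that cannot be
 * scanned out, and wraps the BO in an amdgpu_framebuffer.
 */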
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint32_t domains;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		drm_dbg_kms(dev,
			    "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
			    mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	bo = gem_to_amdgpu_bo(obj);
	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
		drm_gem_object_put(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
						    mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put(obj);
		return ERR_PTR(ret);
	}

	/* the framebuffer holds its own references now; drop the lookup one */
	drm_gem_object_put(obj);
	return &amdgpu_fb->base;
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev_to_drm(adev));

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	if (amdgpu_device_has_dc_support(adev)) {
		adev->mode_info.abm_level_property =
			drm_property_create_range(adev_to_drm(adev), 0,
						  "abm level", 0, 4);
		if (!adev->mode_info.abm_level_property)
			return -ENOMEM;
	}

	return 0;
}

void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080i/p */
		return true;
	else
		return false;
}

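/* CRTC mode fixup: derive the RMX scaler type and underscan borders from
 * the encoders driving this CRTC and precompute the resulting horizontal
 * and vertical scale factors.
 */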
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					    const struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve the current video scanout position of a crtc and, optionally,
 * precise timestamps taken around the position query.
 *
 * @dev: drm device to query
 * @pipe: crtc to query
 * @flags: 0, or driver internal flags:
 *         USE_REAL_VBLANKSTART to use the true start of vblank instead
 *         of the fudged earlier start (see lb_vblank_lead_lines below).
 *         GET_DISTANCE_TO_VBLANKSTART to return the distance to the fudged
 *         vblank start in *vpos and to the true vblank start in *hpos.
 * @vpos: location where the vertical scanout position should be stored
 * @hpos: location where the horizontal scanout position should go
 * @stime: timestamp taken immediately before the scanout position query;
 *         can be NULL to skip the timestamp
 * @etime: timestamp taken immediately after the scanout position query;
 *         can be NULL to skip the timestamp
 * @mode: display mode the crtc is scanning out
 *
 * Returns vpos as a positive number while in the active scanout area, and
 * as a negative number inside vblank, counting the number of scanlines to
 * go until end of vblank, e.g. -1 means "one scanline until start of
 * active scanout / end of vblank".
 *
 * Return: flags, or'ed together from DRM_SCANOUTPOS_VALID (query
 * successful), DRM_SCANOUTPOS_IN_VBLANK (inside vblank) and
 * DRM_SCANOUTPOS_ACCURATE (a lack of this flag means the returned position
 * may be offset by a constant but unknown small number of scanlines).
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = drm_to_adev(dev);

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >= 0 in the video scanout area, but negative
	 * within the vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}

bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
				      bool in_vblank_irq, int *vpos,
				      int *hpos, ktime_t *stime, ktime_t *etime,
				      const struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;

	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
						  stime, etime, mode);
}

static bool
amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_fb_helper *fb_helper = dev->fb_helper;

	if (!fb_helper || !fb_helper->buffer)
		return false;

	if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
		return false;

	return true;
}

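/* Suspend path: turn off the display hardware and unpin cursor and scanout
 * BOs so VRAM can be evicted.
 */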
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_OFF);
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL)
			continue;

		robj = gem_to_amdgpu_bo(fb->obj[0]);
		if (!amdgpu_display_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	return 0;
}

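/* Resume path: re-pin cursor BOs, force a modeset and turn the display
 * hardware back on.
 */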
int amdgpu_display_resume_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	int r;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
				if (r != 0)
					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	drm_helper_resume_force_mode(dev);

	/* turn on display hw */
	drm_modeset_lock_all(dev);

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_ON);
	drm_connector_list_iter_end(&iter);

	drm_modeset_unlock_all(dev);

	return 0;
}