linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls on interrupts. This generic handler
 * looks up the IRQ table, and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen
 * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
 * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks
 * up DM's IRQ tables. However, in order for the base driver to recognize this
 * hook, DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
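
/*
 * A rough sketch of the resulting interrupt flow, assuming a handler was
 * registered for the triggered source in both contexts (all function names
 * below appear in this file or in the overview above):
 *
 *	amdgpu_irq_handler()                     base driver, ISR context
 *	  -> amdgpu_dm_irq_handler()             DM generic handler
 *	     -> amdgpu_dm_irq_immediate_work()   high context, runs in ISR
 *	     -> amdgpu_dm_irq_schedule_work()    low context, queued to a
 *	                                         workqueue (dm_irq_work_func)
 */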

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

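/*
 * Both handler tables are guarded by this one lock. The irqsave variant is
 * required because the tables are also walked from interrupt context; see
 * amdgpu_dm_irq_handler().
 */
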
/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 *
 * Runs in process (workqueue) context, so the handlers called here are
 * allowed to sleep.
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct irq_list_head *irq_list_head =
		container_of(work, struct irq_list_head, work);
	struct list_head *handler_list = &irq_list_head->head;
	struct amdgpu_dm_irq_handler_data *handler_data;

	list_for_each_entry(handler_data, handler_list, list) {
		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
			      handler_data->irq_source);

		/* Call a DAL subcomponent which registered for interrupt
		 * notification at INTERRUPT_LOW_IRQ_CONTEXT.
		 * (The most common use is HPD interrupt.)
		 */
		handler_data->handler(handler_data->handler_arg);
	}
}

/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {
		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (ih == handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (!handler_removed) {
		/* Not necessarily an error - the caller may not
		 * know the context.
		 */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (!int_params || !ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
		return false;
	}

	return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in a FIFO manner, i.e. the first registered
 * handler is the first to be called.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 *         source, handler function, and args
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by the code which requested interrupt
	 * registration.
	 * The same pointer will be needed in order to unregister the
	 * interrupt.
	 */

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}
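
/*
 * A minimal usage sketch (not taken verbatim from any caller): a registrant
 * such as dce110_register_irq_handlers() fills in the interrupt parameters
 * and keeps the returned pointer around for later unregistration.
 * handle_hpd_irq and aconnector below are illustrative names only, not
 * defined in this file:
 *
 *	struct dc_interrupt_params int_params = {0};
 *	void *handler;
 *
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *	handler = amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *						   handle_hpd_irq, aconnector);
 *	...
 *	amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1, handler);
 */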

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Function pointer to the interrupt handler to unregister
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it. Otherwise, do nothing.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found.
		 */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * The low context table requires special steps to initialize, since handlers
 * will be deferred to a workqueue. See &struct irq_list_head.
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(&lh->head);
		INIT_WORK(&lh->work, dm_irq_work_func);

		/* high context handler init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}
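
/*
 * A rough sketch of the table layout after init (DAL_IRQ_SOURCES_NUMBER is
 * provided by DC):
 *
 *	irq_handler_list_high_tab[src]: list_head -> handler_data -> ...
 *	irq_handler_list_low_tab[src]:  { work_struct, list_head -> ... }
 *
 * High context entries are plain handler lists; low context entries pair the
 * list with the work item that dm_irq_work_func() drains.
 */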

/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* The handlers were already removed from the table,
		 * so it is safe to flush the 'work' (because no code
		 * can schedule a new one).
		 */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&lh->work);
	}
}

int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/*
	 * Disable HW interrupts for HPD and HPD RX only, since FLIP and
	 * VBLANK interrupts will be disabled from manage_dm_interrupts()
	 * when the CRTC is disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

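		/* flush_work() can sleep, and the table lock is a spinlock
		 * held with IRQs disabled, so release it before flushing.
		 */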
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the HPD RX (short pulse) HW interrupts. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}

int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/*
	 * Re-enable HW interrupts for HPD only, since FLIP and VBLANK
	 * interrupts will be enabled from manage_dm_interrupts() when
	 * the CRTC is enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	unsigned long irq_table_flags;
	struct work_struct *work = NULL;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (work) {
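		/* schedule_work() returns false when the work is already
		 * queued, so this is not necessarily an error.
		 */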
		if (!schedule_work(work))
			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
				 irq_source);
	}
}

/*
 * amdgpu_dm_irq_immediate_work
 * Call the registered high context handlers immediately; don't defer to a
 * work queue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		/* Call a subcomponent which registered for immediate
		 * interrupt notification.
		 */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);

	/* Schedule low irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR("%s: crtc is NULL at id :%d\n", func, crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

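	/* The DC IRQ sources for a given type are assumed to be laid out
	 * consecutively per OTG instance, so e.g. IRQ_TYPE_VBLANK plus
	 * otg_inst 1 yields the VBLANK source of the second OTG.
	 */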
	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_PFLIP, __func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VBLANK, __func__);
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VUPDATE, __func__);
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					dc_link->irq_source_hpd,
					true);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					dc_link->irq_source_hpd_rx,
					true);
		}
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		/* Mirror the validity check done in amdgpu_dm_hpd_init(). */
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					dc_link->irq_source_hpd,
					false);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					dc_link->irq_source_hpd_rx,
					false);
		}
	}
	drm_connector_list_iter_end(&iter);
}