linux/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by module parameter then MSI
 * support will be enabled.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with mapping between virtual and hardware IRQs).
 */
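
/*
 * Rough life cycle of an interrupt, using the helpers in this file
 * (illustrative summary only, not additional driver code):
 *
 *   IP block init:   amdgpu_irq_add_id(adev, client_id, src_id, &src);
 *   enable a type:   amdgpu_irq_get(adev, &src, type);
 *   hardware IRQ:    amdgpu_irq_handler() -> amdgpu_ih_process() ->
 *                    amdgpu_irq_dispatch() -> src.funcs->process();
 *   disable a type:  amdgpu_irq_put(adev, &src, type);
 */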

#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

/**
 * amdgpu_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector.  After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * This design approach is required in order to defer hotplug event handling
 * from the IRQ handler to a work handler because the hotplug handler has to
 * use mutexes which cannot be locked in an IRQ handler (since &mutex_lock
 * may sleep).
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  hotplug_work);
        struct drm_device *dev = adev->ddev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;

        mutex_lock(&mode_config->mutex);
        list_for_each_entry(connector, &mode_config->connector_list, head)
                amdgpu_connector_hotplug(connector);
        mutex_unlock(&mode_config->mutex);
        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
        unsigned long irqflags;
        unsigned i, j, k;
        int r;

        spin_lock_irqsave(&adev->irq.lock, irqflags);
        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src || !src->funcs->set || !src->num_types)
                                continue;

                        for (k = 0; k < src->num_types; ++k) {
                                atomic_set(&src->enabled_types[k], 0);
                                r = src->funcs->set(adev, src, k,
                                                    AMDGPU_IRQ_STATE_DISABLE);
                                if (r)
                                        DRM_ERROR("error disabling interrupt (%d)\n",
                                                  r);
                        }
                }
        }
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @irq: IRQ number (unused)
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        struct amdgpu_device *adev = dev->dev_private;
        irqreturn_t ret;

        ret = amdgpu_ih_process(adev, &adev->irq.ih);
        if (ret == IRQ_HANDLED)
                pm_runtime_mark_last_busy(dev->dev);
        return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih1_work);

        amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih2_work);

        amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
        if (amdgpu_msi == 1)
                return true;
        else if (amdgpu_msi == 0)
                return false;

        return true;
}

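/*
 * Note: the amdgpu_msi value tested above comes from the driver's "msi"
 * module parameter (declared outside this file).  For example, booting with
 * amdgpu.msi=0 on the kernel command line disables MSIs, amdgpu.msi=1 forces
 * them on, and any other value (the default) takes the auto path above.
 */
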
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up work functions for hotplug and reset interrupts, enables MSI
 * functionality, initializes vblank, hotplug and reset interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
        int r = 0;

        spin_lock_init(&adev->irq.lock);

        /* Enable MSI if not disabled by module parameter */
        adev->irq.msi_enabled = false;

        if (amdgpu_msi_ok(adev)) {
                int ret = pci_enable_msi(adev->pdev);
                if (!ret) {
                        adev->irq.msi_enabled = true;
                        dev_dbg(adev->dev, "amdgpu: using MSI.\n");
                }
        }

        if (!amdgpu_device_has_dc_support(adev)) {
                if (!adev->enable_virtual_display)
                        /* Disable vblank IRQs aggressively for power-saving */
                        /* XXX: can this be enabled for DC? */
                        adev->ddev->vblank_disable_immediate = true;

                r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
                if (r)
                        return r;

                /* Pre-DCE11 */
                INIT_WORK(&adev->hotplug_work,
                                amdgpu_hotplug_work_func);
        }

        INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
        INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);

        adev->irq.installed = true;
        r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
        if (r) {
                adev->irq.installed = false;
                if (!amdgpu_device_has_dc_support(adev))
                        flush_work(&adev->hotplug_work);
                return r;
        }
        adev->ddev->max_vblank_count = 0x00ffffff;

        DRM_DEBUG("amdgpu: irq initialized.\n");
        return 0;
}

/**
 * amdgpu_irq_fini - shut down interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Tears down work functions for hotplug and reset interrupts, disables MSI
 * functionality, shuts down vblank, hotplug and reset interrupt handling,
 * turns off interrupts from all sources (all ASICs).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
        unsigned i, j;

        if (adev->irq.installed) {
                drm_irq_uninstall(adev->ddev);
                adev->irq.installed = false;
                if (adev->irq.msi_enabled)
                        pci_disable_msi(adev->pdev);
                if (!amdgpu_device_has_dc_support(adev))
                        flush_work(&adev->hotplug_work);
        }

        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src)
                                continue;

                        kfree(src->enabled_types);
                        src->enabled_types = NULL;
                        if (src->data) {
                                kfree(src->data);
                                kfree(src);
                                adev->irq.client[i].sources[j] = NULL;
                        }
                }
                kfree(adev->irq.client[i].sources);
                adev->irq.client[i].sources = NULL;
        }
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
                      unsigned client_id, unsigned src_id,
                      struct amdgpu_irq_src *source)
{
        if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
                return -EINVAL;

        if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
                return -EINVAL;

        if (!source->funcs)
                return -EINVAL;

        if (!adev->irq.client[client_id].sources) {
                adev->irq.client[client_id].sources =
                        kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
                                sizeof(struct amdgpu_irq_src *),
                                GFP_KERNEL);
                if (!adev->irq.client[client_id].sources)
                        return -ENOMEM;
        }

        if (adev->irq.client[client_id].sources[src_id] != NULL)
                return -EINVAL;

        if (source->num_types && !source->enabled_types) {
                atomic_t *types;

                types = kcalloc(source->num_types, sizeof(atomic_t),
                                GFP_KERNEL);
                if (!types)
                        return -ENOMEM;

                source->enabled_types = types;
        }

        adev->irq.client[client_id].sources[src_id] = source;
        return 0;
}
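
/*
 * Typical usage (illustrative sketch only; the "my_block" names are
 * hypothetical, but the pattern matches how IP blocks register with this
 * file): an IP block provides an amdgpu_irq_src_funcs with .set and .process
 * callbacks and registers its source during sw_init, e.g.
 *
 *   static const struct amdgpu_irq_src_funcs my_block_irq_funcs = {
 *           .set = my_block_set_irq_state,
 *           .process = my_block_process_irq,
 *   };
 *
 *   adev->my_block.irq.funcs = &my_block_irq_funcs;
 *   adev->my_block.irq.num_types = 1;
 *   r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, my_src_id,
 *                         &adev->my_block.irq);
 */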

/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring to process
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                         struct amdgpu_ih_ring *ih)
{
        u32 ring_index = ih->rptr >> 2;
        struct amdgpu_iv_entry entry;
        unsigned client_id, src_id;
        struct amdgpu_irq_src *src;
        bool handled = false;
        int r;

        entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
        amdgpu_ih_decode_iv(adev, &entry);

        trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

        client_id = entry.client_id;
        src_id = entry.src_id;

        if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
                DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

        } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
                DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

        } else if (adev->irq.virq[src_id]) {
                generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));

        } else if (!adev->irq.client[client_id].sources) {
                DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
                          client_id, src_id);

        } else if ((src = adev->irq.client[client_id].sources[src_id])) {
                r = src->funcs->process(adev, src, &entry);
                if (r < 0)
                        DRM_ERROR("error processing interrupt (%d)\n", r);
                else if (r)
                        handled = true;

        } else {
                DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
        }

        /* Send it to amdkfd as well if it isn't already handled */
        if (!handled)
                amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}

/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
                      struct amdgpu_irq_src *src, unsigned type)
{
        unsigned long irqflags;
        enum amdgpu_interrupt_state state;
        int r;

        spin_lock_irqsave(&adev->irq.lock, irqflags);

        /* We need to determine the state after taking the lock, otherwise
           we might disable just enabled interrupts again */
        if (amdgpu_irq_enabled(adev, src, type))
                state = AMDGPU_IRQ_STATE_ENABLE;
        else
                state = AMDGPU_IRQ_STATE_DISABLE;

        r = src->funcs->set(adev, src, type, state);
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
        return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
        int i, j, k;

        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src)
                                continue;
                        for (k = 0; k < src->num_types; k++)
                                amdgpu_irq_update(adev, src, k);
                }
        }
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return -ENOENT;

        if (type >= src->num_types)
                return -EINVAL;

        if (!src->enabled_types || !src->funcs->set)
                return -EINVAL;

        if (atomic_inc_return(&src->enabled_types[type]) == 1)
                return amdgpu_irq_update(adev, src, type);

        return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return -ENOENT;

        if (type >= src->num_types)
                return -EINVAL;

        if (!src->enabled_types || !src->funcs->set)
                return -EINVAL;

        if (atomic_dec_and_test(&src->enabled_types[type]))
                return amdgpu_irq_update(adev, src, type);

        return 0;
}
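
/*
 * amdgpu_irq_get() and amdgpu_irq_put() are reference counted per interrupt
 * type: the hardware state is only touched on the 0 -> 1 and 1 -> 0
 * transitions of enabled_types[type], so calls must be balanced.  A typical
 * pairing (illustrative sketch, "my_block" is a hypothetical IP block):
 *
 *   r = amdgpu_irq_get(adev, &adev->my_block.irq, 0);   // hw_init/resume
 *   ...
 *   amdgpu_irq_put(adev, &adev->my_block.irq, 0);       // hw_fini/suspend
 */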

/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return false;

        if (type >= src->num_types)
                return false;

        if (!src->enabled_types || !src->funcs->set)
                return false;

        return !!atomic_read(&src->enabled_types[type]);
}

/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
        /* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
        /* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
        .name = "amdgpu-ih",
        .irq_mask = amdgpu_irq_mask,
        .irq_unmask = amdgpu_irq_unmask,
};

/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware irq number
 *
 * Current implementation assigns a simple interrupt handler to the given
 * virtual IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
                                unsigned int irq, irq_hw_number_t hwirq)
{
        if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
                return -EPERM;

        irq_set_chip_and_handler(irq,
                                 &amdgpu_irq_chip, handle_simple_irq);
        return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
        .map = amdgpu_irqdomain_map,
};

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
        adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
                                                 &amdgpu_hw_irqdomain_ops, adev);
        if (!adev->irq.domain) {
                DRM_ERROR("GPU irq add domain failed\n");
                return -ENODEV;
        }

        return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
        if (adev->irq.domain) {
                irq_domain_remove(adev->irq.domain);
                adev->irq.domain = NULL;
        }
}

/**
 * amdgpu_irq_create_mapping - create a mapping between a domain IRQ and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
        adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

        return adev->irq.virq[src_id];
}

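/*
 * Once a src_id has a virq mapping, amdgpu_irq_dispatch() above forwards its
 * interrupt vectors to generic_handle_irq() instead of a driver-internal
 * source, so the owning driver can use the normal Linux IRQ APIs.  A rough
 * sketch of the consumer side (the handler and data names are hypothetical):
 *
 *   unsigned int virq = amdgpu_irq_create_mapping(adev, my_src_id);
 *
 *   r = request_irq(virq, my_external_handler, 0, "my-device", my_data);
 */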