linux/drivers/gpu/drm/i915/i915_driver.c
   1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
   2 */
   3/*
   4 *
   5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
   6 * All Rights Reserved.
   7 *
   8 * Permission is hereby granted, free of charge, to any person obtaining a
   9 * copy of this software and associated documentation files (the
  10 * "Software"), to deal in the Software without restriction, including
  11 * without limitation the rights to use, copy, modify, merge, publish,
  12 * distribute, sub license, and/or sell copies of the Software, and to
  13 * permit persons to whom the Software is furnished to do so, subject to
  14 * the following conditions:
  15 *
  16 * The above copyright notice and this permission notice (including the
  17 * next paragraph) shall be included in all copies or substantial portions
  18 * of the Software.
  19 *
  20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  27 *
  28 */
  29
  30#include <linux/acpi.h>
  31#include <linux/device.h>
  32#include <linux/module.h>
  33#include <linux/oom.h>
  34#include <linux/pci.h>
  35#include <linux/pm.h>
  36#include <linux/pm_runtime.h>
  37#include <linux/pnp.h>
  38#include <linux/slab.h>
  39#include <linux/vga_switcheroo.h>
  40#include <linux/vt.h>
  41
  42#include <drm/drm_aperture.h>
  43#include <drm/drm_atomic_helper.h>
  44#include <drm/drm_ioctl.h>
  45#include <drm/drm_managed.h>
  46#include <drm/drm_probe_helper.h>
  47
  48#include "display/intel_acpi.h"
  49#include "display/intel_bw.h"
  50#include "display/intel_cdclk.h"
  51#include "display/intel_display_types.h"
  52#include "display/intel_dmc.h"
  53#include "display/intel_dp.h"
  54#include "display/intel_dpt.h"
  55#include "display/intel_fbdev.h"
  56#include "display/intel_hotplug.h"
  57#include "display/intel_overlay.h"
  58#include "display/intel_pch_refclk.h"
  59#include "display/intel_pipe_crc.h"
  60#include "display/intel_pps.h"
  61#include "display/intel_sprite.h"
  62#include "display/intel_vga.h"
  63
  64#include "gem/i915_gem_context.h"
  65#include "gem/i915_gem_create.h"
  66#include "gem/i915_gem_dmabuf.h"
  67#include "gem/i915_gem_ioctls.h"
  68#include "gem/i915_gem_mman.h"
  69#include "gem/i915_gem_pm.h"
  70#include "gt/intel_gt.h"
  71#include "gt/intel_gt_pm.h"
  72#include "gt/intel_rc6.h"
  73
  74#include "pxp/intel_pxp_pm.h"
  75
  76#include "i915_file_private.h"
  77#include "i915_debugfs.h"
  78#include "i915_driver.h"
  79#include "i915_drv.h"
  80#include "i915_getparam.h"
  81#include "i915_ioc32.h"
  82#include "i915_ioctl.h"
  83#include "i915_irq.h"
  84#include "i915_memcpy.h"
  85#include "i915_perf.h"
  86#include "i915_query.h"
  87#include "i915_suspend.h"
  88#include "i915_switcheroo.h"
  89#include "i915_sysfs.h"
  90#include "i915_vgpu.h"
  91#include "intel_dram.h"
  92#include "intel_gvt.h"
  93#include "intel_memory_region.h"
  94#include "intel_pci_config.h"
  95#include "intel_pcode.h"
  96#include "intel_pm.h"
  97#include "intel_region_ttm.h"
  98#include "vlv_suspend.h"
  99
 100static const struct drm_driver i915_drm_driver;
 101
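/*
 * The host bridge always lives at device 0, function 0 on the GPU's PCI
 * domain; grab a reference to it for the MCHBAR setup and other chipset
 * pokes below.
 */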
 102static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
 103{
 104        int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
 105
 106        dev_priv->bridge_dev =
 107                pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
 108        if (!dev_priv->bridge_dev) {
 109                drm_err(&dev_priv->drm, "bridge device not found\n");
 110                return -EIO;
 111        }
 112        return 0;
 113}
 114
 115/* Allocate space for the MCH regs if needed, return nonzero on error */
 116static int
 117intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
 118{
 119        int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 120        u32 temp_lo, temp_hi = 0;
 121        u64 mchbar_addr;
 122        int ret;
 123
 124        if (GRAPHICS_VER(dev_priv) >= 4)
 125                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
 126        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
 127        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
 128
 129        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
 130#ifdef CONFIG_PNP
 131        if (mchbar_addr &&
 132            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
 133                return 0;
 134#endif
 135
 136        /* Get some space for it */
 137        dev_priv->mch_res.name = "i915 MCHBAR";
 138        dev_priv->mch_res.flags = IORESOURCE_MEM;
 139        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
 140                                     &dev_priv->mch_res,
 141                                     MCHBAR_SIZE, MCHBAR_SIZE,
 142                                     PCIBIOS_MIN_MEM,
 143                                     0, pcibios_align_resource,
 144                                     dev_priv->bridge_dev);
 145        if (ret) {
 146                drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
 147                dev_priv->mch_res.start = 0;
 148                return ret;
 149        }
 150
 151        if (GRAPHICS_VER(dev_priv) >= 4)
 152                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
 153                                       upper_32_bits(dev_priv->mch_res.start));
 154
 155        pci_write_config_dword(dev_priv->bridge_dev, reg,
 156                               lower_32_bits(dev_priv->mch_res.start));
 157        return 0;
 158}
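/*
 * Worked example (illustrative values, not from real hardware docs): on
 * gen4+ the MCHBAR window is a 64-bit address split across two 32-bit
 * config dwords, so with temp_hi = 0x00000001 and temp_lo = 0xfed10000
 * the code above yields
 *
 *	mchbar_addr = ((u64)0x00000001 << 32) | 0xfed10000 = 0x1fed10000
 *
 * and the write-back splits it again via upper_32_bits()/lower_32_bits().
 */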
 159
  160/* Set up MCHBAR if possible; mchbar_need_disable records whether we must undo it at teardown */
 161static void
 162intel_setup_mchbar(struct drm_i915_private *dev_priv)
 163{
 164        int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 165        u32 temp;
 166        bool enabled;
 167
 168        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 169                return;
 170
 171        dev_priv->mchbar_need_disable = false;
 172
 173        if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
 174                pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
 175                enabled = !!(temp & DEVEN_MCHBAR_EN);
 176        } else {
 177                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
 178                enabled = temp & 1;
 179        }
 180
 181        /* If it's already enabled, don't have to do anything */
 182        if (enabled)
 183                return;
 184
 185        if (intel_alloc_mchbar_resource(dev_priv))
 186                return;
 187
 188        dev_priv->mchbar_need_disable = true;
 189
 190        /* Space is allocated or reserved, so enable it. */
 191        if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
 192                pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
 193                                       temp | DEVEN_MCHBAR_EN);
 194        } else {
 195                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
 196                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
 197        }
 198}
 199
 200static void
 201intel_teardown_mchbar(struct drm_i915_private *dev_priv)
 202{
 203        int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 204
 205        if (dev_priv->mchbar_need_disable) {
 206                if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
 207                        u32 deven_val;
 208
 209                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
 210                                              &deven_val);
 211                        deven_val &= ~DEVEN_MCHBAR_EN;
 212                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
 213                                               deven_val);
 214                } else {
 215                        u32 mchbar_val;
 216
 217                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
 218                                              &mchbar_val);
 219                        mchbar_val &= ~1;
 220                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
 221                                               mchbar_val);
 222                }
 223        }
 224
 225        if (dev_priv->mch_res.start)
 226                release_resource(&dev_priv->mch_res);
 227}
 228
 229static int i915_workqueues_init(struct drm_i915_private *dev_priv)
 230{
 231        /*
 232         * The i915 workqueue is primarily used for batched retirement of
 233         * requests (and thus managing bo) once the task has been completed
 234         * by the GPU. i915_retire_requests() is called directly when we
 235         * need high-priority retirement, such as waiting for an explicit
 236         * bo.
 237         *
 238         * It is also used for periodic low-priority events, such as
 239         * idle-timers and recording error state.
 240         *
 241         * All tasks on the workqueue are expected to acquire the dev mutex
 242         * so there is no point in running more than one instance of the
 243         * workqueue at any time.  Use an ordered one.
 244         */
 245        dev_priv->wq = alloc_ordered_workqueue("i915", 0);
 246        if (dev_priv->wq == NULL)
 247                goto out_err;
 248
 249        dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
 250        if (dev_priv->hotplug.dp_wq == NULL)
 251                goto out_free_wq;
 252
 253        return 0;
 254
 255out_free_wq:
 256        destroy_workqueue(dev_priv->wq);
 257out_err:
 258        drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
 259
 260        return -ENOMEM;
 261}
 262
 263static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 264{
 265        destroy_workqueue(dev_priv->hotplug.dp_wq);
 266        destroy_workqueue(dev_priv->wq);
 267}
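/*
 * Illustrative sketch only (the example_* names are hypothetical, not from
 * this driver): because dev_priv->wq is an ordered workqueue, items queued
 * on it execute strictly one at a time:
 *
 *	static void example_retire_fn(struct work_struct *work)
 *	{
 *		... runs serialized against all other items on the wq ...
 *	}
 *
 *	INIT_WORK(&example_work, example_retire_fn);
 *	queue_work(dev_priv->wq, &example_work);
 */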
 268
 269/*
 270 * We don't keep the workarounds for pre-production hardware, so we expect our
 271 * driver to fail on these machines in one way or another. A little warning on
 272 * dmesg may help both the user and the bug triagers.
 273 *
 274 * Our policy for removing pre-production workarounds is to keep the
 275 * current gen workarounds as a guide to the bring-up of the next gen
 276 * (workarounds have a habit of persisting!). Anything older than that
 277 * should be removed along with the complications they introduce.
 278 */
 279static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 280{
 281        bool pre = false;
 282
 283        pre |= IS_HSW_EARLY_SDV(dev_priv);
 284        pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
 285        pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
 286        pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
 287        pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
 288        pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
 289
 290        if (pre) {
 291                drm_err(&dev_priv->drm, "This is a pre-production stepping. "
 292                          "It may not be fully functional.\n");
 293                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
 294        }
 295}
 296
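/*
 * Scrub existing GPU state with a full engine reset, but only on platforms
 * where the reset will not also clobber the display.
 */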
 297static void sanitize_gpu(struct drm_i915_private *i915)
 298{
 299        if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
 300                __intel_gt_reset(to_gt(i915), ALL_ENGINES);
 301}
 302
 303/**
 304 * i915_driver_early_probe - setup state not requiring device access
 305 * @dev_priv: device private
 306 *
  307 * Initialize everything that is "SW-only" state, i.e. state that does not
  308 * require accessing the device or exposing the driver via kernel internal
  309 * or userspace interfaces. Example steps belonging here: lock initialization,
  310 * system memory allocation, setting up device specific attributes and
  311 * function hooks not requiring device access.
 312 */
 313static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
 314{
 315        int ret = 0;
 316
 317        if (i915_inject_probe_failure(dev_priv))
 318                return -ENODEV;
 319
 320        intel_device_info_subplatform_init(dev_priv);
 321        intel_step_init(dev_priv);
 322
 323        intel_gt_init_early(to_gt(dev_priv), dev_priv);
 324        intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
 325        intel_uncore_init_early(&dev_priv->uncore, to_gt(dev_priv));
 326
 327        spin_lock_init(&dev_priv->irq_lock);
 328        spin_lock_init(&dev_priv->gpu_error.lock);
 329        mutex_init(&dev_priv->backlight_lock);
 330
 331        mutex_init(&dev_priv->sb_lock);
 332        cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
 333
 334        mutex_init(&dev_priv->audio.mutex);
 335        mutex_init(&dev_priv->wm.wm_mutex);
 336        mutex_init(&dev_priv->pps_mutex);
 337        mutex_init(&dev_priv->hdcp_comp_mutex);
 338
 339        i915_memcpy_init_early(dev_priv);
 340        intel_runtime_pm_init_early(&dev_priv->runtime_pm);
 341
 342        ret = i915_workqueues_init(dev_priv);
 343        if (ret < 0)
 344                return ret;
 345
 346        ret = vlv_suspend_init(dev_priv);
 347        if (ret < 0)
 348                goto err_workqueues;
 349
 350        ret = intel_region_ttm_device_init(dev_priv);
 351        if (ret)
 352                goto err_ttm;
 353
 354        intel_wopcm_init_early(&dev_priv->wopcm);
 355
 356        __intel_gt_init_early(to_gt(dev_priv), dev_priv);
 357
 358        i915_gem_init_early(dev_priv);
 359
 360        /* This must be called before any calls to HAS_PCH_* */
 361        intel_detect_pch(dev_priv);
 362
 363        intel_pm_setup(dev_priv);
 364        ret = intel_power_domains_init(dev_priv);
 365        if (ret < 0)
 366                goto err_gem;
 367        intel_irq_init(dev_priv);
 368        intel_init_display_hooks(dev_priv);
 369        intel_init_clock_gating_hooks(dev_priv);
 370
 371        intel_detect_preproduction_hw(dev_priv);
 372
 373        return 0;
 374
 375err_gem:
 376        i915_gem_cleanup_early(dev_priv);
 377        intel_gt_driver_late_release(to_gt(dev_priv));
 378        intel_region_ttm_device_fini(dev_priv);
 379err_ttm:
 380        vlv_suspend_cleanup(dev_priv);
 381err_workqueues:
 382        i915_workqueues_cleanup(dev_priv);
 383        return ret;
 384}
 385
 386/**
 387 * i915_driver_late_release - cleanup the setup done in
 388 *                             i915_driver_early_probe()
 389 * @dev_priv: device private
 390 */
 391static void i915_driver_late_release(struct drm_i915_private *dev_priv)
 392{
 393        intel_irq_fini(dev_priv);
 394        intel_power_domains_cleanup(dev_priv);
 395        i915_gem_cleanup_early(dev_priv);
 396        intel_gt_driver_late_release(to_gt(dev_priv));
 397        intel_region_ttm_device_fini(dev_priv);
 398        vlv_suspend_cleanup(dev_priv);
 399        i915_workqueues_cleanup(dev_priv);
 400
 401        cpu_latency_qos_remove_request(&dev_priv->sb_qos);
 402        mutex_destroy(&dev_priv->sb_lock);
 403
 404        i915_params_free(&dev_priv->params);
 405}
 406
 407/**
 408 * i915_driver_mmio_probe - setup device MMIO
 409 * @dev_priv: device private
 410 *
  411 * Set up the minimal device state necessary for MMIO accesses later in the
 412 * initialization sequence. The setup here should avoid any other device-wide
 413 * side effects or exposing the driver via kernel internal or user space
 414 * interfaces.
 415 */
 416static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
 417{
 418        int ret;
 419
 420        if (i915_inject_probe_failure(dev_priv))
 421                return -ENODEV;
 422
 423        ret = i915_get_bridge_dev(dev_priv);
 424        if (ret < 0)
 425                return ret;
 426
 427        ret = intel_uncore_setup_mmio(&dev_priv->uncore);
 428        if (ret < 0)
 429                goto err_bridge;
 430
 431        ret = intel_uncore_init_mmio(&dev_priv->uncore);
 432        if (ret)
 433                goto err_mmio;
 434
 435        /* Try to make sure MCHBAR is enabled before poking at it */
 436        intel_setup_mchbar(dev_priv);
 437        intel_device_info_runtime_init(dev_priv);
 438
 439        ret = intel_gt_init_mmio(to_gt(dev_priv));
 440        if (ret)
 441                goto err_uncore;
 442
 443        /* As early as possible, scrub existing GPU state before clobbering */
 444        sanitize_gpu(dev_priv);
 445
 446        return 0;
 447
 448err_uncore:
 449        intel_teardown_mchbar(dev_priv);
 450        intel_uncore_fini_mmio(&dev_priv->uncore);
 451err_mmio:
 452        intel_uncore_cleanup_mmio(&dev_priv->uncore);
 453err_bridge:
 454        pci_dev_put(dev_priv->bridge_dev);
 455
 456        return ret;
 457}
 458
 459/**
 460 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 461 * @dev_priv: device private
 462 */
 463static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
 464{
 465        intel_teardown_mchbar(dev_priv);
 466        intel_uncore_fini_mmio(&dev_priv->uncore);
 467        intel_uncore_cleanup_mmio(&dev_priv->uncore);
 468        pci_dev_put(dev_priv->bridge_dev);
 469}
 470
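/* Sanitize module options that depend on device knowledge; currently only GVT. */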
 471static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 472{
 473        intel_gvt_sanitize_options(dev_priv);
 474}
 475
 476/**
 477 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 478 * platform
 479 * @i915: valid i915 instance
 480 *
  481 * Set the DMA max segment size and the device and coherent masks. The DMA
  482 * mask setup needs to occur before i915_ggtt_probe_hw().
 483 *
 484 * A couple of platforms have special needs.  Address them as well.
 485 *
 486 */
 487static int i915_set_dma_info(struct drm_i915_private *i915)
 488{
 489        unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
 490        int ret;
 491
 492        GEM_BUG_ON(!mask_size);
 493
 494        /*
  495         * We don't have a max segment size, so set it to the maximum so the
  496         * scatterlist (sg) debugging layer doesn't complain
 497         */
 498        dma_set_max_seg_size(i915->drm.dev, UINT_MAX);
 499
 500        ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
 501        if (ret)
 502                goto mask_err;
 503
 504        /* overlay on gen2 is broken and can't address above 1G */
 505        if (GRAPHICS_VER(i915) == 2)
 506                mask_size = 30;
 507
 508        /*
 509         * 965GM sometimes incorrectly writes to hardware status page (HWS)
 510         * using 32bit addressing, overwriting memory if HWS is located
 511         * above 4GB.
 512         *
 513         * The documentation also mentions an issue with undefined
 514         * behaviour if any general state is accessed within a page above 4GB,
 515         * which also needs to be handled carefully.
 516         */
 517        if (IS_I965G(i915) || IS_I965GM(i915))
 518                mask_size = 32;
 519
 520        ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
 521        if (ret)
 522                goto mask_err;
 523
 524        return 0;
 525
 526mask_err:
 527        drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
 528        return ret;
 529}
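/*
 * For reference: DMA_BIT_MASK(n) is simply a mask with the low n bits set,
 * so the overrides above amount to
 *
 *	DMA_BIT_MASK(30) = 0x3fffffff   (gen2 overlay, 1 GiB limit)
 *	DMA_BIT_MASK(32) = 0xffffffff   (965G/GM HWS,  4 GiB limit)
 */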
 530
 531/**
 532 * i915_driver_hw_probe - setup state requiring device access
 533 * @dev_priv: device private
 534 *
  535 * Set up state that requires accessing the device, but doesn't require
 536 * exposing the driver via kernel internal or userspace interfaces.
 537 */
 538static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 539{
 540        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
 541        int ret;
 542
 543        if (i915_inject_probe_failure(dev_priv))
 544                return -ENODEV;
 545
 546        if (HAS_PPGTT(dev_priv)) {
 547                if (intel_vgpu_active(dev_priv) &&
 548                    !intel_vgpu_has_full_ppgtt(dev_priv)) {
 549                        i915_report_error(dev_priv,
 550                                          "incompatible vGPU found, support for isolated ppGTT required\n");
 551                        return -ENXIO;
 552                }
 553        }
 554
 555        if (HAS_EXECLISTS(dev_priv)) {
 556                /*
 557                 * Older GVT emulation depends upon intercepting CSB mmio,
 558                 * which we no longer use, preferring to use the HWSP cache
 559                 * instead.
 560                 */
 561                if (intel_vgpu_active(dev_priv) &&
 562                    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
 563                        i915_report_error(dev_priv,
 564                                          "old vGPU host found, support for HWSP emulation required\n");
 565                        return -ENXIO;
 566                }
 567        }
 568
 569        intel_sanitize_options(dev_priv);
 570
 571        /* needs to be done before ggtt probe */
 572        intel_dram_edram_detect(dev_priv);
 573
 574        ret = i915_set_dma_info(dev_priv);
 575        if (ret)
 576                return ret;
 577
 578        i915_perf_init(dev_priv);
 579
 580        ret = intel_gt_assign_ggtt(to_gt(dev_priv));
 581        if (ret)
 582                goto err_perf;
 583
 584        ret = i915_ggtt_probe_hw(dev_priv);
 585        if (ret)
 586                goto err_perf;
 587
 588        ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
 589        if (ret)
 590                goto err_ggtt;
 591
 592        ret = i915_ggtt_init_hw(dev_priv);
 593        if (ret)
 594                goto err_ggtt;
 595
 596        ret = intel_memory_regions_hw_probe(dev_priv);
 597        if (ret)
 598                goto err_ggtt;
 599
 600        ret = intel_gt_probe_lmem(to_gt(dev_priv));
 601        if (ret)
 602                goto err_mem_regions;
 603
 604        ret = i915_ggtt_enable_hw(dev_priv);
 605        if (ret) {
 606                drm_err(&dev_priv->drm, "failed to enable GGTT\n");
 607                goto err_mem_regions;
 608        }
 609
 610        pci_set_master(pdev);
 611
 612        /* On the 945G/GM, the chipset reports the MSI capability on the
 613         * integrated graphics even though the support isn't actually there
 614         * according to the published specs.  It doesn't appear to function
 615         * correctly in testing on 945G.
 616         * This may be a side effect of MSI having been made available for PEG
 617         * and the registers being closely associated.
 618         *
 619         * According to chipset errata, on the 965GM, MSI interrupts may
  620         * be lost or delayed, and MSI was defeatured. MSI interrupts seem to
 621         * get lost on g4x as well, and interrupt delivery seems to stay
 622         * properly dead afterwards. So we'll just disable them for all
 623         * pre-gen5 chipsets.
 624         *
  625         * dp aux and gmbus irq on gen4 seem to be able to generate legacy
 626         * interrupts even when in MSI mode. This results in spurious
 627         * interrupt warnings if the legacy irq no. is shared with another
 628         * device. The kernel then disables that interrupt source and so
 629         * prevents the other device from working properly.
 630         */
 631        if (GRAPHICS_VER(dev_priv) >= 5) {
 632                if (pci_enable_msi(pdev) < 0)
  633                        drm_dbg(&dev_priv->drm, "can't enable MSI\n");
 634        }
 635
 636        ret = intel_gvt_init(dev_priv);
 637        if (ret)
 638                goto err_msi;
 639
 640        intel_opregion_setup(dev_priv);
 641
 642        ret = intel_pcode_init(dev_priv);
 643        if (ret)
 644                goto err_msi;
 645
 646        /*
 647         * Fill the dram structure to get the system dram info. This will be
 648         * used for memory latency calculation.
 649         */
 650        intel_dram_detect(dev_priv);
 651
 652        intel_bw_init_hw(dev_priv);
 653
 654        return 0;
 655
 656err_msi:
 657        if (pdev->msi_enabled)
 658                pci_disable_msi(pdev);
 659err_mem_regions:
 660        intel_memory_regions_driver_release(dev_priv);
 661err_ggtt:
 662        i915_ggtt_driver_release(dev_priv);
 663        i915_gem_drain_freed_objects(dev_priv);
 664        i915_ggtt_driver_late_release(dev_priv);
 665err_perf:
 666        i915_perf_fini(dev_priv);
 667        return ret;
 668}
 669
 670/**
 671 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 672 * @dev_priv: device private
 673 */
 674static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
 675{
 676        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
 677
 678        i915_perf_fini(dev_priv);
 679
 680        if (pdev->msi_enabled)
 681                pci_disable_msi(pdev);
 682}
 683
 684/**
 685 * i915_driver_register - register the driver with the rest of the system
 686 * @dev_priv: device private
 687 *
 688 * Perform any steps necessary to make the driver available via kernel
 689 * internal or userspace interfaces.
 690 */
 691static void i915_driver_register(struct drm_i915_private *dev_priv)
 692{
 693        struct drm_device *dev = &dev_priv->drm;
 694
 695        i915_gem_driver_register(dev_priv);
 696        i915_pmu_register(dev_priv);
 697
 698        intel_vgpu_register(dev_priv);
 699
 700        /* Reveal our presence to userspace */
 701        if (drm_dev_register(dev, 0)) {
 702                drm_err(&dev_priv->drm,
 703                        "Failed to register driver for userspace access!\n");
 704                return;
 705        }
 706
 707        i915_debugfs_register(dev_priv);
 708        i915_setup_sysfs(dev_priv);
 709
 710        /* Depends on sysfs having been initialized */
 711        i915_perf_register(dev_priv);
 712
 713        intel_gt_driver_register(to_gt(dev_priv));
 714
 715        intel_display_driver_register(dev_priv);
 716
 717        intel_power_domains_enable(dev_priv);
 718        intel_runtime_pm_enable(&dev_priv->runtime_pm);
 719
 720        intel_register_dsm_handler();
 721
 722        if (i915_switcheroo_register(dev_priv))
 723                drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
 724}
 725
 726/**
  727 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 728 * @dev_priv: device private
 729 */
 730static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 731{
 732        i915_switcheroo_unregister(dev_priv);
 733
 734        intel_unregister_dsm_handler();
 735
 736        intel_runtime_pm_disable(&dev_priv->runtime_pm);
 737        intel_power_domains_disable(dev_priv);
 738
 739        intel_display_driver_unregister(dev_priv);
 740
 741        intel_gt_driver_unregister(to_gt(dev_priv));
 742
 743        i915_perf_unregister(dev_priv);
 744        i915_pmu_unregister(dev_priv);
 745
 746        i915_teardown_sysfs(dev_priv);
 747        drm_dev_unplug(&dev_priv->drm);
 748
 749        i915_gem_driver_unregister(dev_priv);
 750}
 751
 752void
 753i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
 754{
 755        drm_printf(p, "iommu: %s\n", enableddisabled(intel_vtd_active(i915)));
 756}
 757
 758static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 759{
 760        if (drm_debug_enabled(DRM_UT_DRIVER)) {
 761                struct drm_printer p = drm_debug_printer("i915 device info:");
 762
 763                drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
 764                           INTEL_DEVID(dev_priv),
 765                           INTEL_REVID(dev_priv),
 766                           intel_platform_name(INTEL_INFO(dev_priv)->platform),
 767                           intel_subplatform(RUNTIME_INFO(dev_priv),
 768                                             INTEL_INFO(dev_priv)->platform),
 769                           GRAPHICS_VER(dev_priv));
 770
 771                intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
 772                intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
 773                i915_print_iommu_status(dev_priv, &p);
 774                intel_gt_info_print(&to_gt(dev_priv)->info, &p);
 775        }
 776
 777        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
 778                drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
 779        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 780                drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
 781        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
 782                drm_info(&dev_priv->drm,
 783                         "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
 784}
 785
 786static struct drm_i915_private *
 787i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 788{
 789        const struct intel_device_info *match_info =
 790                (struct intel_device_info *)ent->driver_data;
 791        struct intel_device_info *device_info;
 792        struct drm_i915_private *i915;
 793
 794        i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
 795                                  struct drm_i915_private, drm);
 796        if (IS_ERR(i915))
 797                return i915;
 798
 799        pci_set_drvdata(pdev, i915);
 800
 801        /* Device parameters start as a copy of module parameters. */
 802        i915_params_copy(&i915->params, &i915_modparams);
 803
 804        /* Setup the write-once "constant" device info */
 805        device_info = mkwrite_device_info(i915);
 806        memcpy(device_info, match_info, sizeof(*device_info));
 807        RUNTIME_INFO(i915)->device_id = pdev->device;
 808
 809        return i915;
 810}
 811
 812/**
 813 * i915_driver_probe - setup chip and create an initial config
 814 * @pdev: PCI device
 815 * @ent: matching PCI ID entry
 816 *
 817 * The driver probe routine has to do several things:
 818 *   - drive output discovery via intel_modeset_init()
 819 *   - initialize the memory manager
 820 *   - allocate initial config memory
 821 *   - setup the DRM framebuffer with the allocated memory
 822 */
 823int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 824{
 825        const struct intel_device_info *match_info =
 826                (struct intel_device_info *)ent->driver_data;
 827        struct drm_i915_private *i915;
 828        int ret;
 829
 830        i915 = i915_driver_create(pdev, ent);
 831        if (IS_ERR(i915))
 832                return PTR_ERR(i915);
 833
 834        /* Disable nuclear pageflip by default on pre-ILK */
 835        if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5)
 836                i915->drm.driver_features &= ~DRIVER_ATOMIC;
 837
 838        ret = pci_enable_device(pdev);
 839        if (ret)
 840                goto out_fini;
 841
 842        ret = i915_driver_early_probe(i915);
 843        if (ret < 0)
 844                goto out_pci_disable;
 845
 846        disable_rpm_wakeref_asserts(&i915->runtime_pm);
 847
 848        intel_vgpu_detect(i915);
 849
 850        ret = i915_driver_mmio_probe(i915);
 851        if (ret < 0)
 852                goto out_runtime_pm_put;
 853
 854        ret = i915_driver_hw_probe(i915);
 855        if (ret < 0)
 856                goto out_cleanup_mmio;
 857
 858        ret = intel_modeset_init_noirq(i915);
 859        if (ret < 0)
 860                goto out_cleanup_hw;
 861
 862        ret = intel_irq_install(i915);
 863        if (ret)
 864                goto out_cleanup_modeset;
 865
 866        ret = intel_modeset_init_nogem(i915);
 867        if (ret)
 868                goto out_cleanup_irq;
 869
 870        ret = i915_gem_init(i915);
 871        if (ret)
 872                goto out_cleanup_modeset2;
 873
 874        ret = intel_modeset_init(i915);
 875        if (ret)
 876                goto out_cleanup_gem;
 877
 878        i915_driver_register(i915);
 879
 880        enable_rpm_wakeref_asserts(&i915->runtime_pm);
 881
 882        i915_welcome_messages(i915);
 883
 884        i915->do_release = true;
 885
 886        return 0;
 887
 888out_cleanup_gem:
 889        i915_gem_suspend(i915);
 890        i915_gem_driver_remove(i915);
 891        i915_gem_driver_release(i915);
 892out_cleanup_modeset2:
 893        /* FIXME clean up the error path */
 894        intel_modeset_driver_remove(i915);
 895        intel_irq_uninstall(i915);
 896        intel_modeset_driver_remove_noirq(i915);
 897        goto out_cleanup_modeset;
 898out_cleanup_irq:
 899        intel_irq_uninstall(i915);
 900out_cleanup_modeset:
 901        intel_modeset_driver_remove_nogem(i915);
 902out_cleanup_hw:
 903        i915_driver_hw_remove(i915);
 904        intel_memory_regions_driver_release(i915);
 905        i915_ggtt_driver_release(i915);
 906        i915_gem_drain_freed_objects(i915);
 907        i915_ggtt_driver_late_release(i915);
 908out_cleanup_mmio:
 909        i915_driver_mmio_release(i915);
 910out_runtime_pm_put:
 911        enable_rpm_wakeref_asserts(&i915->runtime_pm);
 912        i915_driver_late_release(i915);
 913out_pci_disable:
 914        pci_disable_device(pdev);
 915out_fini:
 916        i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
 917        return ret;
 918}
 919
 920void i915_driver_remove(struct drm_i915_private *i915)
 921{
 922        disable_rpm_wakeref_asserts(&i915->runtime_pm);
 923
 924        i915_driver_unregister(i915);
 925
 926        /* Flush any external code that still may be under the RCU lock */
 927        synchronize_rcu();
 928
 929        i915_gem_suspend(i915);
 930
 931        intel_gvt_driver_remove(i915);
 932
 933        intel_modeset_driver_remove(i915);
 934
 935        intel_irq_uninstall(i915);
 936
 937        intel_modeset_driver_remove_noirq(i915);
 938
 939        i915_reset_error_state(i915);
 940        i915_gem_driver_remove(i915);
 941
 942        intel_modeset_driver_remove_nogem(i915);
 943
 944        i915_driver_hw_remove(i915);
 945
 946        enable_rpm_wakeref_asserts(&i915->runtime_pm);
 947}
 948
 949static void i915_driver_release(struct drm_device *dev)
 950{
 951        struct drm_i915_private *dev_priv = to_i915(dev);
 952        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
 953
 954        if (!dev_priv->do_release)
 955                return;
 956
 957        disable_rpm_wakeref_asserts(rpm);
 958
 959        i915_gem_driver_release(dev_priv);
 960
 961        intel_memory_regions_driver_release(dev_priv);
 962        i915_ggtt_driver_release(dev_priv);
 963        i915_gem_drain_freed_objects(dev_priv);
 964        i915_ggtt_driver_late_release(dev_priv);
 965
 966        i915_driver_mmio_release(dev_priv);
 967
 968        enable_rpm_wakeref_asserts(rpm);
 969        intel_runtime_pm_driver_release(rpm);
 970
 971        i915_driver_late_release(dev_priv);
 972}
 973
 974static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 975{
 976        struct drm_i915_private *i915 = to_i915(dev);
 977        int ret;
 978
 979        ret = i915_gem_open(i915, file);
 980        if (ret)
 981                return ret;
 982
 983        return 0;
 984}
 985
 986/**
 987 * i915_driver_lastclose - clean up after all DRM clients have exited
 988 * @dev: DRM device
 989 *
 990 * Take care of cleaning up after all DRM clients have exited.  In the
 991 * mode setting case, we want to restore the kernel's initial mode (just
 992 * in case the last client left us in a bad state).
 993 *
 994 * Additionally, in the non-mode setting case, we'll tear down the GTT
  995 * and DMA structures, since the kernel won't be using them, and clean
 996 * up any GEM state.
 997 */
 998static void i915_driver_lastclose(struct drm_device *dev)
 999{
1000        struct drm_i915_private *i915 = to_i915(dev);
1001
1002        intel_fbdev_restore_mode(dev);
1003
1004        if (HAS_DISPLAY(i915))
1005                vga_switcheroo_process_delayed_switch();
1006}
1007
1008static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1009{
1010        struct drm_i915_file_private *file_priv = file->driver_priv;
1011
1012        i915_gem_context_close(file);
1013
1014        kfree_rcu(file_priv, rcu);
1015
1016        /* Catch up with all the deferred frees from "this" client */
1017        i915_gem_flush_free_objects(to_i915(dev));
1018}
1019
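/*
 * Walk all encoders under the modeset locks and invoke their optional
 * ->suspend() hook (and, in the variant below, ->shutdown()).
 */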
1020static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1021{
1022        struct drm_device *dev = &dev_priv->drm;
1023        struct intel_encoder *encoder;
1024
1025        if (!HAS_DISPLAY(dev_priv))
1026                return;
1027
1028        drm_modeset_lock_all(dev);
1029        for_each_intel_encoder(dev, encoder)
1030                if (encoder->suspend)
1031                        encoder->suspend(encoder);
1032        drm_modeset_unlock_all(dev);
1033}
1034
1035static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
1036{
1037        struct drm_device *dev = &dev_priv->drm;
1038        struct intel_encoder *encoder;
1039
1040        if (!HAS_DISPLAY(dev_priv))
1041                return;
1042
1043        drm_modeset_lock_all(dev);
1044        for_each_intel_encoder(dev, encoder)
1045                if (encoder->shutdown)
1046                        encoder->shutdown(encoder);
1047        drm_modeset_unlock_all(dev);
1048}
1049
1050void i915_driver_shutdown(struct drm_i915_private *i915)
1051{
1052        disable_rpm_wakeref_asserts(&i915->runtime_pm);
1053        intel_runtime_pm_disable(&i915->runtime_pm);
1054        intel_power_domains_disable(i915);
1055
1056        i915_gem_suspend(i915);
1057
1058        if (HAS_DISPLAY(i915)) {
1059                drm_kms_helper_poll_disable(&i915->drm);
1060
1061                drm_atomic_helper_shutdown(&i915->drm);
1062        }
1063
1064        intel_dp_mst_suspend(i915);
1065
1066        intel_runtime_pm_disable_interrupts(i915);
1067        intel_hpd_cancel_work(i915);
1068
1069        intel_suspend_encoders(i915);
1070        intel_shutdown_encoders(i915);
1071
1072        intel_dmc_ucode_suspend(i915);
1073
1074        /*
1075         * The only requirement is to reboot with display DC states disabled,
1076         * for now leaving all display power wells in the INIT power domain
1077         * enabled.
1078         *
1079         * TODO:
1080         * - unify the pci_driver::shutdown sequence here with the
1081         *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
1082         * - unify the driver remove and system/runtime suspend sequences with
1083         *   the above unified shutdown/poweroff sequence.
1084         */
1085        intel_power_domains_driver_remove(i915);
1086        enable_rpm_wakeref_asserts(&i915->runtime_pm);
1087
1088        intel_runtime_pm_driver_release(&i915->runtime_pm);
1089}
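/*
 * For context (an approximate sketch; the actual wiring lives in
 * i915_pci.c): i915_driver_probe(), i915_driver_remove() and
 * i915_driver_shutdown() are invoked from thin PCI entry points,
 * registered roughly as
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		.id_table = pciidlist,
 *		.probe = i915_pci_probe,
 *		.remove = i915_pci_remove,
 *		.shutdown = i915_pci_shutdown,
 *		.driver.pm = &i915_pm_ops,
 *	};
 */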
1090
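/* True when the target sleep state is suspend-to-idle rather than ACPI S3. */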
1091static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1092{
1093#if IS_ENABLED(CONFIG_ACPI_SLEEP)
1094        if (acpi_target_system_state() < ACPI_STATE_S3)
1095                return true;
1096#endif
1097        return false;
1098}
1099
1100static int i915_drm_prepare(struct drm_device *dev)
1101{
1102        struct drm_i915_private *i915 = to_i915(dev);
1103
1104        /*
1105         * NB intel_display_suspend() may issue new requests after we've
1106         * ostensibly marked the GPU as ready-to-sleep here. We need to
 1107         * split out that work and pull it forward so that, after that point,
1108         * the GPU is not woken again.
1109         */
1110        return i915_gem_backup_suspend(i915);
1111}
1112
1113static int i915_drm_suspend(struct drm_device *dev)
1114{
1115        struct drm_i915_private *dev_priv = to_i915(dev);
1116        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
1117        pci_power_t opregion_target_state;
1118
1119        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1120
 1121        /* We do a lot of poking in a lot of registers; make sure they
 1122         * work properly. */
1123        intel_power_domains_disable(dev_priv);
1124        if (HAS_DISPLAY(dev_priv))
1125                drm_kms_helper_poll_disable(dev);
1126
1127        pci_save_state(pdev);
1128
1129        intel_display_suspend(dev);
1130
1131        intel_dp_mst_suspend(dev_priv);
1132
1133        intel_runtime_pm_disable_interrupts(dev_priv);
1134        intel_hpd_cancel_work(dev_priv);
1135
1136        intel_suspend_encoders(dev_priv);
1137
1138        intel_suspend_hw(dev_priv);
1139
1140        /* Must be called before GGTT is suspended. */
1141        intel_dpt_suspend(dev_priv);
1142        i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
1143
1144        i915_save_display(dev_priv);
1145
1146        opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
1147        intel_opregion_suspend(dev_priv, opregion_target_state);
1148
1149        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
1150
1151        dev_priv->suspend_count++;
1152
1153        intel_dmc_ucode_suspend(dev_priv);
1154
1155        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1156
1157        return 0;
1158}
1159
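/* Map the PM transition onto the suspend mode the power domain code expects. */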
1160static enum i915_drm_suspend_mode
1161get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
1162{
1163        if (hibernate)
1164                return I915_DRM_SUSPEND_HIBERNATE;
1165
1166        if (suspend_to_idle(dev_priv))
1167                return I915_DRM_SUSPEND_IDLE;
1168
1169        return I915_DRM_SUSPEND_MEM;
1170}
1171
1172static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1173{
1174        struct drm_i915_private *dev_priv = to_i915(dev);
1175        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
1176        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1177        int ret;
1178
1179        disable_rpm_wakeref_asserts(rpm);
1180
1181        i915_gem_suspend_late(dev_priv);
1182
1183        intel_uncore_suspend(&dev_priv->uncore);
1184
1185        intel_power_domains_suspend(dev_priv,
1186                                    get_suspend_mode(dev_priv, hibernation));
1187
1188        intel_display_power_suspend_late(dev_priv);
1189
1190        ret = vlv_suspend_complete(dev_priv);
1191        if (ret) {
1192                drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
1193                intel_power_domains_resume(dev_priv);
1194
1195                goto out;
1196        }
1197
1198        /*
 1199         * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
 1200         * This should be removed entirely once we handle the PCI states
 1201         * properly for runtime PM and s2idle.
1202         */
1203        if (suspend_to_idle(dev_priv))
1204                pci_d3cold_disable(pdev);
1205
1206        pci_disable_device(pdev);
1207        /*
1208         * During hibernation on some platforms the BIOS may try to access
1209         * the device even though it's already in D3 and hang the machine. So
1210         * leave the device in D0 on those platforms and hope the BIOS will
1211         * power down the device properly. The issue was seen on multiple old
1212         * GENs with different BIOS vendors, so having an explicit blacklist
 1213         * is impractical; apply the workaround on everything pre GEN6. The
1214         * platforms where the issue was seen:
1215         * Lenovo Thinkpad X301, X61s, X60, T60, X41
1216         * Fujitsu FSC S7110
1217         * Acer Aspire 1830T
1218         */
1219        if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
1220                pci_set_power_state(pdev, PCI_D3hot);
1221
1222out:
1223        enable_rpm_wakeref_asserts(rpm);
1224        if (!dev_priv->uncore.user_forcewake_count)
1225                intel_runtime_pm_driver_release(rpm);
1226
1227        return ret;
1228}
1229
1230int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
1231                                   pm_message_t state)
1232{
1233        int error;
1234
1235        if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
1236                             state.event != PM_EVENT_FREEZE))
1237                return -EINVAL;
1238
1239        if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1240                return 0;
1241
1242        error = i915_drm_suspend(&i915->drm);
1243        if (error)
1244                return error;
1245
1246        return i915_drm_suspend_late(&i915->drm, false);
1247}
1248
1249static int i915_drm_resume(struct drm_device *dev)
1250{
1251        struct drm_i915_private *dev_priv = to_i915(dev);
1252        int ret;
1253
1254        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1255
1256        ret = intel_pcode_init(dev_priv);
1257        if (ret)
1258                return ret;
1259
1260        sanitize_gpu(dev_priv);
1261
1262        ret = i915_ggtt_enable_hw(dev_priv);
1263        if (ret)
1264                drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
1265
1266        i915_ggtt_resume(to_gt(dev_priv)->ggtt);
1267        /* Must be called after GGTT is resumed. */
1268        intel_dpt_resume(dev_priv);
1269
1270        intel_dmc_ucode_resume(dev_priv);
1271
1272        i915_restore_display(dev_priv);
1273        intel_pps_unlock_regs_wa(dev_priv);
1274
1275        intel_init_pch_refclk(dev_priv);
1276
1277        /*
1278         * Interrupts have to be enabled before any batches are run. If not the
1279         * GPU will hang. i915_gem_init_hw() will initiate batches to
1280         * update/restore the context.
1281         *
1282         * drm_mode_config_reset() needs AUX interrupts.
1283         *
1284         * Modeset enabling in intel_modeset_init_hw() also needs working
1285         * interrupts.
1286         */
1287        intel_runtime_pm_enable_interrupts(dev_priv);
1288
1289        if (HAS_DISPLAY(dev_priv))
1290                drm_mode_config_reset(dev);
1291
1292        i915_gem_resume(dev_priv);
1293
1294        intel_modeset_init_hw(dev_priv);
1295        intel_init_clock_gating(dev_priv);
1296        intel_hpd_init(dev_priv);
1297
1298        /* MST sideband requires HPD interrupts enabled */
1299        intel_dp_mst_resume(dev_priv);
1300        intel_display_resume(dev);
1301
1302        intel_hpd_poll_disable(dev_priv);
1303        if (HAS_DISPLAY(dev_priv))
1304                drm_kms_helper_poll_enable(dev);
1305
1306        intel_opregion_resume(dev_priv);
1307
1308        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
1309
1310        intel_power_domains_enable(dev_priv);
1311
1312        intel_gvt_resume(dev_priv);
1313
1314        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1315
1316        return 0;
1317}
1318
1319static int i915_drm_resume_early(struct drm_device *dev)
1320{
1321        struct drm_i915_private *dev_priv = to_i915(dev);
1322        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
1323        int ret;
1324
1325        /*
1326         * We have a resume ordering issue with the snd-hda driver also
 1327         * requiring our device to be powered up. Due to the lack of a
1328         * parent/child relationship we currently solve this with an early
1329         * resume hook.
1330         *
1331         * FIXME: This should be solved with a special hdmi sink device or
1332         * similar so that power domains can be employed.
1333         */
1334
1335        /*
1336         * Note that we need to set the power state explicitly, since we
1337         * powered off the device during freeze and the PCI core won't power
1338         * it back up for us during thaw. Powering off the device during
1339         * freeze is not a hard requirement though, and during the
1340         * suspend/resume phases the PCI core makes sure we get here with the
1341         * device powered on. So in case we change our freeze logic and keep
1342         * the device powered we can also remove the following set power state
1343         * call.
1344         */
1345        ret = pci_set_power_state(pdev, PCI_D0);
1346        if (ret) {
1347                drm_err(&dev_priv->drm,
1348                        "failed to set PCI D0 power state (%d)\n", ret);
1349                return ret;
1350        }
1351
1352        /*
1353         * Note that pci_enable_device() first enables any parent bridge
1354         * device and only then sets the power state for this device. The
1355         * bridge enabling is a nop though, since bridge devices are resumed
1356         * first. The order of enabling power and enabling the device is
1357         * imposed by the PCI core as described above, so here we preserve the
1358         * same order for the freeze/thaw phases.
1359         *
1360         * TODO: eventually we should remove pci_disable_device() /
 1361         * pci_enable_device() from suspend/resume. Due to how they
1362         * depend on the device enable refcount we can't anyway depend on them
1363         * disabling/enabling the device.
1364         */
1365        if (pci_enable_device(pdev))
1366                return -EIO;
1367
1368        pci_set_master(pdev);
1369
1370        pci_d3cold_enable(pdev);
1371
1372        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1373
1374        ret = vlv_resume_prepare(dev_priv, false);
1375        if (ret)
1376                drm_err(&dev_priv->drm,
1377                        "Resume prepare failed: %d, continuing anyway\n", ret);
1378
1379        intel_uncore_resume_early(&dev_priv->uncore);
1380
1381        intel_gt_check_and_clear_faults(to_gt(dev_priv));
1382
1383        intel_display_power_resume_early(dev_priv);
1384
1385        intel_power_domains_resume(dev_priv);
1386
1387        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1388
1389        return ret;
1390}
1391
1392int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
1393{
1394        int ret;
1395
1396        if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1397                return 0;
1398
1399        ret = i915_drm_resume_early(&i915->drm);
1400        if (ret)
1401                return ret;
1402
1403        return i915_drm_resume(&i915->drm);
1404}
1405
1406static int i915_pm_prepare(struct device *kdev)
1407{
1408        struct drm_i915_private *i915 = kdev_to_i915(kdev);
1409
1410        if (!i915) {
1411                dev_err(kdev, "DRM not initialized, aborting suspend.\n");
1412                return -ENODEV;
1413        }
1414
1415        if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1416                return 0;
1417
1418        return i915_drm_prepare(&i915->drm);
1419}
1420
1421static int i915_pm_suspend(struct device *kdev)
1422{
1423        struct drm_i915_private *i915 = kdev_to_i915(kdev);
1424
1425        if (!i915) {
1426                dev_err(kdev, "DRM not initialized, aborting suspend.\n");
1427                return -ENODEV;
1428        }
1429
1430        if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1431                return 0;
1432
1433        return i915_drm_suspend(&i915->drm);
1434}
1435
1436static int i915_pm_suspend_late(struct device *kdev)
1437{
1438        struct drm_i915_private *i915 = kdev_to_i915(kdev);
1439
1440        /*
1441         * We have a suspend ordering issue with the snd-hda driver also
 1442         * requiring our device to be powered up. Due to the lack of a
 1443         * parent/child relationship we currently solve this with a late
1444         * suspend hook.
1445         *
1446         * FIXME: This should be solved with a special hdmi sink device or
1447         * similar so that power domains can be employed.
1448         */
1449        if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1450                return 0;
1451
1452        return i915_drm_suspend_late(&i915->drm, false);
1453}
1454
1455static int i915_pm_poweroff_late(struct device *kdev)
1456{
1457        struct drm_i915_private *i915 = kdev_to_i915(kdev);
1458
1459        if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1460                return 0;
1461
1462        return i915_drm_suspend_late(&i915->drm, true);
1463}
1464
1465static int i915_pm_resume_early(struct device *kdev)
1466{
1467        struct drm_i915_private *i915 = kdev_to_i915(kdev);
1468
1469        if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1470                return 0;
1471
1472        return i915_drm_resume_early(&i915->drm);
1473}
1474
1475static int i915_pm_resume(struct device *kdev)
1476{
1477        struct drm_i915_private *i915 = kdev_to_i915(kdev);
1478
1479        if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1480                return 0;
1481
1482        return i915_drm_resume(&i915->drm);
1483}
1484
1485/* freeze: before creating the hibernation_image */
1486static int i915_pm_freeze(struct device *kdev)
1487{
1488        struct drm_i915_private *i915 = kdev_to_i915(kdev);
1489        int ret;
1490
1491        if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
1492                ret = i915_drm_suspend(&i915->drm);
1493                if (ret)
1494                        return ret;
1495        }
1496
1497        ret = i915_gem_freeze(i915);
1498        if (ret)
1499                return ret;
1500
1501        return 0;
1502}
1503
1504static int i915_pm_freeze_late(struct device *kdev)
1505{
1506        struct drm_i915_private *i915 = kdev_to_i915(kdev);
1507        int ret;
1508
1509        if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
1510                ret = i915_drm_suspend_late(&i915->drm, true);
1511                if (ret)
1512                        return ret;
1513        }
1514
1515        ret = i915_gem_freeze_late(i915);
1516        if (ret)
1517                return ret;
1518
1519        return 0;
1520}
1521
1522/* thaw: called after creating the hibernation image, but before turning off. */
1523static int i915_pm_thaw_early(struct device *kdev)
1524{
1525        return i915_pm_resume_early(kdev);
1526}
1527
1528static int i915_pm_thaw(struct device *kdev)
1529{
1530        return i915_pm_resume(kdev);
1531}
1532
1533/* restore: called after loading the hibernation image. */
1534static int i915_pm_restore_early(struct device *kdev)
1535{
1536        return i915_pm_resume_early(kdev);
1537}
1538
1539static int i915_pm_restore(struct device *kdev)
1540{
1541        return i915_pm_resume(kdev);
1542}
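/*
 * For context, an approximate sketch of how the hooks above are wired into
 * the driver's dev_pm_ops table (the authoritative table is defined later
 * in the file):
 *
 *	const struct dev_pm_ops i915_pm_ops = {
 *		.prepare = i915_pm_prepare,
 *		.suspend = i915_pm_suspend,
 *		.suspend_late = i915_pm_suspend_late,
 *		.resume_early = i915_pm_resume_early,
 *		.resume = i915_pm_resume,
 *		.freeze = i915_pm_freeze,
 *		.freeze_late = i915_pm_freeze_late,
 *		.thaw_early = i915_pm_thaw_early,
 *		.thaw = i915_pm_thaw,
 *		.poweroff = i915_pm_suspend,
 *		.poweroff_late = i915_pm_poweroff_late,
 *		.restore_early = i915_pm_restore_early,
 *		.restore = i915_pm_restore,
 *		.runtime_suspend = intel_runtime_suspend,
 *		.runtime_resume = intel_runtime_resume,
 *	};
 */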
1543
1544static int intel_runtime_suspend(struct device *kdev)
1545{
1546        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1547        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1548        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
1549        int ret;
1550
1551        if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
1552                return -ENODEV;
1553
1554        drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
1555
1556        disable_rpm_wakeref_asserts(rpm);
1557
1558        /*
1559         * We are safe here against re-faults, since the fault handler takes
1560         * an RPM reference.
1561         */
1562        i915_gem_runtime_suspend(dev_priv);
1563
1564        intel_gt_runtime_suspend(to_gt(dev_priv));
1565
1566        intel_runtime_pm_disable_interrupts(dev_priv);
1567
1568        intel_uncore_suspend(&dev_priv->uncore);
1569
1570        intel_display_power_suspend(dev_priv);
1571
1572        ret = vlv_suspend_complete(dev_priv);
1573        if (ret) {
1574                drm_err(&dev_priv->drm,
1575                        "Runtime suspend failed, disabling it (%d)\n", ret);
1576                intel_uncore_runtime_resume(&dev_priv->uncore);
1577
1578                intel_runtime_pm_enable_interrupts(dev_priv);
1579
1580                intel_gt_runtime_resume(to_gt(dev_priv));
1581
1582                enable_rpm_wakeref_asserts(rpm);
1583
1584                return ret;
1585        }
1586
1587        enable_rpm_wakeref_asserts(rpm);
1588        intel_runtime_pm_driver_release(rpm);
1589
1590        if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
1591                drm_err(&dev_priv->drm,
1592                        "Unclaimed access detected prior to suspending\n");
1593
1594        /*
 1595         * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
 1596         * This should be removed entirely once we handle the PCI states
 1597         * properly for runtime PM and s2idle.
1598         */
	pci_d3cold_disable(pdev);
	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" rather than what you would normally
		 * expect (D3), in order to distinguish it from notifications
		 * that might be sent via the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_enable(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
	return 0;
}
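
/*
 * Illustrative sketch only; the helper below is hypothetical and not part
 * of the driver. It shows the usual pattern on the other side of runtime
 * PM: hardware access is bracketed by a wakeref, so intel_runtime_suspend()
 * above only runs once the last wakeref has been dropped, and taking a new
 * one triggers intel_runtime_resume() below.
 */
static void __maybe_unused example_bracketed_hw_access(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* Resumes the device first if it was runtime suspended. */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* ... hardware registers may be touched safely here ... */

	/* Drop the reference; the device may runtime suspend again. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}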

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	pci_d3cold_enable(pdev);
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * There is no point in rolling things back on error, as the best we
	 * can do is hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(to_gt(dev_priv));

	/*
	 * On VLV/CHV, display interrupts are part of the display power
	 * well, so HPD is reinitialized from there. For everyone else,
	 * do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 *
	 * The PCI core invokes all of these through the driver's pm pointer;
	 * see the wiring sketch after this table.
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
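
/*
 * A minimal sketch of how these ops are consumed (the actual wiring lives
 * in i915_pci.c): the PCI core finds them through the pm pointer embedded
 * in the pci_driver's struct device_driver and invokes the matching
 * callback at each power transition.
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		...
 *		.driver.pm = &i915_pm_ops,
 *	};
 */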

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
};
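
/*
 * Illustrative only: userspace reaches these fops through the DRM device
 * nodes. The node path and mapping offset below are hypothetical; the
 * offset must come from the GEM mmap-offset ioctl.
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, mmap_offset);	(handled by i915_gem_mmap())
 */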

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
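
/*
 * Userspace side, as a sketch (assumes libdrm's drmIoctl() wrapper): the
 * DRM_RENDER_ALLOW entries above, such as I915_GETPARAM, are reachable
 * from an unprivileged render node.
 *
 *	int chip_id = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &chip_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */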

/*
 * Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

static const struct drm_driver i915_drm_driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
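
/*
 * Sketch of how this driver struct is ultimately used (the real sequence
 * lives in i915_driver_probe() earlier in this file): a device-managed
 * drm_device is allocated against it and then registered with the DRM core.
 *
 *	i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
 *				  struct drm_i915_private, drm);
 *	...
 *	drm_dev_register(&i915->drm, 0);
 */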