/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>


static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
        case I915_PARAM_ALLOW_BATCHBUFFER:
        case I915_PARAM_LAST_DISPATCH:
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pdev->device;
                break;
        case I915_PARAM_REVISION:
                value = dev->pdev->revision;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
                value = intel_ring_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_BSD2:
                value = intel_ring_initialized(&dev_priv->ring[VCS2]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = USES_PPGTT(dev);
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_NO_RELOC:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
                value = 1;
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version();
                break;
        case I915_PARAM_HAS_COHERENT_PHYS_GTT:
                value = 1;
                break;
        case I915_PARAM_MMAP_VERSION:
                value = 1;
                break;
        case I915_PARAM_SUBSLICE_TOTAL:
                value = INTEL_INFO(dev)->subslice_total;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
                value = INTEL_INFO(dev)->eu_total;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_HAS_GPU_RESET:
                value = i915.enable_hangcheck &&
                        intel_has_gpu_reset(dev);
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
                value = HAS_RESOURCE_STREAMER(dev);
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        if (copy_to_user(param->value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user failed\n");
                return -EFAULT;
        }

        return 0;
}
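
/*
 * Illustrative sketch (kept in this comment, not compiled into the
 * driver): how userspace would query one of the parameters above through
 * the GETPARAM ioctl. The fd comes from opening a DRM device node; the
 * helper name chipset_id() is hypothetical.
 *
 *      int chipset_id(int fd)
 *      {
 *              int value = 0;
 *              drm_i915_getparam_t gp = {
 *                      .param = I915_PARAM_CHIPSET_ID,
 *                      .value = &value,
 *              };
 *
 *              if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *                      return -errno;  // e.g. EINVAL for unknown params
 *              return value;           // PCI device id of the GPU
 *      }
 */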

static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                /* Reject all old ums/dri params. */
                return -ENODEV;

        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                        param->param);
                return -EINVAL;
        }

        return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}
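
/*
 * A quick worked example of the lookup above: PCI_DEVFN(0, 0) encodes
 * device 0, function 0 as (0 << 3) | 0 == 0, so pci_get_bus_and_slot(0,
 * PCI_DEVFN(0, 0)) grabs a reference to bus 0, device 00.0, which by PC
 * convention is the host bridge; its config space holds the MCHBAR
 * registers poked below. The reference is dropped with pci_dev_put() on
 * the error and unload paths.
 */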

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;

        if (INTEL_INFO(dev)->gen >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        dev_priv->mch_res.name = "i915 MCHBAR";
        dev_priv->mch_res.flags = IORESOURCE_MEM;
        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
                                     &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
                                     0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
                return ret;
        }

        if (INTEL_INFO(dev)->gen >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
        return 0;
}
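
/*
 * Worked example of the address assembly above (values hypothetical):
 * with temp_hi == 0x00000001 and temp_lo == 0xfed10000,
 *
 *      mchbar_addr = ((u64)0x1 << 32) | 0xfed10000 == 0x1fed10000
 *
 * i.e. the two 32-bit config dwords are combined into one 64-bit MCHBAR
 * base address. On gen < 4 only the low dword exists, so temp_hi stays 0.
 */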

/* Set up MCHBAR if possible; note whether we must disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        if (IS_VALLEYVIEW(dev))
                return;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
                } else {
                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                        temp &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
                }
        }

        if (dev_priv->mch_res.start)
                release_resource(&dev_priv->mch_res);
}

/* true = enable VGA decode, false = disable VGA decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume_legacy(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend_legacy(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .set_gpu_state = i915_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = i915_switcheroo_can_switch,
};

static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        /* If we have more than one VGA card installed, we need to arbitrate
         * access to the common VGA resources.
         *
         * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
         * then we do not take part in VGA arbitration and
         * vga_client_register() fails with -ENODEV.
         */
        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
        if (ret && ret != -ENODEV)
                goto out;

        intel_register_dsm_handler();

        ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;

        /* Initialise stolen first so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        ret = i915_gem_init_stolen(dev);
        if (ret)
                goto cleanup_vga_switcheroo;

        intel_power_domains_init_hw(dev_priv);

        ret = intel_irq_install(dev_priv);
        if (ret)
                goto cleanup_gem_stolen;

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);

        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;

        intel_modeset_gem_init(dev);

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = true;
        if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev_priv);

        /*
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g.
         * VGA on gm45. Hence we can only set up the initial fbdev config
         * after hpd irqs are fully enabled. Ideally we would scan for the
         * initial config only once hotplug handling is enabled, but due to
         * screwed-up locking around kms/fbdev init we can't protect the
         * fbdev initial config scanning against hotplug events. Hence do
         * this first and ignore the tiny window where we will lose hotplug
         * notifications.
         */
        async_schedule(intel_fbdev_initial_config, dev_priv);

        drm_kms_helper_poll_init(dev);

        return 0;

cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
cleanup_irq:
        drm_irq_uninstall(dev);
cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
        vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
        vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
        return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        struct apertures_struct *ap;
        struct pci_dev *pdev = dev_priv->dev->pdev;
        bool primary;
        int ret;

        ap = alloc_apertures(1);
        if (!ap)
                return -ENOMEM;

        ap->ranges[0].base = dev_priv->gtt.mappable_base;
        ap->ranges[0].size = dev_priv->gtt.mappable_end;

        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

        ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

        kfree(ap);

        return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        DRM_INFO("Replacing VGA console driver\n");

        console_lock();
        if (con_is_bound(&vga_con))
                ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
        if (ret == 0) {
                ret = do_unregister_con_driver(&vga_con);

                /* Ignore "already unregistered". */
                if (ret == -ENODEV)
                        ret = 0;
        }
        console_unlock();

        return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
        const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
        DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
                         DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
                         info->gen,
                         dev_priv->dev->pdev->device,
                         dev_priv->dev->pdev->revision,
                         DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
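
/*
 * The PRINT_S/PRINT_FLAG pairs above are the classic X-macro trick:
 * DEV_INFO_FOR_EACH_FLAG() applies one macro to every flag name, joined
 * by the separator macro. A reduced sketch with two made-up flags shows
 * the expansion:
 *
 *      #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
 *              func(is_mobile) sep \
 *              func(has_llc)
 *
 *      DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY)
 *              -> "%s" "%s"
 *      DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)
 *              -> info->is_mobile ? "is_mobile," : "",
 *                 info->has_llc ? "has_llc," : ""
 *
 * so a single DRM_DEBUG_DRIVER() call gets a format specifier and a
 * matching argument for every flag without listing the flags twice.
 */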

static void cherryview_sseu_info_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        u32 fuse, eu_dis;

        info = (struct intel_device_info *)&dev_priv->info;
        fuse = I915_READ(CHV_FUSE_GT);

        info->slice_total = 1;

        if (!(fuse & CHV_FGT_DISABLE_SS0)) {
                info->subslice_per_slice++;
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
                                 CHV_FGT_EU_DIS_SS0_R1_MASK);
                info->eu_total += 8 - hweight32(eu_dis);
        }

        if (!(fuse & CHV_FGT_DISABLE_SS1)) {
                info->subslice_per_slice++;
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
                                 CHV_FGT_EU_DIS_SS1_R1_MASK);
                info->eu_total += 8 - hweight32(eu_dis);
        }

        info->subslice_total = info->subslice_per_slice;
        /*
         * CHV is expected to always have a uniform distribution of EU
         * across subslices.
         */
        info->eu_per_subslice = info->subslice_total ?
                                info->eu_total / info->subslice_total :
                                0;
        /*
         * CHV supports subslice power gating on devices with more than
         * one subslice, and supports EU power gating on devices with
         * more than one EU pair per subslice.
         */
        info->has_slice_pg = 0;
        info->has_subslice_pg = (info->subslice_total > 1);
        info->has_eu_pg = (info->eu_per_subslice > 2);
}
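
/*
 * Worked example of the fuse arithmetic above (register value
 * hypothetical): if subslice 0 is enabled and its two EU-disable fields
 * have three bits set in total, then
 *
 *      info->eu_total += 8 - hweight32(eu_dis) == 8 - 3 == 5
 *
 * i.e. each set fuse bit disables one of the subslice's 8 EUs. With both
 * subslices at 5 EUs, eu_per_subslice == 10 / 2 == 5, and has_eu_pg is
 * true since 5 EUs means more than one EU pair.
 */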

static void gen9_sseu_info_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        int s_max = 3, ss_max = 4, eu_max = 8;
        int s, ss;
        u32 fuse2, s_enable, ss_disable, eu_disable;
        u8 eu_mask = 0xff;

        /*
         * BXT has a single slice. BXT also has at most 6 EU per subslice,
         * and therefore only the lowest 6 bits of the 8-bit EU disable
         * fields are valid.
         */
        if (IS_BROXTON(dev)) {
                s_max = 1;
                eu_max = 6;
                eu_mask = 0x3f;
        }

        info = (struct intel_device_info *)&dev_priv->info;
        fuse2 = I915_READ(GEN8_FUSE2);
        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
                   GEN8_F2_S_ENA_SHIFT;
        ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
                     GEN9_F2_SS_DIS_SHIFT;

        info->slice_total = hweight32(s_enable);
        /*
         * The subslice disable field is global, i.e. it applies
         * to each of the enabled slices.
         */
        info->subslice_per_slice = ss_max - hweight32(ss_disable);
        info->subslice_total = info->slice_total *
                               info->subslice_per_slice;

        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
         */
        for (s = 0; s < s_max; s++) {
                if (!(s_enable & (0x1 << s)))
                        /* skip disabled slice */
                        continue;

                eu_disable = I915_READ(GEN9_EU_DISABLE(s));
                for (ss = 0; ss < ss_max; ss++) {
                        int eu_per_ss;

                        if (ss_disable & (0x1 << ss))
                                /* skip disabled subslice */
                                continue;

                        eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
                                                      eu_mask);

                        /*
                         * Record which subslice(s) have 7 EUs. We can tune
                         * the hash used to spread work among subslices if
                         * they are unbalanced.
                         */
                        if (eu_per_ss == 7)
                                info->subslice_7eu[s] |= 1 << ss;

                        info->eu_total += eu_per_ss;
                }
        }

        /*
         * SKL is expected to always have a uniform distribution
         * of EU across subslices with the exception that any one
         * EU in any one subslice may be fused off for die
         * recovery. BXT is expected to be perfectly uniform in EU
         * distribution.
         */
        info->eu_per_subslice = info->subslice_total ?
                                DIV_ROUND_UP(info->eu_total,
                                             info->subslice_total) : 0;
        /*
         * SKL supports slice power gating on devices with more than
         * one slice, and supports EU power gating on devices with
         * more than one EU pair per subslice. BXT supports subslice
         * power gating on devices with more than one subslice, and
         * supports EU power gating on devices with more than one EU
         * pair per subslice.
         */
        info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
        info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
        info->has_eu_pg = (info->eu_per_subslice > 2);
}
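
/*
 * A short worked example (numbers hypothetical) of why eu_per_subslice
 * uses DIV_ROUND_UP here: on a SKL part with 3 enabled subslices where a
 * single EU was fused off for die recovery, eu_total could be 23, and
 *
 *      DIV_ROUND_UP(23, 3) == 8
 *
 * reports the nominal 8 EUs per subslice instead of truncating to 7,
 * matching the "uniform except for die recovery" expectation above.
 */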

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been set up, as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first use of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        enum pipe pipe;

        info = (struct intel_device_info *)&dev_priv->info;

        /*
         * Skylake and Broxton currently don't expose the topmost plane as its
         * use is exclusive with the legacy cursor and we only want to expose
         * one of those, not both. Until we can safely expose the topmost plane
         * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
         * we don't expose the topmost plane at all to prevent ABI breakage
         * down the line.
         */
        if (IS_BROXTON(dev)) {
                info->num_sprites[PIPE_A] = 2;
                info->num_sprites[PIPE_B] = 2;
                info->num_sprites[PIPE_C] = 1;
        } else if (IS_VALLEYVIEW(dev))
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 1;

        if (i915.disable_display) {
                DRM_INFO("Display disabled (module parameter)\n");
                info->num_pipes = 0;
        } else if (info->num_pipes > 0 &&
                   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
                   !IS_VALLEYVIEW(dev)) {
                u32 fuse_strap = I915_READ(FUSE_STRAP);
                u32 sfuse_strap = I915_READ(SFUSE_STRAP);

                /*
                 * SFUSE_STRAP is supposed to have a bit signalling the display
                 * is fused off. Unfortunately it seems that, at least in
                 * certain cases, fused off display means that PCH display
                 * reads don't land anywhere. In that case, we read 0s.
                 *
                 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
                 * should be set when taking over after the firmware.
                 */
                if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
                    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
                    (dev_priv->pch_type == PCH_CPT &&
                     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
                        DRM_INFO("Display fused off, disabling\n");
                        info->num_pipes = 0;
                }
        }

        /* Initialize slice/subslice/EU info */
        if (IS_CHERRYVIEW(dev))
                cherryview_sseu_info_init(dev);
        else if (INTEL_INFO(dev)->gen >= 9)
                gen9_sseu_info_init(dev);

        DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
        DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
        DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
        DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
        DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
        DRM_DEBUG_DRIVER("has slice power gating: %s\n",
                         info->has_slice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
                         info->has_subslice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has EU power gating: %s\n",
                         info->has_eu_pg ? "y" : "n");
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv;
        struct intel_device_info *info, *device_info;
        int ret = 0, mmio_bar, mmio_size;
        uint32_t aperture_size;

        info = (struct intel_device_info *) flags;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = dev_priv;
        dev_priv->dev = dev;

        /* Setup the write-once "constant" device info */
        device_info = (struct intel_device_info *)&dev_priv->info;
        memcpy(device_info, info, sizeof(dev_priv->info));
        device_info->device_id = dev->pdev->device;

        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
        mutex_init(&dev_priv->sb_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->csr_lock);

        intel_pm_setup(dev);

        intel_display_crc_init(dev);

        i915_dump_device_info(dev_priv);

        /* Not all pre-production machines fall into this category, only the
         * very first ones. Almost everything should work, except for maybe
         * suspend/resume. And we don't implement workarounds that affect only
         * pre-production machines. */
        if (IS_HSW_EARLY_SDV(dev))
                DRM_INFO("This is an early pre-production Haswell machine. "
                         "It may not be fully functional.\n");

        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
        }

        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        /* Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
         * in the same BAR, so we want to restrict this ioremap from
         * clobbering the GTT, which we want to ioremap_wc instead.
         * Fortunately, the register BAR remains the same size for all the
         * earlier generations up to Ironlake.
         */
        if (info->gen < 5)
                mmio_size = 512*1024;
        else
                mmio_size = 2*1024*1024;

        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
                ret = -EIO;
                goto put_bridge;
        }

        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev);

        intel_uncore_init(dev);

        /* Load CSR Firmware for SKL */
        intel_csr_ucode_init(dev);

        ret = i915_gem_gtt_init(dev);
        if (ret)
                goto out_freecsr;

        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over. */
        ret = i915_kick_out_firmware_fb(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
                goto out_gtt;
        }

        ret = i915_kick_out_vgacon(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting VGA console\n");
                goto out_gtt;
        }

        pci_set_master(dev->pdev);

        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

        /* 965GM sometimes incorrectly writes to hardware status page (HWS)
         * using 32bit addressing, overwriting memory if HWS is located
         * above 4GB.
         *
         * The documentation also mentions an issue with undefined
         * behaviour if any general state is accessed within a page above 4GB,
         * which also needs to be handled carefully.
         */
        if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

        aperture_size = dev_priv->gtt.mappable_end;

        dev_priv->gtt.mappable =
                io_mapping_create_wc(dev_priv->gtt.mappable_base,
                                     aperture_size);
        if (dev_priv->gtt.mappable == NULL) {
                ret = -EIO;
                goto out_gtt;
        }

        dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
                                              aperture_size);

        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_gem_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and recording error state.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time.  Use an ordered one.
         */
        dev_priv->wq = alloc_ordered_workqueue("i915", 0);
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
                goto out_mtrrfree;
        }

        dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
        if (dev_priv->hotplug.dp_wq == NULL) {
                DRM_ERROR("Failed to create our dp workqueue.\n");
                ret = -ENOMEM;
                goto out_freewq;
        }

        dev_priv->gpu_error.hangcheck_wq =
                alloc_ordered_workqueue("i915-hangcheck", 0);
        if (dev_priv->gpu_error.hangcheck_wq == NULL) {
                DRM_ERROR("Failed to create our hangcheck workqueue.\n");
                ret = -ENOMEM;
                goto out_freedpwq;
        }

        intel_irq_init(dev_priv);
        intel_uncore_sanitize(dev);

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);

        intel_setup_bios(dev);

        i915_gem_load(dev);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, but we use them anyway to avoid
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);

        intel_device_info_runtime_init(dev);

        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
                if (ret)
                        goto out_gem_unload;
        }

        intel_power_domains_init(dev_priv);

        ret = i915_load_modeset_init(dev);
        if (ret < 0) {
                DRM_ERROR("failed to init modeset\n");
                goto out_power_well;
        }

        /*
         * Notify a valid surface after modesetting,
         * when running inside a VM.
         */
        if (intel_vgpu_active(dev))
                I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

        i915_setup_sysfs(dev);

        if (INTEL_INFO(dev)->num_pipes) {
                /* Must be done after probing outputs */
                intel_opregion_init(dev);
                acpi_video_register();
        }

        if (IS_GEN5(dev))
                intel_gpu_ips_init(dev_priv);

        intel_runtime_pm_enable(dev_priv);

        i915_audio_component_init(dev_priv);

        return 0;

out_power_well:
        intel_power_domains_fini(dev_priv);
        drm_vblank_cleanup(dev);
out_gem_unload:
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
        pm_qos_remove_request(&dev_priv->pm_qos);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
        destroy_workqueue(dev_priv->hotplug.dp_wq);
out_freewq:
        destroy_workqueue(dev_priv->wq);
out_mtrrfree:
        arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
        i915_global_gtt_cleanup(dev);
out_freecsr:
        intel_csr_ucode_fini(dev);
        intel_uncore_fini(dev);
        pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
free_priv:
        if (dev_priv->requests)
                kmem_cache_destroy(dev_priv->requests);
        if (dev_priv->vmas)
                kmem_cache_destroy(dev_priv->vmas);
        if (dev_priv->objects)
                kmem_cache_destroy(dev_priv->objects);
        kfree(dev_priv);
        return ret;
}
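
/*
 * The error path of i915_driver_load() above is the standard kernel
 * "goto unwind" idiom: each acquired resource gains a label, and a later
 * failure jumps to the label that releases everything acquired so far,
 * in reverse order. A minimal sketch of the pattern (acquire_a/acquire_b
 * and release_a are placeholders, not real kernel APIs):
 *
 *      a = acquire_a();
 *      if (!a)
 *              return -ENOMEM;
 *
 *      b = acquire_b();
 *      if (!b) {
 *              ret = -EIO;
 *              goto out_a;
 *      }
 *
 *      return 0;
 *
 * out_a:
 *      release_a(a);
 *      return ret;
 */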

int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        i915_audio_component_cleanup(dev_priv);

        ret = i915_gem_suspend(dev);
        if (ret) {
                DRM_ERROR("failed to idle hardware: %d\n", ret);
                return ret;
        }

        intel_power_domains_fini(dev_priv);

        intel_gpu_ips_teardown();

        i915_teardown_sysfs(dev);

        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);

        io_mapping_free(dev_priv->gtt.mappable);
        arch_phys_wc_del(dev_priv->gtt.mtrr);

        acpi_video_unregister();

        intel_fbdev_fini(dev);

        drm_vblank_cleanup(dev);

        intel_modeset_cleanup(dev);

        /*
         * free the memory space allocated for the child device
         * config parsed from VBT
         */
        if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
                kfree(dev_priv->vbt.child_dev);
                dev_priv->vbt.child_dev = NULL;
                dev_priv->vbt.child_dev_num = 0;
        }

        vga_switcheroo_unregister_client(dev->pdev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);

        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        i915_destroy_error_state(dev);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_opregion_fini(dev);

        /* Flush any outstanding unpin_work. */
        flush_workqueue(dev_priv->wq);

        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        intel_fbc_cleanup_cfb(dev_priv);
        i915_gem_cleanup_stolen(dev);

        intel_csr_ucode_fini(dev);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);

        destroy_workqueue(dev_priv->hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
        pm_qos_remove_request(&dev_priv->pm_qos);

        i915_global_gtt_cleanup(dev);

        intel_uncore_fini(dev);
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);

        if (dev_priv->requests)
                kmem_cache_destroy(dev_priv->requests);
        if (dev_priv->vmas)
                kmem_cache_destroy(dev_priv->vmas);
        if (dev_priv->objects)
                kmem_cache_destroy(dev_priv->objects);

        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev_priv);

        return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        int ret;

        ret = i915_gem_open(dev, file);
        if (ret)
                return ret;

        return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
        intel_fbdev_restore_mode(dev);
        vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
        mutex_lock(&dev->struct_mutex);
        i915_gem_context_close(dev, file);
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);

        intel_modeset_preclose(dev, file);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        if (file_priv && file_priv->bsd_ring)
                file_priv->bsd_ring = NULL;
        kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        return -ENODEV;
}

const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
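
/*
 * Dispatch note: the DRM core routes device-specific ioctls into this
 * table by index. For an ioctl number nr in the device range, drm_ioctl()
 * looks up i915_ioctls[nr - DRM_COMMAND_BASE], checks the DRM_AUTH /
 * DRM_MASTER / DRM_RENDER_ALLOW style flags against the caller, and only
 * then invokes the bound handler. For example, i915_drm.h defines
 *
 *      DRM_IOCTL_I915_GETPARAM ==
 *              DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM,
 *                       drm_i915_getparam_t)
 *
 * so a drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) from userspace lands in
 * i915_getparam() near the top of this file.
 */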

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);